/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
 *
 * This file is released under the GPLv2.
 *
 */
# include <linux/module.h>
# include <linux/file.h>
# include <linux/delay.h>
# include <linux/bitops.h>
# include <linux/genhd.h>
# include <linux/device.h>
# include <linux/bio.h>
2006-09-26 10:32:44 +04:00
# include <linux/blkdev.h>
2006-03-23 14:00:00 +03:00
# include <linux/swap.h>
# include <linux/swapops.h>
# include <linux/pm.h>
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 11:04:11 +03:00
# include <linux/slab.h>
2010-09-10 01:06:23 +04:00
# include <linux/lzo.h>
# include <linux/vmalloc.h>
2011-10-14 01:58:07 +04:00
# include <linux/cpumask.h>
# include <linux/atomic.h>
# include <linux/kthread.h>
# include <linux/crc32.h>
2014-10-30 21:04:53 +03:00
# include <linux/ktime.h>
2006-03-23 14:00:00 +03:00
# include "power.h"
2010-12-11 23:46:44 +03:00
# define HIBERNATE_SIG "S1SUSPEND"
2006-03-23 14:00:00 +03:00
2010-05-02 01:53:02 +04:00
/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition.  It consists of many swap_map_page
 * structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
 * These structures are stored on the swap and linked together with the
 * help of the .next_swap member.
 *
 * The swap map is created during suspend.  The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we pick up all swap_map_page structures into a list.
 */
# define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
2012-04-25 01:53:28 +04:00
/*
 * low_free_pages - return the number of free pages that live in low
 * (non-highmem) memory.  Only these are usable for the swap map itself.
 */
static inline unsigned long low_free_pages(void)
{
	return nr_free_pages() - nr_free_highpages();
}
/*
 * reqd_free_pages - number of pages that must be kept free while writing
 * the image: always half of the currently available low pages.
 */
static inline unsigned long reqd_free_pages(void)
{
	return low_free_pages() / 2;
}
2010-05-02 01:53:02 +04:00
/* One on-swap page of the swap map: a batch of image sectors plus a link. */
struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];	/* sectors holding image data */
	sector_t next_swap;			/* sector of the next map page */
};
2011-10-14 01:58:07 +04:00
/* In-memory singly linked list of swap map pages, built during resume. */
struct swap_map_page_list {
	struct swap_map_page *map;
	struct swap_map_page_list *next;
};
2010-05-02 01:53:02 +04:00
/**
* The swap_map_handle structure is used for handling swap in
* a file - alike way
*/
struct swap_map_handle {
struct swap_map_page * cur ;
2011-10-14 01:58:07 +04:00
struct swap_map_page_list * maps ;
2010-05-02 01:53:02 +04:00
sector_t cur_swap ;
sector_t first_sector ;
unsigned int k ;
2012-04-25 01:53:28 +04:00
unsigned long reqd_free_pages ;
2011-10-14 01:58:07 +04:00
u32 crc32 ;
2010-05-02 01:53:02 +04:00
} ;
2007-05-02 21:27:07 +04:00
struct swsusp_header {
2011-10-14 01:58:07 +04:00
char reserved [ PAGE_SIZE - 20 - sizeof ( sector_t ) - sizeof ( int ) -
sizeof ( u32 ) ] ;
u32 crc32 ;
2006-12-07 07:34:10 +03:00
sector_t image ;
swsusp: introduce restore platform operations
At least on some machines it is necessary to prepare the ACPI firmware for the
restoration of the system memory state from the hibernation image if the
"platform" mode of hibernation has been used. Namely, in that cases we need
to disable the GPEs before replacing the "boot" kernel with the "frozen"
kernel (cf. http://bugzilla.kernel.org/show_bug.cgi?id=7887). After the
restore they will be re-enabled by hibernation_ops->finish(), but if the
restore fails, they have to be re-enabled by the restore code explicitly.
For this purpose we can introduce two additional hibernation operations,
called pre_restore() and restore_cleanup() and call them from the restore code
path. Still, they should be called if the "platform" mode of hibernation has
been used, so we need to pass the information about the hibernation mode from
the "frozen" kernel to the "boot" kernel in the image header.
Apparently, we can't drop the disabling of GPEs before the restore because of
Bug #7887 . We also can't do it unconditionally, because the GPEs wouldn't
have been enabled after a successful restore if the suspend had been done in
the 'shutdown' or 'reboot' mode.
In principle we could (and probably should) unconditionally disable the GPEs
before each snapshot creation *and* before the restore, but then we'd have to
unconditionally enable them after the snapshot creation as well as after the
restore (or restore failure) Still, for this purpose we'd need to modify
acpi_enter_sleep_state_prep() and acpi_leave_sleep_state() and we'd have to
introduce some mechanism synchronizing the disablind/enabling of the GPEs with
the device drivers' .suspend()/.resume() routines and with
disable_/enable_nonboot_cpus(). However, this would have affected the
suspend (ie. s2ram) code as well as the hibernation, which I'd like to avoid
in this patch series.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Nigel Cunningham <nigel@nigel.suspend2.net>
Cc: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-07-19 12:47:30 +04:00
unsigned int flags ; /* Flags to pass to the "boot" kernel */
2006-03-23 14:00:00 +03:00
char orig_sig [ 10 ] ;
char sig [ 10 ] ;
2014-04-08 02:39:20 +04:00
} __packed ;
2007-05-02 21:27:07 +04:00
static struct swsusp_header * swsusp_header ;
2006-03-23 14:00:00 +03:00
2009-12-06 18:15:53 +03:00
/**
 * The following functions are used for tracing the allocated
 * swap pages, so that they can be freed in case of an error.
 *
 * Allocated swap offsets are kept as merged [start, end] ranges in an
 * rbtree keyed by offset.
 */
struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;
static int swsusp_extents_insert ( unsigned long swap_offset )
{
struct rb_node * * new = & ( swsusp_extents . rb_node ) ;
struct rb_node * parent = NULL ;
struct swsusp_extent * ext ;
/* Figure out where to put the new node */
while ( * new ) {
2012-10-23 03:21:09 +04:00
ext = rb_entry ( * new , struct swsusp_extent , node ) ;
2009-12-06 18:15:53 +03:00
parent = * new ;
if ( swap_offset < ext - > start ) {
/* Try to merge */
if ( swap_offset = = ext - > start - 1 ) {
ext - > start - - ;
return 0 ;
}
new = & ( ( * new ) - > rb_left ) ;
} else if ( swap_offset > ext - > end ) {
/* Try to merge */
if ( swap_offset = = ext - > end + 1 ) {
ext - > end + + ;
return 0 ;
}
new = & ( ( * new ) - > rb_right ) ;
} else {
/* It already is in the tree */
return - EINVAL ;
}
}
/* Add the new node and rebalance the tree. */
ext = kzalloc ( sizeof ( struct swsusp_extent ) , GFP_KERNEL ) ;
if ( ! ext )
return - ENOMEM ;
ext - > start = swap_offset ;
ext - > end = swap_offset ;
rb_link_node ( & ext - > node , parent , new ) ;
rb_insert_color ( & ext - > node , & swsusp_extents ) ;
return 0 ;
}
/**
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 *
 * Returns the device block of the allocated page, or 0 on failure.
 */
sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		/* If we cannot track the page, give it back right away. */
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}
/**
* free_all_swap_pages - free swap pages allocated for saving image data .
2010-06-08 00:23:12 +04:00
* It also frees the extents used to register which swap entries had been
2009-12-06 18:15:53 +03:00
* allocated .
*/
void free_all_swap_pages ( int swap )
{
struct rb_node * node ;
while ( ( node = swsusp_extents . rb_node ) ) {
struct swsusp_extent * ext ;
unsigned long offset ;
ext = container_of ( node , struct swsusp_extent , node ) ;
rb_erase ( node , & swsusp_extents ) ;
for ( offset = ext - > start ; offset < = ext - > end ; offset + + )
2010-09-10 03:38:07 +04:00
swap_free ( swp_entry ( swap , offset ) ) ;
2009-12-06 18:15:53 +03:00
kfree ( ext ) ;
}
}
int swsusp_swap_in_use ( void )
{
return ( swsusp_extents . rb_node ! = NULL ) ;
}
2006-03-23 14:00:00 +03:00
/*
2006-12-07 07:34:09 +03:00
* General things
2006-03-23 14:00:00 +03:00
*/
static unsigned short root_swap = 0xffff ;
2015-05-19 10:23:23 +03:00
static struct block_device * hib_resume_bdev ;
/* Tracks a batch of in-flight bios so the submitter can wait for them all. */
struct hib_bio_batch {
	atomic_t		count;	/* bios still in flight */
	wait_queue_head_t	wait;	/* woken when count drops to zero */
	int			error;	/* first error seen by a completion */
};
static void hib_init_batch ( struct hib_bio_batch * hb )
{
atomic_set ( & hb - > count , 0 ) ;
init_waitqueue_head ( & hb - > wait ) ;
hb - > error = 0 ;
}
2015-07-20 16:29:37 +03:00
static void hib_end_io ( struct bio * bio )
2015-05-19 10:23:23 +03:00
{
struct hib_bio_batch * hb = bio - > bi_private ;
struct page * page = bio - > bi_io_vec [ 0 ] . bv_page ;
2015-07-20 16:29:37 +03:00
if ( bio - > bi_error ) {
2015-05-19 10:23:23 +03:00
printk ( KERN_ALERT " Read-error on swap-device (%u:%u:%Lu) \n " ,
imajor ( bio - > bi_bdev - > bd_inode ) ,
iminor ( bio - > bi_bdev - > bd_inode ) ,
( unsigned long long ) bio - > bi_iter . bi_sector ) ;
}
if ( bio_data_dir ( bio ) = = WRITE )
put_page ( page ) ;
2015-07-20 16:29:37 +03:00
if ( bio - > bi_error & & ! hb - > error )
hb - > error = bio - > bi_error ;
2015-05-19 10:23:23 +03:00
if ( atomic_dec_and_test ( & hb - > count ) )
wake_up ( & hb - > wait ) ;
bio_put ( bio ) ;
}
/*
 * hib_submit_io - read or write one page of the resume device.
 * @rw:       READ/WRITE request flags.
 * @page_off: page-sized offset on the device.
 * @addr:     kernel address of the page-aligned buffer.
 * @hb:       optional batch; if set, the bio completes asynchronously via
 *            hib_end_io(), otherwise we wait for it here.
 *
 * Returns 0 on success or a negative error code.
 */
static int hib_submit_io(int rw, pgoff_t page_off, void *addr,
		struct hib_bio_batch *hb)
{
	struct page *page = virt_to_page(addr);
	struct bio *bio;
	int error = 0;

	bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
	bio->bi_bdev = hib_resume_bdev;

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
			(unsigned long long)bio->bi_iter.bi_sector);
		bio_put(bio);
		return -EFAULT;
	}

	if (hb) {
		bio->bi_end_io = hib_end_io;
		bio->bi_private = hb;
		atomic_inc(&hb->count);
		submit_bio(rw, bio);
	} else {
		error = submit_bio_wait(rw, bio);
		bio_put(bio);
	}

	return error;
}
static int hib_wait_io ( struct hib_bio_batch * hb )
{
wait_event ( hb - > wait , atomic_read ( & hb - > count ) = = 0 ) ;
return hb - > error ;
}
2006-12-07 07:34:09 +03:00
/*
* Saving part
*/
2006-03-23 14:00:00 +03:00
2010-05-02 01:53:02 +04:00
static int mark_swapfiles ( struct swap_map_handle * handle , unsigned int flags )
2006-03-23 14:00:00 +03:00
{
int error ;
2015-05-19 10:23:23 +03:00
hib_submit_io ( READ_SYNC , swsusp_resume_block , swsusp_header , NULL ) ;
2007-05-02 21:27:07 +04:00
if ( ! memcmp ( " SWAP-SPACE " , swsusp_header - > sig , 10 ) | |
! memcmp ( " SWAPSPACE2 " , swsusp_header - > sig , 10 ) ) {
memcpy ( swsusp_header - > orig_sig , swsusp_header - > sig , 10 ) ;
2010-10-05 00:08:12 +04:00
memcpy ( swsusp_header - > sig , HIBERNATE_SIG , 10 ) ;
2010-05-02 01:53:02 +04:00
swsusp_header - > image = handle - > first_sector ;
swsusp: introduce restore platform operations
At least on some machines it is necessary to prepare the ACPI firmware for the
restoration of the system memory state from the hibernation image if the
"platform" mode of hibernation has been used. Namely, in that cases we need
to disable the GPEs before replacing the "boot" kernel with the "frozen"
kernel (cf. http://bugzilla.kernel.org/show_bug.cgi?id=7887). After the
restore they will be re-enabled by hibernation_ops->finish(), but if the
restore fails, they have to be re-enabled by the restore code explicitly.
For this purpose we can introduce two additional hibernation operations,
called pre_restore() and restore_cleanup() and call them from the restore code
path. Still, they should be called if the "platform" mode of hibernation has
been used, so we need to pass the information about the hibernation mode from
the "frozen" kernel to the "boot" kernel in the image header.
Apparently, we can't drop the disabling of GPEs before the restore because of
Bug #7887 . We also can't do it unconditionally, because the GPEs wouldn't
have been enabled after a successful restore if the suspend had been done in
the 'shutdown' or 'reboot' mode.
In principle we could (and probably should) unconditionally disable the GPEs
before each snapshot creation *and* before the restore, but then we'd have to
unconditionally enable them after the snapshot creation as well as after the
restore (or restore failure) Still, for this purpose we'd need to modify
acpi_enter_sleep_state_prep() and acpi_leave_sleep_state() and we'd have to
introduce some mechanism synchronizing the disablind/enabling of the GPEs with
the device drivers' .suspend()/.resume() routines and with
disable_/enable_nonboot_cpus(). However, this would have affected the
suspend (ie. s2ram) code as well as the hibernation, which I'd like to avoid
in this patch series.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Nigel Cunningham <nigel@nigel.suspend2.net>
Cc: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-07-19 12:47:30 +04:00
swsusp_header - > flags = flags ;
2011-10-14 01:58:07 +04:00
if ( flags & SF_CRC32_MODE )
swsusp_header - > crc32 = handle - > crc32 ;
2015-05-19 10:23:23 +03:00
error = hib_submit_io ( WRITE_SYNC , swsusp_resume_block ,
2007-05-02 21:27:07 +04:00
swsusp_header , NULL ) ;
2006-03-23 14:00:00 +03:00
} else {
2007-12-08 04:09:43 +03:00
printk ( KERN_ERR " PM: Swap header not found! \n " ) ;
2006-03-23 14:00:00 +03:00
error = - ENODEV ;
}
return error ;
}
/**
* swsusp_swap_check - check if the resume device is a swap device
* and get its index ( if so )
2010-05-02 01:54:02 +04:00
*
* This is called before saving image
2006-03-23 14:00:00 +03:00
*/
2010-05-02 01:54:02 +04:00
static int swsusp_swap_check ( void )
2006-03-23 14:00:00 +03:00
{
2006-12-07 07:34:10 +03:00
int res ;
2007-01-06 03:36:28 +03:00
res = swap_type_of ( swsusp_resume_device , swsusp_resume_block ,
2010-05-02 01:52:34 +04:00
& hib_resume_bdev ) ;
2006-12-07 07:34:10 +03:00
if ( res < 0 )
return res ;
root_swap = res ;
block: make blkdev_get/put() handle exclusive access
Over time, block layer has accumulated a set of APIs dealing with bdev
open, close, claim and release.
* blkdev_get/put() are the primary open and close functions.
* bd_claim/release() deal with exclusive open.
* open/close_bdev_exclusive() are combination of open and claim and
the other way around, respectively.
* bd_link/unlink_disk_holder() to create and remove holder/slave
symlinks.
* open_by_devnum() wraps bdget() + blkdev_get().
The interface is a bit confusing and the decoupling of open and claim
makes it impossible to properly guarantee exclusive access as
in-kernel open + claim sequence can disturb the existing exclusive
open even before the block layer knows the current open if for another
exclusive access. Reorganize the interface such that,
* blkdev_get() is extended to include exclusive access management.
@holder argument is added and, if is @FMODE_EXCL specified, it will
gain exclusive access atomically w.r.t. other exclusive accesses.
* blkdev_put() is similarly extended. It now takes @mode argument and
if @FMODE_EXCL is set, it releases an exclusive access. Also, when
the last exclusive claim is released, the holder/slave symlinks are
removed automatically.
* bd_claim/release() and close_bdev_exclusive() are no longer
necessary and either made static or removed.
* bd_link_disk_holder() remains the same but bd_unlink_disk_holder()
is no longer necessary and removed.
* open_bdev_exclusive() becomes a simple wrapper around lookup_bdev()
and blkdev_get(). It also has an unexpected extra bdev_read_only()
test which probably should be moved into blkdev_get().
* open_by_devnum() is modified to take @holder argument and pass it to
blkdev_get().
Most of bdev open/close operations are unified into blkdev_get/put()
and most exclusive accesses are tested atomically at the open time (as
it should). This cleans up code and removes some, both valid and
invalid, but unnecessary all the same, corner cases.
open_bdev_exclusive() and open_by_devnum() can use further cleanup -
rename to blkdev_get_by_path() and blkdev_get_by_devt() and drop
special features. Well, let's leave them for another day.
Most conversions are straight-forward. drbd conversion is a bit more
involved as there was some reordering, but the logic should stay the
same.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Neil Brown <neilb@suse.de>
Acked-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Philipp Reisner <philipp.reisner@linbit.com>
Cc: Peter Osterlund <petero2@telia.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <joel.becker@oracle.com>
Cc: Alex Elder <aelder@sgi.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: dm-devel@redhat.com
Cc: drbd-dev@lists.linbit.com
Cc: Leo Chen <leochen@broadcom.com>
Cc: Scott Branden <sbranden@broadcom.com>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
Cc: Joern Engel <joern@logfs.org>
Cc: reiserfs-devel@vger.kernel.org
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
2010-11-13 13:55:17 +03:00
res = blkdev_get ( hib_resume_bdev , FMODE_WRITE , NULL ) ;
2007-01-06 03:36:28 +03:00
if ( res )
return res ;
2006-12-07 07:34:10 +03:00
2010-05-02 01:52:34 +04:00
res = set_blocksize ( hib_resume_bdev , PAGE_SIZE ) ;
2006-12-07 07:34:10 +03:00
if ( res < 0 )
2010-05-02 01:52:34 +04:00
blkdev_put ( hib_resume_bdev , FMODE_WRITE ) ;
2006-03-23 14:00:00 +03:00
return res ;
}
/**
* write_page - Write one page to given swap location .
* @ buf : Address we ' re writing .
* @ offset : Offset of the swap page we ' re writing to .
2015-05-19 10:23:23 +03:00
* @ hb : bio completion batch
2006-03-23 14:00:00 +03:00
*/
2015-05-19 10:23:23 +03:00
static int write_page ( void * buf , sector_t offset , struct hib_bio_batch * hb )
2006-03-23 14:00:00 +03:00
{
2006-12-07 07:34:10 +03:00
void * src ;
2011-10-14 01:58:07 +04:00
int ret ;
2006-12-07 07:34:10 +03:00
if ( ! offset )
return - ENOSPC ;
2015-05-19 10:23:23 +03:00
if ( hb ) {
2015-11-07 03:28:28 +03:00
src = ( void * ) __get_free_page ( __GFP_RECLAIM | __GFP_NOWARN |
2012-04-30 00:42:06 +04:00
__GFP_NORETRY ) ;
2006-12-07 07:34:10 +03:00
if ( src ) {
2010-10-27 01:22:27 +04:00
copy_page ( src , buf ) ;
2006-12-07 07:34:10 +03:00
} else {
2015-05-19 10:23:23 +03:00
ret = hib_wait_io ( hb ) ; /* Free pages */
2011-10-14 01:58:07 +04:00
if ( ret )
return ret ;
2015-11-07 03:28:28 +03:00
src = ( void * ) __get_free_page ( __GFP_RECLAIM |
2012-04-30 00:42:06 +04:00
__GFP_NOWARN |
__GFP_NORETRY ) ;
2011-10-14 01:58:07 +04:00
if ( src ) {
copy_page ( src , buf ) ;
} else {
WARN_ON_ONCE ( 1 ) ;
2015-05-19 10:23:23 +03:00
hb = NULL ; /* Go synchronous */
2011-10-14 01:58:07 +04:00
src = buf ;
}
2006-09-26 10:32:42 +04:00
}
2006-12-07 07:34:10 +03:00
} else {
src = buf ;
2006-03-23 14:00:00 +03:00
}
2015-05-19 10:23:23 +03:00
return hib_submit_io ( WRITE_SYNC , offset , src , hb ) ;
2006-03-23 14:00:00 +03:00
}
static void release_swap_writer ( struct swap_map_handle * handle )
{
if ( handle - > cur )
free_page ( ( unsigned long ) handle - > cur ) ;
handle - > cur = NULL ;
}
static int get_swap_writer ( struct swap_map_handle * handle )
{
2010-05-02 01:54:02 +04:00
int ret ;
ret = swsusp_swap_check ( ) ;
if ( ret ) {
if ( ret ! = - ENOSPC )
printk ( KERN_ERR " PM: Cannot find swap device, try "
" swapon -a. \n " ) ;
return ret ;
}
2006-03-23 14:00:00 +03:00
handle - > cur = ( struct swap_map_page * ) get_zeroed_page ( GFP_KERNEL ) ;
2010-05-02 01:54:02 +04:00
if ( ! handle - > cur ) {
ret = - ENOMEM ;
goto err_close ;
}
2007-05-07 01:50:47 +04:00
handle - > cur_swap = alloc_swapdev_block ( root_swap ) ;
2006-03-23 14:00:00 +03:00
if ( ! handle - > cur_swap ) {
2010-05-02 01:54:02 +04:00
ret = - ENOSPC ;
goto err_rel ;
2006-03-23 14:00:00 +03:00
}
handle - > k = 0 ;
2012-04-25 01:53:28 +04:00
handle - > reqd_free_pages = reqd_free_pages ( ) ;
2010-05-02 01:53:02 +04:00
handle - > first_sector = handle - > cur_swap ;
2006-03-23 14:00:00 +03:00
return 0 ;
2010-05-02 01:54:02 +04:00
err_rel :
release_swap_writer ( handle ) ;
err_close :
swsusp_close ( FMODE_WRITE ) ;
return ret ;
2006-03-23 14:00:00 +03:00
}
2006-09-26 10:32:42 +04:00
static int swap_write_page ( struct swap_map_handle * handle , void * buf ,
2015-05-19 10:23:23 +03:00
struct hib_bio_batch * hb )
2006-09-26 10:32:42 +04:00
{
int error = 0 ;
2006-12-07 07:34:10 +03:00
sector_t offset ;
2006-03-23 14:00:00 +03:00
if ( ! handle - > cur )
return - EINVAL ;
2007-05-07 01:50:47 +04:00
offset = alloc_swapdev_block ( root_swap ) ;
2015-05-19 10:23:23 +03:00
error = write_page ( buf , offset , hb ) ;
2006-03-23 14:00:00 +03:00
if ( error )
return error ;
handle - > cur - > entries [ handle - > k + + ] = offset ;
if ( handle - > k > = MAP_PAGE_ENTRIES ) {
2007-05-07 01:50:47 +04:00
offset = alloc_swapdev_block ( root_swap ) ;
2006-03-23 14:00:00 +03:00
if ( ! offset )
return - ENOSPC ;
handle - > cur - > next_swap = offset ;
2015-05-19 10:23:23 +03:00
error = write_page ( handle - > cur , handle - > cur_swap , hb ) ;
2006-03-23 14:00:00 +03:00
if ( error )
2006-09-26 10:32:42 +04:00
goto out ;
2010-10-27 01:22:27 +04:00
clear_page ( handle - > cur ) ;
2006-03-23 14:00:00 +03:00
handle - > cur_swap = offset ;
handle - > k = 0 ;
2012-04-30 00:42:06 +04:00
2015-05-19 10:23:23 +03:00
if ( hb & & low_free_pages ( ) < = handle - > reqd_free_pages ) {
error = hib_wait_io ( hb ) ;
2012-04-30 00:42:06 +04:00
if ( error )
goto out ;
/*
* Recalculate the number of required free pages , to
* make sure we never take more than half .
*/
handle - > reqd_free_pages = reqd_free_pages ( ) ;
}
2011-10-14 01:58:07 +04:00
}
2006-12-07 07:34:44 +03:00
out :
2006-09-26 10:32:42 +04:00
return error ;
2006-03-23 14:00:00 +03:00
}
static int flush_swap_writer ( struct swap_map_handle * handle )
{
if ( handle - > cur & & handle - > cur_swap )
2006-09-26 10:32:42 +04:00
return write_page ( handle - > cur , handle - > cur_swap , NULL ) ;
2006-03-23 14:00:00 +03:00
else
return - EINVAL ;
}
2010-05-02 01:54:02 +04:00
static int swap_writer_finish ( struct swap_map_handle * handle ,
unsigned int flags , int error )
{
if ( ! error ) {
flush_swap_writer ( handle ) ;
printk ( KERN_INFO " PM: S " ) ;
error = mark_swapfiles ( handle , flags ) ;
printk ( " | \n " ) ;
}
if ( error )
free_all_swap_pages ( root_swap ) ;
release_swap_writer ( handle ) ;
swsusp_close ( FMODE_WRITE ) ;
return error ;
}
2010-09-10 01:06:23 +04:00
/* We need to remember how much compressed data we need to read. */
# define LZO_HEADER sizeof(size_t)
/* Number of pages/bytes we'll compress at one time. */
# define LZO_UNC_PAGES 32
# define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE)
/* Number of pages/bytes we need for compressed data (worst case). */
# define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
LZO_HEADER , PAGE_SIZE )
# define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE)
2011-10-14 01:58:07 +04:00
/* Maximum number of threads for compression/decompression. */
# define LZO_THREADS 3
2012-04-30 00:42:06 +04:00
/* Minimum/maximum number of pages for read buffering. */
# define LZO_MIN_RD_PAGES 1024
# define LZO_MAX_RD_PAGES 8192
2011-10-14 01:58:07 +04:00
2006-03-23 14:00:00 +03:00
/**
* save_image - save the suspend image data
*/
static int save_image ( struct swap_map_handle * handle ,
struct snapshot_handle * snapshot ,
2006-09-26 10:32:41 +04:00
unsigned int nr_to_write )
2006-03-23 14:00:00 +03:00
{
unsigned int m ;
int ret ;
2006-09-26 10:32:41 +04:00
int nr_pages ;
2006-09-26 10:32:42 +04:00
int err2 ;
2015-05-19 10:23:23 +03:00
struct hib_bio_batch hb ;
2014-10-30 21:04:53 +03:00
ktime_t start ;
ktime_t stop ;
2006-03-23 14:00:00 +03:00
2015-05-19 10:23:23 +03:00
hib_init_batch ( & hb ) ;
2012-06-22 00:27:24 +04:00
printk ( KERN_INFO " PM: Saving image data pages (%u pages)... \n " ,
2007-12-08 04:09:43 +03:00
nr_to_write ) ;
2012-06-22 00:27:24 +04:00
m = nr_to_write / 10 ;
2006-03-23 14:00:00 +03:00
if ( ! m )
m = 1 ;
nr_pages = 0 ;
2014-10-30 21:04:53 +03:00
start = ktime_get ( ) ;
2009-10-29 00:55:33 +03:00
while ( 1 ) {
2010-05-02 01:52:02 +04:00
ret = snapshot_read_next ( snapshot ) ;
2009-10-29 00:55:33 +03:00
if ( ret < = 0 )
break ;
2015-05-19 10:23:23 +03:00
ret = swap_write_page ( handle , data_of ( * snapshot ) , & hb ) ;
2009-10-29 00:55:33 +03:00
if ( ret )
break ;
if ( ! ( nr_pages % m ) )
2012-06-22 00:27:24 +04:00
printk ( KERN_INFO " PM: Image saving progress: %3d%% \n " ,
nr_pages / m * 10 ) ;
2009-10-29 00:55:33 +03:00
nr_pages + + ;
}
2015-05-19 10:23:23 +03:00
err2 = hib_wait_io ( & hb ) ;
2014-10-30 21:04:53 +03:00
stop = ktime_get ( ) ;
2009-10-29 00:55:33 +03:00
if ( ! ret )
ret = err2 ;
if ( ! ret )
2012-06-22 00:27:24 +04:00
printk ( KERN_INFO " PM: Image saving done. \n " ) ;
2014-10-30 21:04:53 +03:00
swsusp_show_speed ( start , stop , nr_to_write , " Wrote " ) ;
2009-10-29 00:55:33 +03:00
return ret ;
2006-03-23 14:00:00 +03:00
}
2011-10-14 01:58:07 +04:00
/**
 * Structure used for CRC32.
 */
struct crc_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	unsigned run_threads;                     /* nr current threads */
	wait_queue_head_t go;                     /* start crc update */
	wait_queue_head_t done;                   /* crc update done */
	u32 *crc32;                               /* points to handle's crc32 */
	size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
	unsigned char *unc[LZO_THREADS];          /* uncompressed data */
};
/**
* CRC32 update function that runs in its own thread .
*/
static int crc32_threadfn ( void * data )
{
struct crc_data * d = data ;
unsigned i ;
while ( 1 ) {
wait_event ( d - > go , atomic_read ( & d - > ready ) | |
kthread_should_stop ( ) ) ;
if ( kthread_should_stop ( ) ) {
d - > thr = NULL ;
atomic_set ( & d - > stop , 1 ) ;
wake_up ( & d - > done ) ;
break ;
}
atomic_set ( & d - > ready , 0 ) ;
for ( i = 0 ; i < d - > run_threads ; i + + )
* d - > crc32 = crc32_le ( * d - > crc32 ,
d - > unc [ i ] , * d - > unc_len [ i ] ) ;
atomic_set ( & d - > stop , 1 ) ;
wake_up ( & d - > done ) ;
}
return 0 ;
}
/**
 * Structure used for LZO data compression.
 */
struct cmp_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	int ret;                                  /* return code */
	wait_queue_head_t go;                     /* start compression */
	wait_queue_head_t done;                   /* compression done */
	size_t unc_len;                           /* uncompressed length */
	size_t cmp_len;                           /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
	unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
};
/**
 * Compression function that runs in its own thread.
 *
 * Waits for the producer to hand it a filled d->unc buffer (d->ready),
 * compresses it with lzo1x_1_compress() into d->cmp just past the
 * LZO_HEADER length prefix, and reports completion through d->stop /
 * d->done.  d->ret carries the lzo1x_1_compress() result; -1 is stored
 * on teardown so a consumer that races with kthread_stop() sees failure.
 */
static int lzo_compress_threadfn(void *data)
{
	struct cmp_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		/* Compress into the area after the length header. */
		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
		                          d->cmp + LZO_HEADER, &d->cmp_len,
		                          d->wrk);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
2010-09-10 01:06:23 +04:00
/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 *
 * Pipeline: the main thread reads snapshot pages into per-worker
 * uncompressed buffers, kicks one compression kthread per buffer plus a
 * CRC32 kthread over the same uncompressed data, then writes the
 * compressed output to swap one page at a time.  Returns 0 on success or
 * a negative error code.
 */
static int save_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	size_t off;
	unsigned thr, run_threads, nr_threads;
	unsigned char *page = NULL;	/* bounce page for swap_write_page() */
	struct cmp_data *data = NULL;	/* per-thread compression state */
	struct crc_data *crc = NULL;	/* shared CRC32 thread state */

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for compression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
	if (!page) {
		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vmalloc(sizeof(*data) * nr_threads);
	if (!data) {
		printk(KERN_ERR "PM: Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/* Zero only up to the wait queues; those are initialized below. */
	for (thr = 0; thr < nr_threads; thr++)
		memset(&data[thr], 0, offsetof(struct cmp_data, go));

	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		printk(KERN_ERR "PM: Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	memset(crc, 0, offsetof(struct crc_data, go));

	/*
	 * Start the compression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_compress_threadfn,
		                            &data[thr],
		                            "image_compress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			printk(KERN_ERR
			       "PM: Cannot start compression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Adjust the number of required free pages after all allocations have
	 * been done. We don't want to run out of pages when writing.
	 */
	handle->reqd_free_pages = reqd_free_pages();

	printk(KERN_INFO
		"PM: Using %u thread(s) for compression.\n"
		"PM: Compressing and saving image data (%u pages)...\n",
		nr_threads, nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for (;;) {
		/* Fill one uncompressed buffer per worker thread. */
		for (thr = 0; thr < nr_threads; thr++) {
			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
				ret = snapshot_read_next(snapshot);
				if (ret < 0)
					goto out_finish;

				if (!ret)	/* no more image pages */
					break;

				memcpy(data[thr].unc + off,
				       data_of(*snapshot), PAGE_SIZE);

				if (!(nr_pages % m))
					printk(KERN_INFO
					       "PM: Image saving progress: "
					       "%3d%%\n",
					       nr_pages / m * 10);

				nr_pages++;
			}
			if (!off)	/* this worker got no data at all */
				break;

			data[thr].unc_len = off;

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		if (!thr)	/* image exhausted before any worker started */
			break;

		/* CRC runs over the uncompressed data in parallel. */
		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
			           atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				printk(KERN_ERR "PM: LZO compression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].cmp_len ||
			             data[thr].cmp_len >
			             lzo1x_worst_compress(data[thr].unc_len))) {
				printk(KERN_ERR
				       "PM: Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			/* Length prefix consumed by load_image_lzo(). */
			*(size_t *)data[thr].cmp = data[thr].cmp_len;

			/*
			 * Given we are writing one page at a time to disk, we
			 * copy that much from the buffer, although the last
			 * bit will likely be smaller than full page. This is
			 * OK - we saved the length of the compressed data, so
			 * any garbage at the end will be discarded when we
			 * read it.
			 */
			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(page, data[thr].cmp + off, PAGE_SIZE);

				ret = swap_write_page(handle, page, &hb);
				if (ret)
					goto out_finish;
			}
		}

		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}

out_finish:
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		printk(KERN_INFO "PM: Image saving done.\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	if (page)
		free_page((unsigned long)page);

	return ret;
}
2006-03-23 14:00:00 +03:00
/**
* enough_swap - Make sure we have enough swap to save the image .
*
* Returns TRUE or FALSE after checking the total amount of swap
* space avaiable from the resume partition .
*/
2010-09-10 01:06:23 +04:00
static int enough_swap ( unsigned int nr_pages , unsigned int flags )
2006-03-23 14:00:00 +03:00
{
unsigned int free_swap = count_swap_pages ( root_swap , 1 ) ;
2010-09-10 01:06:23 +04:00
unsigned int required ;
2006-03-23 14:00:00 +03:00
2007-12-08 04:09:43 +03:00
pr_debug ( " PM: Free swap pages: %u \n " , free_swap ) ;
2010-09-10 01:06:23 +04:00
2012-01-09 08:56:23 +04:00
required = PAGES_FOR_IO + nr_pages ;
2010-09-10 01:06:23 +04:00
return free_swap > required ;
2006-03-23 14:00:00 +03:00
}
/**
 * swsusp_write - Write entire image and metadata.
 * @flags: flags to pass to the "boot" kernel in the image header
 *
 * It is important _NOT_ to umount filesystems at this point. We want
 * them synced (in case something goes wrong) but we DO not want to mark
 * filesystem clean: it is not. (And it does not matter, if we resume
 * correctly, we'll mark system clean, anyway.)
 */
int swsusp_write ( unsigned int flags )
2006-03-23 14:00:00 +03:00
{
struct swap_map_handle handle ;
struct snapshot_handle snapshot ;
struct swsusp_info * header ;
2010-05-02 01:54:02 +04:00
unsigned long pages ;
2006-03-23 14:00:00 +03:00
int error ;
2010-05-02 01:54:02 +04:00
pages = snapshot_get_image_size ( ) ;
error = get_swap_writer ( & handle ) ;
2006-12-07 07:34:10 +03:00
if ( error ) {
2010-05-02 01:54:02 +04:00
printk ( KERN_ERR " PM: Cannot get swap writer \n " ) ;
2006-03-23 14:00:00 +03:00
return error ;
}
2012-01-09 08:56:23 +04:00
if ( flags & SF_NOCOMPRESS_MODE ) {
if ( ! enough_swap ( pages , flags ) ) {
printk ( KERN_ERR " PM: Not enough free swap \n " ) ;
error = - ENOSPC ;
goto out_finish ;
}
2010-05-02 01:54:02 +04:00
}
2006-03-23 14:00:00 +03:00
memset ( & snapshot , 0 , sizeof ( struct snapshot_handle ) ) ;
2010-05-02 01:52:02 +04:00
error = snapshot_read_next ( & snapshot ) ;
2006-12-07 07:34:10 +03:00
if ( error < PAGE_SIZE ) {
if ( error > = 0 )
error = - EFAULT ;
2010-05-02 01:54:02 +04:00
goto out_finish ;
2006-12-07 07:34:10 +03:00
}
2006-03-23 14:00:00 +03:00
header = ( struct swsusp_info * ) data_of ( snapshot ) ;
2010-05-02 01:54:02 +04:00
error = swap_write_page ( & handle , header , NULL ) ;
2010-09-10 01:06:23 +04:00
if ( ! error ) {
error = ( flags & SF_NOCOMPRESS_MODE ) ?
save_image ( & handle , & snapshot , pages - 1 ) :
save_image_lzo ( & handle , & snapshot , pages - 1 ) ;
}
2010-05-02 01:54:02 +04:00
out_finish :
error = swap_writer_finish ( & handle , flags , error ) ;
2006-03-23 14:00:00 +03:00
return error ;
}
/**
* The following functions allow us to read data using a swap map
* in a file - alike way
*/
static void release_swap_reader ( struct swap_map_handle * handle )
{
2011-10-14 01:58:07 +04:00
struct swap_map_page_list * tmp ;
while ( handle - > maps ) {
if ( handle - > maps - > map )
free_page ( ( unsigned long ) handle - > maps - > map ) ;
tmp = handle - > maps ;
handle - > maps = handle - > maps - > next ;
kfree ( tmp ) ;
}
2006-03-23 14:00:00 +03:00
handle - > cur = NULL ;
}
2010-05-02 01:54:02 +04:00
static int get_swap_reader ( struct swap_map_handle * handle ,
unsigned int * flags_p )
2006-03-23 14:00:00 +03:00
{
int error ;
2011-10-14 01:58:07 +04:00
struct swap_map_page_list * tmp , * last ;
sector_t offset ;
2006-03-23 14:00:00 +03:00
2010-05-02 01:54:02 +04:00
* flags_p = swsusp_header - > flags ;
if ( ! swsusp_header - > image ) /* how can this happen? */
2006-03-23 14:00:00 +03:00
return - EINVAL ;
2006-12-07 07:34:10 +03:00
2011-10-14 01:58:07 +04:00
handle - > cur = NULL ;
last = handle - > maps = NULL ;
offset = swsusp_header - > image ;
while ( offset ) {
tmp = kmalloc ( sizeof ( * handle - > maps ) , GFP_KERNEL ) ;
if ( ! tmp ) {
release_swap_reader ( handle ) ;
return - ENOMEM ;
}
memset ( tmp , 0 , sizeof ( * tmp ) ) ;
if ( ! handle - > maps )
handle - > maps = tmp ;
if ( last )
last - > next = tmp ;
last = tmp ;
tmp - > map = ( struct swap_map_page * )
2015-11-07 03:28:28 +03:00
__get_free_page ( __GFP_RECLAIM | __GFP_HIGH ) ;
2011-10-14 01:58:07 +04:00
if ( ! tmp - > map ) {
release_swap_reader ( handle ) ;
return - ENOMEM ;
}
2006-12-07 07:34:10 +03:00
2015-05-19 10:23:23 +03:00
error = hib_submit_io ( READ_SYNC , offset , tmp - > map , NULL ) ;
2011-10-14 01:58:07 +04:00
if ( error ) {
release_swap_reader ( handle ) ;
return error ;
}
offset = tmp - > map - > next_swap ;
2006-03-23 14:00:00 +03:00
}
handle - > k = 0 ;
2011-10-14 01:58:07 +04:00
handle - > cur = handle - > maps - > map ;
2006-03-23 14:00:00 +03:00
return 0 ;
}
2006-09-26 10:32:44 +04:00
/**
 * swap_read_page - Read the next image page described by the swap map.
 * @handle: Swap map handle positioned at the entry to read.
 * @buf: Destination buffer (one page).
 * @hb: Optional bio batch; when non-NULL the read may complete
 *      asynchronously and must be waited for with hib_wait_io().
 *
 * Advances handle->k and, when the current swap map page is exhausted,
 * frees it and moves to the next one in handle->maps (releasing the
 * whole reader when the list ends).  Returns 0 on success or a negative
 * error code.
 */
static int swap_read_page(struct swap_map_handle *handle, void *buf,
		struct hib_bio_batch *hb)
{
	sector_t offset;
	int error;
	struct swap_map_page_list *tmp;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_submit_io(READ_SYNC, offset, buf, hb);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		/* Current map page fully consumed; step to the next one. */
		handle->k = 0;
		free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
		if (!handle->maps)
			release_swap_reader(handle);
		else
			handle->cur = handle->maps->map;
	}
	return error;
}
2010-05-02 01:54:02 +04:00
static int swap_reader_finish ( struct swap_map_handle * handle )
{
release_swap_reader ( handle ) ;
return 0 ;
}
2006-03-23 14:00:00 +03:00
/**
 * load_image - load the image using the swap map handle
 * @handle and the snapshot handle @snapshot
 * (assume there are @nr_pages pages to load)
 *
 * Uncompressed load path: reads pages one at a time from swap straight
 * into the buffers provided by snapshot_write_next().  Returns 0 on
 * success or a negative error code.
 */
static int load_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	ktime_t start;
	ktime_t stop;
	struct hib_bio_batch hb;
	int err2;
	unsigned nr_pages;

	hib_init_batch(&hb);

	printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
		nr_to_read);
	m = nr_to_read / 10;	/* progress granularity: every 10% */
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for ( ; ; ) {
		ret = snapshot_write_next(snapshot);
		if (ret <= 0)	/* error, or no more pages wanted */
			break;
		ret = swap_read_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		/*
		 * When the snapshot layer flags a synchronous read, drain
		 * all in-flight bios before handing it the next buffer.
		 */
		if (snapshot->sync_read)
			ret = hib_wait_io(&hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			printk(KERN_INFO "PM: Image loading progress: %3d%%\n",
			       nr_pages / m * 10);
		nr_pages++;
	}
	/* Wait for any reads still in flight before judging success. */
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret) {
		printk(KERN_INFO "PM: Image loading done.\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
	return ret;
}
/**
 * Structure used for LZO data decompression.
 *
 * Per-worker-thread state, mirror of struct cmp_data for the load path:
 * the main thread assembles a compressed chunk in @cmp (length prefix in
 * the first LZO_HEADER bytes), sets @cmp_len and @ready; the worker
 * decompresses into @unc, sets @unc_len and @ret, and signals @stop/@done.
 *
 * NOTE: callers zero only up to offsetof(struct dec_data, go); keep the
 * wait queues after that boundary.
 */
struct dec_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	int ret;                                  /* return code */
	wait_queue_head_t go;                     /* start decompression */
	wait_queue_head_t done;                   /* decompression done */
	size_t unc_len;                           /* uncompressed length */
	size_t cmp_len;                           /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
};
/**
 * Decompression function that runs in its own thread.
 *
 * Waits for a filled d->cmp buffer, runs lzo1x_decompress_safe() into
 * d->unc (capacity LZO_UNC_SIZE, actual size returned in d->unc_len),
 * and reports the result in d->ret.  On teardown it stores -1 so a
 * consumer racing with kthread_stop() sees failure.
 */
static int lzo_decompress_threadfn(void *data)
{
	struct dec_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		/* unc_len is in/out: pass capacity, get actual size back. */
		d->unc_len = LZO_UNC_SIZE;
		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
		                               d->unc, &d->unc_len);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
2010-09-10 01:06:23 +04:00
/**
 * load_image_lzo - Load compressed image data and decompress them with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 *
 * Pipeline: swap pages are read asynchronously into a ring of buffer
 * pages, reassembled into per-worker compressed chunks, decompressed in
 * parallel kthreads, CRC-checked against the value stored in the image
 * header, and copied into the buffers handed out by
 * snapshot_write_next().  Returns 0 on success or a negative error code.
 */
static int load_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	/* eof: 0 = more data on disk, 1 = end seen, 2 = all reads drained */
	int eof = 0;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	unsigned nr_pages;
	size_t off;
	unsigned i, thr, run_threads, nr_threads;
	/*
	 * Read-ahead ring bookkeeping: @ring is the next slot to read into,
	 * @pg the next slot to consume, @have completed pages available,
	 * @asked reads in flight, @want free slots to fill.
	 */
	unsigned ring = 0, pg = 0, ring_size = 0,
	         have = 0, want, need, asked = 0;
	unsigned long read_pages = 0;
	unsigned char **page = NULL;
	struct dec_data *data = NULL;	/* per-thread decompression state */
	struct crc_data *crc = NULL;	/* shared CRC32 thread state */

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for decompression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
	if (!page) {
		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vmalloc(sizeof(*data) * nr_threads);
	if (!data) {
		printk(KERN_ERR "PM: Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/* Zero only up to the wait queues; those are initialized below. */
	for (thr = 0; thr < nr_threads; thr++)
		memset(&data[thr], 0, offsetof(struct dec_data, go));

	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		printk(KERN_ERR "PM: Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	memset(crc, 0, offsetof(struct crc_data, go));

	/*
	 * Start the decompression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_decompress_threadfn,
		                            &data[thr],
		                            "image_decompress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			printk(KERN_ERR
			       "PM: Cannot start decompression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Set the number of pages for read buffering.
	 * This is complete guesswork, because we'll only know the real
	 * picture once prepare_image() is called, which is much later on
	 * during the image load phase. We'll assume the worst case and
	 * say that none of the image pages are from high memory.
	 */
	if (low_free_pages() > snapshot_get_image_size())
		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);

	for (i = 0; i < read_pages; i++) {
		/*
		 * The first LZO_CMP_PAGES slots are mandatory; the rest are
		 * opportunistic read-ahead, so allocation failures there are
		 * tolerated (NOWARN/NORETRY) and just shrink the ring.
		 */
		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
		                                  __GFP_RECLAIM | __GFP_HIGH :
		                                  __GFP_RECLAIM | __GFP_NOWARN |
		                                  __GFP_NORETRY);

		if (!page[i]) {
			if (i < LZO_CMP_PAGES) {
				ring_size = i;
				printk(KERN_ERR
				       "PM: Failed to allocate LZO pages\n");
				ret = -ENOMEM;
				goto out_clean;
			} else {
				break;
			}
		}
	}
	want = ring_size = i;

	printk(KERN_INFO
		"PM: Using %u thread(s) for decompression.\n"
		"PM: Loading and decompressing image data (%u pages)...\n",
		nr_threads, nr_to_read);
	m = nr_to_read / 10;	/* progress granularity: every 10% */
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();

	ret = snapshot_write_next(snapshot);
	if (ret <= 0)
		goto out_finish;

	for (;;) {
		/* Issue async reads to fill every free ring slot. */
		for (i = 0; !eof && i < want; i++) {
			ret = swap_read_page(handle, page[ring], &hb);
			if (ret) {
				/*
				 * On real read error, finish. On end of data,
				 * set EOF flag and just exit the read loop.
				 */
				if (handle->cur &&
				    handle->cur->entries[handle->k]) {
					goto out_finish;
				} else {
					eof = 1;
					break;
				}
			}
			if (++ring >= ring_size)
				ring = 0;
		}
		asked += i;
		want -= i;

		/*
		 * We are out of data, wait for some more.
		 */
		if (!have) {
			if (!asked)
				break;

			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		/* Finish the CRC pass over the previous batch first. */
		if (crc->run_threads) {
			wait_event(crc->done, atomic_read(&crc->stop));
			atomic_set(&crc->stop, 0);
			crc->run_threads = 0;
		}

		/* Hand one compressed chunk to each worker that has data. */
		for (thr = 0; have && thr < nr_threads; thr++) {
			data[thr].cmp_len = *(size_t *)page[pg];
			if (unlikely(!data[thr].cmp_len ||
			             data[thr].cmp_len >
			             lzo1x_worst_compress(LZO_UNC_SIZE))) {
				printk(KERN_ERR
				       "PM: Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
			                    PAGE_SIZE);
			if (need > have) {
				/* Chunk incomplete; error if no more data. */
				if (eof > 1) {
					ret = -1;
					goto out_finish;
				}
				break;
			}

			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(data[thr].cmp + off,
				       page[pg], PAGE_SIZE);
				have--;
				want++;
				if (++pg >= ring_size)
					pg = 0;
			}

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		/*
		 * Wait for more data while we are decompressing.
		 */
		if (have < LZO_CMP_PAGES && asked) {
			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
			           atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				printk(KERN_ERR
				       "PM: LZO decompression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].unc_len ||
			             data[thr].unc_len > LZO_UNC_SIZE ||
			             data[thr].unc_len & (PAGE_SIZE - 1))) {
				printk(KERN_ERR
				       "PM: Invalid LZO uncompressed length\n");
				ret = -1;
				goto out_finish;
			}

			for (off = 0;
			     off < data[thr].unc_len; off += PAGE_SIZE) {
				memcpy(data_of(*snapshot),
				       data[thr].unc + off, PAGE_SIZE);

				if (!(nr_pages % m))
					printk(KERN_INFO
					       "PM: Image loading progress: "
					       "%3d%%\n",
					       nr_pages / m * 10);

				nr_pages++;

				ret = snapshot_write_next(snapshot);
				if (ret <= 0) {
					/* CRC only the workers we consumed. */
					crc->run_threads = thr + 1;
					atomic_set(&crc->ready, 1);
					wake_up(&crc->go);
					goto out_finish;
				}
			}
		}

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);
	}

out_finish:
	if (crc->run_threads) {
		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}
	stop = ktime_get();
	if (!ret) {
		printk(KERN_INFO "PM: Image loading done.\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
		if (!ret) {
			/* Verify the computed CRC against the saved one. */
			if (swsusp_header->flags & SF_CRC32_MODE) {
				if (handle->crc32 != swsusp_header->crc32) {
					printk(KERN_ERR
					       "PM: Invalid image CRC32!\n");
					ret = -ENODATA;
				}
			}
		}
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
	for (i = 0; i < ring_size; i++)
		free_page((unsigned long)page[i]);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	vfree(page);

	return ret;
}
swsusp: introduce restore platform operations
At least on some machines it is necessary to prepare the ACPI firmware for the
restoration of the system memory state from the hibernation image if the
"platform" mode of hibernation has been used. Namely, in those cases we need
to disable the GPEs before replacing the "boot" kernel with the "frozen"
kernel (cf. http://bugzilla.kernel.org/show_bug.cgi?id=7887). After the
restore they will be re-enabled by hibernation_ops->finish(), but if the
restore fails, they have to be re-enabled by the restore code explicitly.
For this purpose we can introduce two additional hibernation operations,
called pre_restore() and restore_cleanup() and call them from the restore code
path. Still, they should be called if the "platform" mode of hibernation has
been used, so we need to pass the information about the hibernation mode from
the "frozen" kernel to the "boot" kernel in the image header.
Apparently, we can't drop the disabling of GPEs before the restore because of
Bug #7887 . We also can't do it unconditionally, because the GPEs wouldn't
have been enabled after a successful restore if the suspend had been done in
the 'shutdown' or 'reboot' mode.
In principle we could (and probably should) unconditionally disable the GPEs
before each snapshot creation *and* before the restore, but then we'd have to
unconditionally enable them after the snapshot creation as well as after the
restore (or restore failure). Still, for this purpose we'd need to modify
acpi_enter_sleep_state_prep() and acpi_leave_sleep_state() and we'd have to
introduce some mechanism synchronizing the disabling/enabling of the GPEs with
the device drivers' .suspend()/.resume() routines and with
disable_/enable_nonboot_cpus(). However, this would have affected the
suspend (ie. s2ram) code as well as the hibernation, which I'd like to avoid
in this patch series.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Nigel Cunningham <nigel@nigel.suspend2.net>
Cc: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-07-19 12:47:30 +04:00
/**
* swsusp_read - read the hibernation image .
* @ flags_p : flags passed by the " frozen " kernel in the image header should
tree-wide: fix comment/printk typos
"gadget", "through", "command", "maintain", "maintain", "controller", "address",
"between", "initiali[zs]e", "instead", "function", "select", "already",
"equal", "access", "management", "hierarchy", "registration", "interest",
"relative", "memory", "offset", "already",
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
2010-11-01 22:38:34 +03:00
* be written into this memory location
swsusp: introduce restore platform operations
At least on some machines it is necessary to prepare the ACPI firmware for the
restoration of the system memory state from the hibernation image if the
"platform" mode of hibernation has been used. Namely, in that cases we need
to disable the GPEs before replacing the "boot" kernel with the "frozen"
kernel (cf. http://bugzilla.kernel.org/show_bug.cgi?id=7887). After the
restore they will be re-enabled by hibernation_ops->finish(), but if the
restore fails, they have to be re-enabled by the restore code explicitly.
For this purpose we can introduce two additional hibernation operations,
called pre_restore() and restore_cleanup() and call them from the restore code
path. Still, they should be called if the "platform" mode of hibernation has
been used, so we need to pass the information about the hibernation mode from
the "frozen" kernel to the "boot" kernel in the image header.
Apparently, we can't drop the disabling of GPEs before the restore because of
Bug #7887 . We also can't do it unconditionally, because the GPEs wouldn't
have been enabled after a successful restore if the suspend had been done in
the 'shutdown' or 'reboot' mode.
In principle we could (and probably should) unconditionally disable the GPEs
before each snapshot creation *and* before the restore, but then we'd have to
unconditionally enable them after the snapshot creation as well as after the
restore (or restore failure) Still, for this purpose we'd need to modify
acpi_enter_sleep_state_prep() and acpi_leave_sleep_state() and we'd have to
introduce some mechanism synchronizing the disablind/enabling of the GPEs with
the device drivers' .suspend()/.resume() routines and with
disable_/enable_nonboot_cpus(). However, this would have affected the
suspend (ie. s2ram) code as well as the hibernation, which I'd like to avoid
in this patch series.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Nigel Cunningham <nigel@nigel.suspend2.net>
Cc: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-07-19 12:47:30 +04:00
*/
int swsusp_read ( unsigned int * flags_p )
2006-03-23 14:00:00 +03:00
{
int error ;
struct swap_map_handle handle ;
struct snapshot_handle snapshot ;
struct swsusp_info * header ;
memset ( & snapshot , 0 , sizeof ( struct snapshot_handle ) ) ;
2010-05-02 01:52:02 +04:00
error = snapshot_write_next ( & snapshot ) ;
2006-03-23 14:00:00 +03:00
if ( error < PAGE_SIZE )
return error < 0 ? error : - EFAULT ;
header = ( struct swsusp_info * ) data_of ( snapshot ) ;
2010-05-02 01:54:02 +04:00
error = get_swap_reader ( & handle , flags_p ) ;
if ( error )
goto end ;
2006-03-23 14:00:00 +03:00
if ( ! error )
2006-09-26 10:32:44 +04:00
error = swap_read_page ( & handle , header , NULL ) ;
2010-09-10 01:06:23 +04:00
if ( ! error ) {
error = ( * flags_p & SF_NOCOMPRESS_MODE ) ?
load_image ( & handle , & snapshot , header - > pages - 1 ) :
load_image_lzo ( & handle , & snapshot , header - > pages - 1 ) ;
}
2010-05-02 01:54:02 +04:00
swap_reader_finish ( & handle ) ;
end :
2006-03-23 14:00:00 +03:00
if ( ! error )
2007-12-08 04:09:43 +03:00
pr_debug ( " PM: Image successfully loaded \n " ) ;
2006-03-23 14:00:00 +03:00
else
2007-12-08 04:09:43 +03:00
pr_debug ( " PM: Error %d resuming \n " , error ) ;
2006-03-23 14:00:00 +03:00
return error ;
}
/**
* swsusp_check - Check for swsusp signature in the resume device
*/
int swsusp_check ( void )
{
int error ;
2010-11-13 13:55:18 +03:00
hib_resume_bdev = blkdev_get_by_dev ( swsusp_resume_device ,
FMODE_READ , NULL ) ;
2010-05-02 01:52:34 +04:00
if ( ! IS_ERR ( hib_resume_bdev ) ) {
set_blocksize ( hib_resume_bdev , PAGE_SIZE ) ;
2010-10-27 01:22:27 +04:00
clear_page ( swsusp_header ) ;
2015-05-19 10:23:23 +03:00
error = hib_submit_io ( READ_SYNC , swsusp_resume_block ,
2007-05-02 21:27:07 +04:00
swsusp_header , NULL ) ;
2006-12-07 07:34:12 +03:00
if ( error )
2009-10-08 00:37:35 +04:00
goto put ;
2006-12-07 07:34:12 +03:00
2010-10-05 00:08:12 +04:00
if ( ! memcmp ( HIBERNATE_SIG , swsusp_header - > sig , 10 ) ) {
2007-05-02 21:27:07 +04:00
memcpy ( swsusp_header - > sig , swsusp_header - > orig_sig , 10 ) ;
2006-03-23 14:00:00 +03:00
/* Reset swap signature now */
2015-05-19 10:23:23 +03:00
error = hib_submit_io ( WRITE_SYNC , swsusp_resume_block ,
2007-05-02 21:27:07 +04:00
swsusp_header , NULL ) ;
2006-03-23 14:00:00 +03:00
} else {
2009-10-08 00:37:35 +04:00
error = - EINVAL ;
2006-03-23 14:00:00 +03:00
}
2009-10-08 00:37:35 +04:00
put :
2006-03-23 14:00:00 +03:00
if ( error )
2010-05-02 01:52:34 +04:00
blkdev_put ( hib_resume_bdev , FMODE_READ ) ;
2006-03-23 14:00:00 +03:00
else
2010-09-29 01:31:22 +04:00
pr_debug ( " PM: Image signature found, resuming \n " ) ;
2006-03-23 14:00:00 +03:00
} else {
2010-05-02 01:52:34 +04:00
error = PTR_ERR ( hib_resume_bdev ) ;
2006-03-23 14:00:00 +03:00
}
if ( error )
2010-09-29 01:31:22 +04:00
pr_debug ( " PM: Image not found (code %d) \n " , error ) ;
2006-03-23 14:00:00 +03:00
return error ;
}
/**
* swsusp_close - close swap device .
*/
2007-10-08 21:21:10 +04:00
void swsusp_close ( fmode_t mode )
2006-03-23 14:00:00 +03:00
{
2010-05-02 01:52:34 +04:00
if ( IS_ERR ( hib_resume_bdev ) ) {
2007-12-08 04:09:43 +03:00
pr_debug ( " PM: Image device not initialised \n " ) ;
2006-03-23 14:00:00 +03:00
return ;
}
2010-05-02 01:52:34 +04:00
blkdev_put ( hib_resume_bdev , mode ) ;
2006-03-23 14:00:00 +03:00
}
2007-05-02 21:27:07 +04:00
2012-06-16 02:09:58 +04:00
/**
 *	swsusp_unmark - Unmark swsusp signature in the resume device
 */
#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
	int error;

	/*
	 * Re-read the header page and check the result: if the read fails,
	 * the signature comparison below would be performed against stale
	 * data, so bail out with the I/O error instead.
	 */
	error = hib_submit_io(READ_SYNC, swsusp_resume_block,
				swsusp_header, NULL);
	if (!error) {
		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			/* Restore the original signature, invalidating the image. */
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
						swsusp_header, NULL);
		} else {
			printk(KERN_ERR "PM: Cannot find swsusp signature!\n");
			error = -ENODEV;
		}
	}

	/*
	 * We just returned from suspend, we don't need the image any more.
	 */
	free_all_swap_pages(root_swap);

	return error;
}
#endif
2007-05-02 21:27:07 +04:00
/* Allocate the page-sized buffer used for the on-disk swsusp header. */
static int swsusp_header_init(void)
{
	unsigned long page = __get_free_page(GFP_KERNEL);

	if (!page)
		panic("Could not allocate memory for swsusp_header\n");
	swsusp_header = (struct swsusp_header *)page;

	return 0;
}

core_initcall(swsusp_header_init);