// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/user.c
 *
 * This file provides the user space interface for software suspend/resume.
 *
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */
# include <linux/suspend.h>
2006-12-07 07:34:06 +03:00
# include <linux/reboot.h>
2006-03-23 14:00:03 +03:00
# include <linux/string.h>
# include <linux/device.h>
# include <linux/miscdevice.h>
# include <linux/mm.h>
# include <linux/swap.h>
# include <linux/swapops.h>
# include <linux/pm.h>
# include <linux/fs.h>
2011-12-28 01:54:52 +04:00
# include <linux/compat.h>
2006-10-11 12:20:45 +04:00
# include <linux/console.h>
2006-09-26 10:32:48 +04:00
# include <linux/cpu.h>
2006-12-07 07:34:23 +03:00
# include <linux/freezer.h>
2006-03-23 14:00:03 +03:00
2016-12-24 22:46:01 +03:00
# include <linux/uaccess.h>
2006-03-23 14:00:03 +03:00
# include "power.h"
2022-07-15 08:49:58 +03:00
static bool need_wait ;
2007-10-26 03:01:10 +04:00
2006-03-23 14:00:03 +03:00
static struct snapshot_data {
struct snapshot_handle handle ;
int swap ;
int mode ;
2013-10-19 00:20:40 +04:00
bool frozen ;
bool ready ;
bool platform_support ;
2013-09-30 21:40:56 +04:00
bool free_bitmaps ;
2020-09-21 10:19:55 +03:00
dev_t dev ;
2006-03-23 14:00:03 +03:00
} snapshot_state ;
2020-09-21 10:19:55 +03:00
int is_hibernate_resume_dev ( dev_t dev )
2020-05-19 21:14:10 +03:00
{
2020-09-21 10:19:55 +03:00
return hibernation_available ( ) & & snapshot_state . dev = = dev ;
2020-05-19 21:14:10 +03:00
}
2006-03-23 14:00:03 +03:00
static int snapshot_open ( struct inode * inode , struct file * filp )
{
struct snapshot_data * data ;
2022-08-22 14:18:17 +03:00
unsigned int sleep_flags ;
notifier: Fix broken error handling pattern
The current notifiers have the following error handling pattern all
over the place:
int err, nr;
err = __foo_notifier_call_chain(&chain, val_up, v, -1, &nr);
if (err & NOTIFIER_STOP_MASK)
__foo_notifier_call_chain(&chain, val_down, v, nr-1, NULL)
And aside from the endless repetition thereof, it is broken. Consider
blocking notifiers; both calls take and drop the rwsem, this means
that the notifier list can change in between the two calls, making @nr
meaningless.
Fix this by replacing all the __foo_notifier_call_chain() functions
with foo_notifier_call_chain_robust() that embeds the above pattern,
but ensures it is inside a single lock region.
Note: I switched atomic_notifier_call_chain_robust() to use
the spinlock, since RCU cannot provide the guarantee
required for the recovery.
Note: software_resume() error handling was broken afaict.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://lore.kernel.org/r/20200818135804.325626653@infradead.org
2020-08-18 16:57:36 +03:00
int error ;
2006-03-23 14:00:03 +03:00
2014-06-14 00:30:35 +04:00
if ( ! hibernation_available ( ) )
return - EPERM ;
2022-08-22 14:18:17 +03:00
sleep_flags = lock_system_sleep ( ) ;
2008-06-12 00:09:45 +04:00
2020-05-07 10:19:52 +03:00
if ( ! hibernate_acquire ( ) ) {
2008-06-12 00:09:45 +04:00
error = - EBUSY ;
goto Unlock ;
}
2006-03-23 14:00:03 +03:00
2007-05-07 01:50:44 +04:00
if ( ( filp - > f_flags & O_ACCMODE ) = = O_RDWR ) {
2020-05-07 10:19:52 +03:00
hibernate_release ( ) ;
2008-06-12 00:09:45 +04:00
error = - ENOSYS ;
goto Unlock ;
2007-05-07 01:50:44 +04:00
}
2006-03-23 14:00:03 +03:00
nonseekable_open ( inode , filp ) ;
data = & snapshot_state ;
filp - > private_data = data ;
memset ( & data - > handle , 0 , sizeof ( struct snapshot_handle ) ) ;
if ( ( filp - > f_flags & O_ACCMODE ) = = O_RDONLY ) {
2009-04-12 22:06:56 +04:00
/* Hibernating. The image device should be accessible. */
2020-09-21 10:19:56 +03:00
data - > swap = swap_type_of ( swsusp_resume_device , 0 ) ;
2006-03-23 14:00:03 +03:00
data - > mode = O_RDONLY ;
2013-11-15 02:26:58 +04:00
data - > free_bitmaps = false ;
notifier: Fix broken error handling pattern
The current notifiers have the following error handling pattern all
over the place:
int err, nr;
err = __foo_notifier_call_chain(&chain, val_up, v, -1, &nr);
if (err & NOTIFIER_STOP_MASK)
__foo_notifier_call_chain(&chain, val_down, v, nr-1, NULL)
And aside from the endless repetition thereof, it is broken. Consider
blocking notifiers; both calls take and drop the rwsem, this means
that the notifier list can change in between the two calls, making @nr
meaningless.
Fix this by replacing all the __foo_notifier_call_chain() functions
with foo_notifier_call_chain_robust() that embeds the above pattern,
but ensures it is inside a single lock region.
Note: I switched atomic_notifier_call_chain_robust() to use
the spinlock, since RCU cannot provide the guarantee
required for the recovery.
Note: software_resume() error handling was broken afaict.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://lore.kernel.org/r/20200818135804.325626653@infradead.org
2020-08-18 16:57:36 +03:00
error = pm_notifier_call_chain_robust ( PM_HIBERNATION_PREPARE , PM_POST_HIBERNATION ) ;
2006-03-23 14:00:03 +03:00
} else {
2009-04-12 22:06:56 +04:00
/*
* Resuming . We may need to wait for the image device to
* appear .
*/
2022-07-15 08:49:58 +03:00
need_wait = true ;
2009-04-12 22:06:56 +04:00
2006-03-23 14:00:03 +03:00
data - > swap = - 1 ;
data - > mode = O_WRONLY ;
notifier: Fix broken error handling pattern
The current notifiers have the following error handling pattern all
over the place:
int err, nr;
err = __foo_notifier_call_chain(&chain, val_up, v, -1, &nr);
if (err & NOTIFIER_STOP_MASK)
__foo_notifier_call_chain(&chain, val_down, v, nr-1, NULL)
And aside from the endless repetition thereof, it is broken. Consider
blocking notifiers; both calls take and drop the rwsem, this means
that the notifier list can change in between the two calls, making @nr
meaningless.
Fix this by replacing all the __foo_notifier_call_chain() functions
with foo_notifier_call_chain_robust() that embeds the above pattern,
but ensures it is inside a single lock region.
Note: I switched atomic_notifier_call_chain_robust() to use
the spinlock, since RCU cannot provide the guarantee
required for the recovery.
Note: software_resume() error handling was broken afaict.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://lore.kernel.org/r/20200818135804.325626653@infradead.org
2020-08-18 16:57:36 +03:00
error = pm_notifier_call_chain_robust ( PM_RESTORE_PREPARE , PM_POST_RESTORE ) ;
2013-09-30 21:40:56 +04:00
if ( ! error ) {
error = create_basic_memory_bitmaps ( ) ;
data - > free_bitmaps = ! error ;
notifier: Fix broken error handling pattern
The current notifiers have the following error handling pattern all
over the place:
int err, nr;
err = __foo_notifier_call_chain(&chain, val_up, v, -1, &nr);
if (err & NOTIFIER_STOP_MASK)
__foo_notifier_call_chain(&chain, val_down, v, nr-1, NULL)
And aside from the endless repetition thereof, it is broken. Consider
blocking notifiers; both calls take and drop the rwsem, this means
that the notifier list can change in between the two calls, making @nr
meaningless.
Fix this by replacing all the __foo_notifier_call_chain() functions
with foo_notifier_call_chain_robust() that embeds the above pattern,
but ensures it is inside a single lock region.
Note: I switched atomic_notifier_call_chain_robust() to use
the spinlock, since RCU cannot provide the guarantee
required for the recovery.
Note: software_resume() error handling was broken afaict.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://lore.kernel.org/r/20200818135804.325626653@infradead.org
2020-08-18 16:57:36 +03:00
}
2007-11-20 01:38:25 +03:00
}
2013-08-30 16:19:38 +04:00
if ( error )
2020-05-07 10:19:52 +03:00
hibernate_release ( ) ;
2013-08-30 16:19:38 +04:00
2013-10-19 00:20:40 +04:00
data - > frozen = false ;
data - > ready = false ;
data - > platform_support = false ;
2020-09-21 10:19:55 +03:00
data - > dev = 0 ;
2006-03-23 14:00:03 +03:00
2008-06-12 00:09:45 +04:00
Unlock :
2022-08-22 14:18:17 +03:00
unlock_system_sleep ( sleep_flags ) ;
2008-06-12 00:09:45 +04:00
return error ;
2006-03-23 14:00:03 +03:00
}
static int snapshot_release ( struct inode * inode , struct file * filp )
{
struct snapshot_data * data ;
2022-08-22 14:18:17 +03:00
unsigned int sleep_flags ;
2006-03-23 14:00:03 +03:00
2022-08-22 14:18:17 +03:00
sleep_flags = lock_system_sleep ( ) ;
2008-06-12 00:09:45 +04:00
2006-03-23 14:00:03 +03:00
swsusp_free ( ) ;
data = filp - > private_data ;
2020-09-21 10:19:55 +03:00
data - > dev = 0 ;
2007-05-07 01:50:47 +04:00
free_all_swap_pages ( data - > swap ) ;
2011-05-10 23:10:01 +04:00
if ( data - > frozen ) {
pm_restore_gfp_mask ( ) ;
2013-08-30 16:19:38 +04:00
free_basic_memory_bitmaps ( ) ;
2006-03-23 14:00:03 +03:00
thaw_processes ( ) ;
2013-09-30 21:40:56 +04:00
} else if ( data - > free_bitmaps ) {
free_basic_memory_bitmaps ( ) ;
2011-05-10 23:10:01 +04:00
}
2010-12-10 02:16:39 +03:00
pm_notifier_call_chain ( data - > mode = = O_RDONLY ?
2007-11-20 01:38:25 +03:00
PM_POST_HIBERNATION : PM_POST_RESTORE ) ;
2020-05-07 10:19:52 +03:00
hibernate_release ( ) ;
2008-06-12 00:09:45 +04:00
2022-08-22 14:18:17 +03:00
unlock_system_sleep ( sleep_flags ) ;
2008-06-12 00:09:45 +04:00
2006-03-23 14:00:03 +03:00
return 0 ;
}
/*
 * snapshot_read - read the hibernation image out of the snapshot device.
 *
 * The image is exposed page by page: on a page boundary the next image page
 * is fetched with snapshot_read_next(); mid-page reads continue from the
 * current page.  Fails with -ENODATA until SNAPSHOT_CREATE_IMAGE has made
 * an image available.
 */
static ssize_t snapshot_read(struct file *filp, char __user *buf,
                             size_t count, loff_t *offp)
{
	loff_t pg_offp = *offp & ~PAGE_MASK;
	struct snapshot_data *data;
	unsigned int sleep_flags;
	ssize_t res;

	sleep_flags = lock_system_sleep();

	data = filp->private_data;
	if (!data->ready) {
		res = -ENODATA;
		goto Unlock;
	}
	if (!pg_offp) { /* on page boundary? */
		res = snapshot_read_next(&data->handle);
		if (res <= 0)
			goto Unlock;
	} else {
		/* Finish the current page first. */
		res = PAGE_SIZE - pg_offp;
	}

	res = simple_read_from_buffer(buf, count, &pg_offp,
			data_of(data->handle), res);
	if (res > 0)
		*offp += res;

 Unlock:
	unlock_system_sleep(sleep_flags);

	return res;
}
/*
 * snapshot_write - feed a hibernation image into the snapshot device.
 *
 * Mirror of snapshot_read(): on a page boundary the next destination page is
 * set up with snapshot_write_next(); mid-page writes fill the current page.
 * Waits for the image device to be probed on the first call after an
 * O_WRONLY open (need_wait).
 */
static ssize_t snapshot_write(struct file *filp, const char __user *buf,
                              size_t count, loff_t *offp)
{
	loff_t pg_offp = *offp & ~PAGE_MASK;
	struct snapshot_data *data;
	/*
	 * lock_system_sleep() returns unsigned int; use the same type as the
	 * other callers in this file (was inconsistently unsigned long).
	 */
	unsigned int sleep_flags;
	ssize_t res;

	if (need_wait) {
		wait_for_device_probe();
		need_wait = false;
	}

	sleep_flags = lock_system_sleep();

	data = filp->private_data;

	if (!pg_offp) {
		res = snapshot_write_next(&data->handle);
		if (res <= 0)
			goto unlock;
	} else {
		res = PAGE_SIZE;
	}

	if (!data_of(data->handle)) {
		res = -EINVAL;
		goto unlock;
	}

	res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp,
			buf, count);
	if (res > 0)
		*offp += res;
unlock:
	unlock_system_sleep(sleep_flags);

	return res;
}
2020-04-06 14:58:35 +03:00
struct compat_resume_swap_area {
compat_loff_t offset ;
u32 dev ;
} __packed ;
2020-04-06 14:58:34 +03:00
static int snapshot_set_swap_area ( struct snapshot_data * data ,
void __user * argp )
{
sector_t offset ;
dev_t swdev ;
if ( swsusp_swap_in_use ( ) )
return - EPERM ;
2020-04-06 14:58:35 +03:00
if ( in_compat_syscall ( ) ) {
struct compat_resume_swap_area swap_area ;
if ( copy_from_user ( & swap_area , argp , sizeof ( swap_area ) ) )
return - EFAULT ;
swdev = new_decode_dev ( swap_area . dev ) ;
offset = swap_area . offset ;
} else {
struct resume_swap_area swap_area ;
if ( copy_from_user ( & swap_area , argp , sizeof ( swap_area ) ) )
return - EFAULT ;
swdev = new_decode_dev ( swap_area . dev ) ;
offset = swap_area . offset ;
}
2020-04-06 14:58:34 +03:00
/*
* User space encodes device types as two - byte values ,
* so we need to recode them
*/
2020-09-21 10:19:56 +03:00
data - > swap = swap_type_of ( swdev , offset ) ;
2020-04-06 14:58:34 +03:00
if ( data - > swap < 0 )
2020-09-21 10:19:56 +03:00
return swdev ? - ENODEV : - EINVAL ;
data - > dev = swdev ;
2020-04-06 14:58:34 +03:00
return 0 ;
}
2008-06-12 00:07:52 +04:00
static long snapshot_ioctl ( struct file * filp , unsigned int cmd ,
unsigned long arg )
2006-03-23 14:00:03 +03:00
{
int error = 0 ;
struct snapshot_data * data ;
2007-10-26 02:59:31 +04:00
loff_t size ;
2006-12-07 07:34:10 +03:00
sector_t offset ;
2006-03-23 14:00:03 +03:00
2022-07-15 08:49:58 +03:00
if ( need_wait ) {
wait_for_device_probe ( ) ;
need_wait = false ;
}
2006-03-23 14:00:03 +03:00
if ( _IOC_TYPE ( cmd ) ! = SNAPSHOT_IOC_MAGIC )
return - ENOTTY ;
if ( _IOC_NR ( cmd ) > SNAPSHOT_IOC_MAXNR )
return - ENOTTY ;
if ( ! capable ( CAP_SYS_ADMIN ) )
return - EPERM ;
2018-07-31 11:51:32 +03:00
if ( ! mutex_trylock ( & system_transition_mutex ) )
2008-06-12 00:09:45 +04:00
return - EBUSY ;
2006-03-23 14:00:03 +03:00
2013-08-30 16:19:46 +04:00
lock_device_hotplug ( ) ;
2008-06-12 00:09:45 +04:00
data = filp - > private_data ;
2008-06-12 00:07:52 +04:00
2006-03-23 14:00:03 +03:00
switch ( cmd ) {
case SNAPSHOT_FREEZE :
if ( data - > frozen )
break ;
2008-10-16 09:01:21 +04:00
2019-02-25 15:36:41 +03:00
ksys_sync_helper ( ) ;
2007-11-20 01:38:25 +03:00
2008-10-16 09:01:21 +04:00
error = freeze_processes ( ) ;
2013-08-30 16:19:38 +04:00
if ( error )
break ;
error = create_basic_memory_bitmaps ( ) ;
if ( error )
thaw_processes ( ) ;
else
2013-10-19 00:20:40 +04:00
data - > frozen = true ;
2013-08-30 16:19:38 +04:00
2006-03-23 14:00:03 +03:00
break ;
case SNAPSHOT_UNFREEZE :
2007-06-16 21:16:03 +04:00
if ( ! data - > frozen | | data - > ready )
2006-03-23 14:00:03 +03:00
break ;
2010-12-04 00:57:45 +03:00
pm_restore_gfp_mask ( ) ;
2013-08-30 16:19:38 +04:00
free_basic_memory_bitmaps ( ) ;
2013-09-30 21:40:56 +04:00
data - > free_bitmaps = false ;
2006-03-23 14:00:03 +03:00
thaw_processes ( ) ;
2013-10-19 00:20:40 +04:00
data - > frozen = false ;
2006-03-23 14:00:03 +03:00
break ;
2010-01-28 01:47:50 +03:00
case SNAPSHOT_CREATE_IMAGE :
2006-03-23 14:00:03 +03:00
if ( data - > mode ! = O_RDONLY | | ! data - > frozen | | data - > ready ) {
error = - EPERM ;
break ;
}
2010-12-04 00:57:45 +03:00
pm_restore_gfp_mask ( ) ;
2007-10-26 03:01:10 +04:00
error = hibernation_snapshot ( data - > platform_support ) ;
2012-02-05 01:26:38 +04:00
if ( ! error ) {
2007-10-26 03:03:33 +04:00
error = put_user ( in_suspend , ( int __user * ) arg ) ;
2012-02-05 02:39:56 +04:00
data - > ready = ! freezer_test_done & & ! error ;
freezer_test_done = false ;
2011-12-02 01:33:10 +04:00
}
2006-03-23 14:00:03 +03:00
break ;
case SNAPSHOT_ATOMIC_RESTORE :
2024-02-18 11:40:58 +03:00
error = snapshot_write_finalize ( & data - > handle ) ;
if ( error )
break ;
2006-03-23 14:00:03 +03:00
if ( data - > mode ! = O_WRONLY | | ! data - > frozen | |
! snapshot_image_loaded ( & data - > handle ) ) {
error = - EPERM ;
break ;
}
2007-10-26 03:01:10 +04:00
error = hibernation_restore ( data - > platform_support ) ;
2006-03-23 14:00:03 +03:00
break ;
case SNAPSHOT_FREE :
swsusp_free ( ) ;
memset ( & data - > handle , 0 , sizeof ( struct snapshot_handle ) ) ;
2013-10-19 00:20:40 +04:00
data - > ready = false ;
2012-01-29 23:35:52 +04:00
/*
* It is necessary to thaw kernel threads here , because
* SNAPSHOT_CREATE_IMAGE may be invoked directly after
* SNAPSHOT_FREE . In that case , if kernel threads were not
* thawed , the preallocation of memory carried out by
* hibernation_snapshot ( ) might run into problems ( i . e . it
* might fail or even deadlock ) .
*/
thaw_kernel_threads ( ) ;
2006-03-23 14:00:03 +03:00
break ;
2010-01-28 01:47:50 +03:00
case SNAPSHOT_PREF_IMAGE_SIZE :
2006-03-23 14:00:03 +03:00
image_size = arg ;
break ;
2007-10-26 02:59:31 +04:00
case SNAPSHOT_GET_IMAGE_SIZE :
if ( ! data - > ready ) {
error = - ENODATA ;
break ;
}
size = snapshot_get_image_size ( ) ;
size < < = PAGE_SHIFT ;
error = put_user ( size , ( loff_t __user * ) arg ) ;
break ;
2010-01-28 01:47:50 +03:00
case SNAPSHOT_AVAIL_SWAP_SIZE :
2007-10-26 02:59:31 +04:00
size = count_swap_pages ( data - > swap , 1 ) ;
size < < = PAGE_SHIFT ;
error = put_user ( size , ( loff_t __user * ) arg ) ;
2006-03-23 14:00:03 +03:00
break ;
2010-01-28 01:47:50 +03:00
case SNAPSHOT_ALLOC_SWAP_PAGE :
2006-03-23 14:00:03 +03:00
if ( data - > swap < 0 | | data - > swap > = MAX_SWAPFILES ) {
error = - ENODEV ;
break ;
}
2007-05-07 01:50:47 +04:00
offset = alloc_swapdev_block ( data - > swap ) ;
2006-03-23 14:00:03 +03:00
if ( offset ) {
offset < < = PAGE_SHIFT ;
2007-10-26 03:03:33 +04:00
error = put_user ( offset , ( loff_t __user * ) arg ) ;
2006-03-23 14:00:03 +03:00
} else {
error = - ENOSPC ;
}
break ;
case SNAPSHOT_FREE_SWAP_PAGES :
if ( data - > swap < 0 | | data - > swap > = MAX_SWAPFILES ) {
error = - ENODEV ;
break ;
}
2007-05-07 01:50:47 +04:00
free_all_swap_pages ( data - > swap ) ;
2006-03-23 14:00:03 +03:00
break ;
2006-03-23 14:00:09 +03:00
case SNAPSHOT_S2RAM :
if ( ! data - > frozen ) {
error = - EPERM ;
break ;
}
2007-07-19 12:47:38 +04:00
/*
* Tasks are frozen and the notifiers have been called with
* PM_HIBERNATION_PREPARE
*/
error = suspend_devices_and_enter ( PM_SUSPEND_MEM ) ;
2013-10-19 00:20:40 +04:00
data - > ready = false ;
2006-03-23 14:00:09 +03:00
break ;
2007-10-26 03:01:10 +04:00
case SNAPSHOT_PLATFORM_SUPPORT :
data - > platform_support = ! ! arg ;
break ;
case SNAPSHOT_POWER_OFF :
if ( data - > platform_support )
error = hibernation_platform_enter ( ) ;
break ;
2006-12-07 07:34:15 +03:00
case SNAPSHOT_SET_SWAP_AREA :
2020-04-06 14:58:34 +03:00
error = snapshot_set_swap_area ( data , ( void __user * ) arg ) ;
2006-12-07 07:34:15 +03:00
break ;
2006-03-23 14:00:03 +03:00
default :
error = - ENOTTY ;
}
2008-06-12 00:09:45 +04:00
2013-08-30 16:19:46 +04:00
unlock_device_hotplug ( ) ;
2018-07-31 11:51:32 +03:00
mutex_unlock ( & system_transition_mutex ) ;
2008-06-12 00:09:45 +04:00
2006-03-23 14:00:03 +03:00
return error ;
}
2011-12-28 01:54:52 +04:00
#ifdef CONFIG_COMPAT
/*
 * snapshot_compat_ioctl - 32-bit compat entry point.
 *
 * loff_t and compat_loff_t have the same size (asserted below), so the
 * commands that pass a pointer only need the pointer itself converted via
 * compat_ptr(); all other commands are forwarded unchanged.
 */
static long
snapshot_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t));

	switch (cmd) {
	case SNAPSHOT_GET_IMAGE_SIZE:
	case SNAPSHOT_AVAIL_SWAP_SIZE:
	case SNAPSHOT_ALLOC_SWAP_PAGE:
	case SNAPSHOT_CREATE_IMAGE:
	case SNAPSHOT_SET_SWAP_AREA:
		return snapshot_ioctl(file, cmd,
				      (unsigned long)compat_ptr(arg));
	default:
		return snapshot_ioctl(file, cmd, arg);
	}
}
#endif /* CONFIG_COMPAT */
2006-12-07 07:40:36 +03:00
static const struct file_operations snapshot_fops = {
2006-03-23 14:00:03 +03:00
. open = snapshot_open ,
. release = snapshot_release ,
. read = snapshot_read ,
. write = snapshot_write ,
. llseek = no_llseek ,
2008-06-12 00:07:52 +04:00
. unlocked_ioctl = snapshot_ioctl ,
2011-12-28 01:54:52 +04:00
# ifdef CONFIG_COMPAT
. compat_ioctl = snapshot_compat_ioctl ,
# endif
2006-03-23 14:00:03 +03:00
} ;
static struct miscdevice snapshot_device = {
. minor = SNAPSHOT_MINOR ,
. name = " snapshot " ,
. fops = & snapshot_fops ,
} ;
static int __init snapshot_device_init ( void )
{
return misc_register ( & snapshot_device ) ;
} ;
device_initcall ( snapshot_device_init ) ;