// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/module.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
        return file->f_mapping->host;
}
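
/*
 * A block device is a flat, contiguous range of sectors, so every logical
 * block maps 1:1 onto the device: mark the buffer mapped at the requested
 * block number and let the generic buffer_head code do the I/O.
 */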
static int blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}
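
/*
 * Build the bio operation flags for a direct write.  dsync writes set REQ_FUA
 * so that the data is on stable storage when the bio completes, which avoids
 * having to issue a separate flush from an I/O completion work item.
 */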
static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
        blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

        /* avoid the need for an I/O completion work item */
        if (iocb_is_dsync(iocb))
                opf |= REQ_FUA;
        return opf;
}
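
/*
 * Direct I/O to a block device must be aligned to the device's logical block
 * size, both in the starting offset and in the iovec segments themselves.
 */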
static bool blkdev_dio_unaligned(struct block_device *bdev, loff_t pos,
                struct iov_iter *iter)
{
        return pos & (bdev_logical_block_size(bdev) - 1) ||
                !bdev_iter_is_aligned(bdev, iter);
}
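
/*
 * Fast path for small synchronous direct I/O: the whole request fits in one
 * on-stack bio (using at most DIO_INLINE_BIO_VECS inline bio_vecs, else a
 * kmalloc'ed array), so no blkdev_dio needs to be allocated and the caller
 * simply waits for completion with submit_bio_wait().
 */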
#define DIO_INLINE_BIO_VECS 4

static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
                struct iov_iter *iter, unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
        loff_t pos = iocb->ki_pos;
        bool should_dirty = false;
        struct bio bio;
        ssize_t ret;

        if (blkdev_dio_unaligned(bdev, pos, iter))
                return -EINVAL;

        if (nr_pages <= DIO_INLINE_BIO_VECS)
                vecs = inline_vecs;
        else {
                vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
                                     GFP_KERNEL);
                if (!vecs)
                        return -ENOMEM;
        }

        if (iov_iter_rw(iter) == READ) {
                bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
                if (user_backed_iter(iter))
                        should_dirty = true;
        } else {
                bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
        }
        bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio.bi_ioprio = iocb->ki_ioprio;

        ret = bio_iov_iter_get_pages(&bio, iter);
        if (unlikely(ret))
                goto out;
        ret = bio.bi_iter.bi_size;

        if (iov_iter_rw(iter) == WRITE)
                task_io_account_write(ret);

        if (iocb->ki_flags & IOCB_NOWAIT)
                bio.bi_opf |= REQ_NOWAIT;

        submit_bio_wait(&bio);

        bio_release_pages(&bio, should_dirty);
        if (unlikely(bio.bi_status))
                ret = blk_status_to_errno(bio.bi_status);

out:
        if (vecs != inline_vecs)
                kfree(vecs);

        bio_uninit(&bio);

        return ret;
}
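
/*
 * State shared between the submission path and the bio completion handler for
 * multi-bio and async direct I/O.  The structure is embedded in the first bio
 * allocated from blkdev_dio_pool and lives exactly as long as that bio.
 */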
enum {
        DIO_SHOULD_DIRTY        = 1,
        DIO_IS_SYNC             = 2,
};

struct blkdev_dio {
        union {
                struct kiocb            *iocb;
                struct task_struct      *waiter;
        };
        size_t                  size;
        atomic_t                ref;
        unsigned int            flags;
        struct bio              bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;
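
/*
 * Completion handler for the multi-bio path: the last completing bio either
 * completes the iocb (async) or wakes the submitting task (sync).  Errors are
 * propagated to the parent dio through its embedded bio.
 */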
static void blkdev_bio_end_io(struct bio *bio)
{
        struct blkdev_dio *dio = bio->bi_private;
        bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

        if (bio->bi_status && !dio->bio.bi_status)
                dio->bio.bi_status = bio->bi_status;

        if (atomic_dec_and_test(&dio->ref)) {
                if (!(dio->flags & DIO_IS_SYNC)) {
                        struct kiocb *iocb = dio->iocb;
                        ssize_t ret;

                        WRITE_ONCE(iocb->private, NULL);

                        if (likely(!dio->bio.bi_status)) {
                                ret = dio->size;
                                iocb->ki_pos += ret;
                        } else {
                                ret = blk_status_to_errno(dio->bio.bi_status);
                        }

                        dio->iocb->ki_complete(iocb, ret);
                        bio_put(&dio->bio);
                } else {
                        struct task_struct *waiter = dio->waiter;

                        WRITE_ONCE(dio->waiter, NULL);
                        blk_wake_io_task(waiter);
                }
        }

        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}
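
/*
 * Slow path used when the request spans more than BIO_MAX_VECS pages: the
 * iterator is split into multiple bios submitted under a plug and tracked by
 * a shared, reference-counted blkdev_dio.
 */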
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
        bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if (blkdev_dio_unaligned(bdev, pos, iter))
                return -EINVAL;

        if (iocb->ki_flags & IOCB_ALLOC_CACHE)
                opf |= REQ_ALLOC_CACHE;
        bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
                               &blkdev_dio_pool);
        dio = container_of(bio, struct blkdev_dio, bio);
        atomic_set(&dio->ref, 1);
        /*
         * Grab an extra reference to ensure the dio structure which is embedded
         * into the first bio stays around.
         */
        bio_get(bio);

        is_sync = is_sync_kiocb(iocb);
        if (is_sync) {
                dio->flags = DIO_IS_SYNC;
                dio->waiter = current;
        } else {
                dio->flags = 0;
                dio->iocb = iocb;
        }

        dio->size = 0;
        if (is_read && user_backed_iter(iter))
                dio->flags |= DIO_SHOULD_DIRTY;

        blk_start_plug(&plug);

        for (;;) {
                bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
                bio->bi_private = dio;
                bio->bi_end_io = blkdev_bio_end_io;
                bio->bi_ioprio = iocb->ki_ioprio;

                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                        break;
                }

                if (is_read) {
                        if (dio->flags & DIO_SHOULD_DIRTY)
                                bio_set_pages_dirty(bio);
                } else {
                        task_io_account_write(bio->bi_iter.bi_size);
                }
                if (iocb->ki_flags & IOCB_NOWAIT)
                        bio->bi_opf |= REQ_NOWAIT;

                dio->size += bio->bi_iter.bi_size;
                pos += bio->bi_iter.bi_size;

                nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
                if (!nr_pages) {
                        submit_bio(bio);
                        break;
                }
                atomic_inc(&dio->ref);
                submit_bio(bio);
                bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
        }

        blk_finish_plug(&plug);

        if (!is_sync)
                return -EIOCBQUEUED;

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(dio->waiter))
                        break;
                blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (!ret)
                ret = blk_status_to_errno(dio->bio.bi_status);
        if (likely(!ret))
                ret = dio->size;

        bio_put(&dio->bio);
        return ret;
}
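
/*
 * Completion handler for the single-bio async path: there is exactly one bio,
 * so the iocb can be completed directly from its end_io callback.
 */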
static void blkdev_bio_end_io_async(struct bio *bio)
{
        struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
        struct kiocb *iocb = dio->iocb;
        ssize_t ret;

        WRITE_ONCE(iocb->private, NULL);

        if (likely(!bio->bi_status)) {
                ret = dio->size;
                iocb->ki_pos += ret;
        } else {
                ret = blk_status_to_errno(bio->bi_status);
        }

        iocb->ki_complete(iocb, ret);

        if (dio->flags & DIO_SHOULD_DIRTY) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}
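
/*
 * Async direct I/O that fits in a single bio.  The blkdev_dio is embedded in
 * that bio, so no reference counting is needed, and IOCB_HIPRI requests are
 * marked REQ_POLLED so that their completion can be polled via ->iopoll.
 */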
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
                                        struct iov_iter *iter,
                                        unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        bool is_read = iov_iter_rw(iter) == READ;
        blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
        struct blkdev_dio *dio;
        struct bio *bio;
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if (blkdev_dio_unaligned(bdev, pos, iter))
                return -EINVAL;

        if (iocb->ki_flags & IOCB_ALLOC_CACHE)
                opf |= REQ_ALLOC_CACHE;
        bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
                               &blkdev_dio_pool);
        dio = container_of(bio, struct blkdev_dio, bio);
        dio->flags = 0;
        dio->iocb = iocb;
        bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio->bi_end_io = blkdev_bio_end_io_async;
        bio->bi_ioprio = iocb->ki_ioprio;

        if (iov_iter_is_bvec(iter)) {
                /*
                 * Users don't rely on the iterator being in any particular
                 * state for async I/O returning -EIOCBQUEUED, hence we can
                 * avoid expensive iov_iter_advance().  Bypass
                 * bio_iov_iter_get_pages() and set the bvec directly.
                 */
                bio_iov_bvec_set(bio, iter);
        } else {
                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio_put(bio);
                        return ret;
                }
        }
        dio->size = bio->bi_iter.bi_size;

        if (is_read) {
                if (user_backed_iter(iter)) {
                        dio->flags |= DIO_SHOULD_DIRTY;
                        bio_set_pages_dirty(bio);
                }
        } else {
                task_io_account_write(bio->bi_iter.bi_size);
        }

        if (iocb->ki_flags & IOCB_HIPRI) {
                bio->bi_opf |= REQ_POLLED | REQ_NOWAIT;
                submit_bio(bio);
                WRITE_ONCE(iocb->private, bio);
        } else {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        bio->bi_opf |= REQ_NOWAIT;
                submit_bio(bio);
        }
        return -EIOCBQUEUED;
}
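
/*
 * Dispatch a direct I/O request to the simple (synchronous, single bio),
 * async (single bio) or multi-bio implementation, depending on the iterator
 * size and on whether the kiocb is synchronous.
 */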
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        unsigned int nr_pages;

        if (!iov_iter_count(iter))
                return 0;

        nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
        if (likely(nr_pages <= BIO_MAX_VECS)) {
                if (is_sync_kiocb(iocb))
                        return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
                return __blkdev_direct_IO_async(iocb, iter, nr_pages);
        }
        return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
        return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
        return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned copied, struct page *page,
                void *fsdata)
{
        int ret;

        ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

        unlock_page(page);
        put_page(page);

        return ret;
}

static int blkdev_writepages(struct address_space *mapping,
                             struct writeback_control *wbc)
{
        return generic_writepages(mapping, wbc);
}

const struct address_space_operations def_blk_aops = {
        .dirty_folio            = block_dirty_folio,
        .invalidate_folio       = block_invalidate_folio,
        .read_folio             = blkdev_read_folio,
        .readahead              = blkdev_readahead,
        .writepage              = blkdev_writepage,
        .write_begin            = blkdev_write_begin,
        .write_end              = blkdev_write_end,
        .writepages             = blkdev_writepages,
        .direct_IO              = blkdev_direct_IO,
        .migrate_folio          = buffer_migrate_folio_norefs,
        .is_dirty_writeback     = buffer_check_dirty_writeback,
};

/*
 * For a block special file, file_inode(file)->i_size is zero, so we compute
 * the size by hand (just as in block_read/write above).
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t retval;

        inode_lock(bd_inode);
        retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
        inode_unlock(bd_inode);
        return retval;
}

static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
                int datasync)
{
        struct block_device *bdev = filp->private_data;
        int error;

        error = file_write_and_wait_range(filp, start, end);
        if (error)
                return error;

        /*
         * There is no need to serialise calls to blkdev_issue_flush with
         * i_mutex and doing so causes performance issues with concurrent
         * O_SYNC writers to a block device.
         */
        error = blkdev_issue_flush(bdev);
        if (error == -EOPNOTSUPP)
                error = 0;

        return error;
}

static int blkdev_open(struct inode *inode, struct file *filp)
{
        struct block_device *bdev;

        /*
         * Preserve backwards compatibility and allow large file access
         * even if userspace doesn't ask for it explicitly. Some mkfs
         * binary needs it. We might want to drop this workaround
         * during an unstable branch.
         */
        filp->f_flags |= O_LARGEFILE;
        filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

        if (filp->f_flags & O_NDELAY)
                filp->f_mode |= FMODE_NDELAY;
        if (filp->f_flags & O_EXCL)
                filp->f_mode |= FMODE_EXCL;
        if ((filp->f_flags & O_ACCMODE) == 3)
                filp->f_mode |= FMODE_WRITE_IOCTL;

        bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        filp->private_data = bdev;
        filp->f_mapping = bdev->bd_inode->i_mapping;
        filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
        return 0;
}

static int blkdev_close(struct inode *inode, struct file *filp)
{
        struct block_device *bdev = filp->private_data;

        blkdev_put(bdev, filp->f_mode);
        return 0;
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct inode *bd_inode = bdev->bd_inode;
        loff_t size = bdev_nr_bytes(bdev);
        struct blk_plug plug;
        size_t shorted = 0;
        ssize_t ret;

        if (bdev_read_only(bdev))
                return -EPERM;

        if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
                return -ETXTBSY;

        if (!iov_iter_count(from))
                return 0;

        if (iocb->ki_pos >= size)
                return -ENOSPC;

        if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
                return -EOPNOTSUPP;

        size -= iocb->ki_pos;
        if (iov_iter_count(from) > size) {
                shorted = iov_iter_count(from) - size;
                iov_iter_truncate(from, size);
        }

        blk_start_plug(&plug);
        ret = __generic_file_write_iter(iocb, from);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        iov_iter_reexpand(from, iov_iter_count(from) + shorted);
        blk_finish_plug(&plug);
        return ret;
}
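
/*
 * Read from the block device.  The iterator is truncated to the device size
 * and re-expanded before returning; O_DIRECT reads fall back to buffered
 * reading for whatever part of the request direct I/O did not transfer.
 */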
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        loff_t size = bdev_nr_bytes(bdev);
        loff_t pos = iocb->ki_pos;
        size_t shorted = 0;
        ssize_t ret = 0;
        size_t count;

        if (unlikely(pos + iov_iter_count(to) > size)) {
                if (pos >= size)
                        return 0;
                size -= pos;
                shorted = iov_iter_count(to) - size;
                iov_iter_truncate(to, size);
        }

        count = iov_iter_count(to);
        if (!count)
                goto reexpand; /* skip atime */

        if (iocb->ki_flags & IOCB_DIRECT) {
                struct address_space *mapping = iocb->ki_filp->f_mapping;

                if (iocb->ki_flags & IOCB_NOWAIT) {
                        if (filemap_range_needs_writeback(mapping, pos,
                                                          pos + count - 1)) {
                                ret = -EAGAIN;
                                goto reexpand;
                        }
                } else {
                        ret = filemap_write_and_wait_range(mapping, pos,
                                                           pos + count - 1);
                        if (ret < 0)
                                goto reexpand;
                }

                file_accessed(iocb->ki_filp);

                ret = blkdev_direct_IO(iocb, to);
                if (ret >= 0) {
                        iocb->ki_pos += ret;
                        count -= ret;
                }
                iov_iter_revert(to, count - iov_iter_count(to));
                if (ret < 0 || !count)
                        goto reexpand;
        }

        ret = filemap_read(iocb, to, ret);

reexpand:
        if (unlikely(shorted))
                iov_iter_reexpand(to, iov_iter_count(to) + shorted);

        return ret;
}

#define BLKDEV_FALLOC_FL_SUPPORTED                                      \
                (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |           \
                 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
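
/*
 * fallocate() on a block device zeroes or discards a range of the device.
 * The affected page cache range is invalidated first, under the mapping's
 * invalidate_lock, so that stale cached data cannot be read back afterwards.
 */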
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
                             loff_t len)
{
        struct inode *inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(inode);
        loff_t end = start + len - 1;
        loff_t isize;
        int error;

        /* Fail if we don't recognize the flags. */
        if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
                return -EOPNOTSUPP;

        /* Don't go off the end of the device. */
        isize = bdev_nr_bytes(bdev);
        if (start >= isize)
                return -EINVAL;
        if (end >= isize) {
                if (mode & FALLOC_FL_KEEP_SIZE) {
                        len = isize - start;
                        end = start + len - 1;
                } else
                        return -EINVAL;
        }

        /*
         * Don't allow IO that isn't aligned to logical block size.
         */
        if ((start | len) & (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        filemap_invalidate_lock(inode->i_mapping);

        /* Invalidate the page cache, including dirty pages. */
        error = truncate_bdev_range(bdev, file->f_mode, start, end);
        if (error)
                goto fail;

        switch (mode) {
        case FALLOC_FL_ZERO_RANGE:
        case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOUNMAP);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOFALLBACK);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
                error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL);
                break;
        default:
                error = -EOPNOTSUPP;
        }

 fail:
        filemap_invalidate_unlock(inode->i_mapping);
        return error;
}

const struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
        .llseek         = blkdev_llseek,
        .read_iter      = blkdev_read_iter,
        .write_iter     = blkdev_write_iter,
        .iopoll         = iocb_bio_iopoll,
        .mmap           = generic_file_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = blkdev_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_blkdev_ioctl,
#endif
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = blkdev_fallocate,
};

static __init int blkdev_init(void)
{
        return bioset_init(&blkdev_dio_pool, 4,
                           offsetof(struct blkdev_dio, bio),
                           BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);