/*
 * Copyright (C) 2018 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef BCACHE_H
#define BCACHE_H

#include "device_mapper/all.h"
#include "base/memory/container_of.h"

#include <linux/fs.h>
#include <stdint.h>
#include <stdbool.h>

enum dir {
	DIR_READ,
	DIR_WRITE
};

typedef uint64_t block_address;
typedef uint64_t sector_t;

typedef void io_complete_fn(void *context, int io_error);

struct io_engine {
	void (*destroy)(struct io_engine *e);
	bool (*issue)(struct io_engine *e, enum dir d, int fd,
		      sector_t sb, sector_t se, void *data, void *context);
	bool (*wait)(struct io_engine *e, io_complete_fn fn);
	unsigned (*max_io)(struct io_engine *e);
};

struct io_engine *create_async_io_engine(void);
struct io_engine *create_sync_io_engine(void);
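
/*
 * Illustrative sketch only (not something this header mandates): callers
 * that prefer asynchronous IO but must cope with it being unavailable can
 * fall back to the synchronous engine.
 *
 *	struct io_engine *e = create_async_io_engine();
 *
 *	if (!e)
 *		e = create_sync_io_engine();
 *	if (!e)
 *		return false;
 */
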
/*----------------------------------------------------------------*/

struct bcache;

struct block {
	/* clients may only access these three fields */
	int fd;
	uint64_t index;
	void *data;

	struct bcache *cache;
	struct dm_list list;

	unsigned flags;
	unsigned ref_count;
	int error;
	enum dir io_dir;
};

/*
 * Ownership of the engine passes to the cache; the engine will be
 * destroyed even if this call fails.
 */
struct bcache *bcache_create(sector_t block_size, unsigned nr_cache_blocks,
			     struct io_engine *engine);

void bcache_destroy(struct bcache *cache);
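
/*
 * Example lifecycle (an illustrative sketch; the block and cache sizes
 * are arbitrary values chosen for the example):
 *
 *	struct io_engine *e = create_async_io_engine();
 *	struct bcache *cache;
 *
 *	if (!e)
 *		return false;
 *
 *	// 8 sectors (4k) per block, 1024 blocks of cache
 *	cache = bcache_create(8, 1024, e);
 *	if (!cache)
 *		return false;	// the engine has already been destroyed
 *
 *	// ... use the cache ...
 *
 *	bcache_destroy(cache);
 */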

enum bcache_get_flags {
	/*
	 * The block will be zeroed before get_block returns it. This
	 * potentially avoids a read if the block is not already in the
	 * cache. GF_DIRTY is implicit.
	 */
	GF_ZERO = (1 << 0),

	/*
	 * Indicates the caller intends to change the data in the block; a
	 * writeback will occur after the block is released.
	 */
	GF_DIRTY = (1 << 1)
};

sector_t bcache_block_sectors(struct bcache *cache);
unsigned bcache_nr_cache_blocks(struct bcache *cache);
unsigned bcache_max_prefetches(struct bcache *cache);

/*
 * Use the prefetch method to take advantage of asynchronous IO. For
 * example, if you wanted to read a block from many devices concurrently
 * you'd do something like this:
 *
 *	dm_list_iterate_items (dev, &devices)
 *		bcache_prefetch(cache, dev->fd, block);
 *
 *	dm_list_iterate_items (dev, &devices) {
 *		if (!bcache_get(cache, dev->fd, block, 0, &b))
 *			fail();
 *
 *		process_block(b);
 *	}
 *
 * It's slightly sub-optimal, since you may not run the gets in the order
 * that they complete. But we're talking a very small difference, and it's
 * worth it to keep callbacks out of this interface.
 */

void bcache_prefetch(struct bcache *cache, int fd, block_address index);

/*
 * Returns true on success.
 */
bool bcache_get(struct bcache *cache, int fd, block_address index,
		unsigned flags, struct block **result);

void bcache_put(struct block *b);
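
/*
 * Illustrative sketch only: updating a block with GF_DIRTY so that it is
 * written back once the reference is dropped. update_block() is a
 * hypothetical helper standing in for the caller's own logic.
 *
 *	struct block *b;
 *
 *	if (!bcache_get(cache, fd, index, GF_DIRTY, &b))
 *		return false;
 *
 *	update_block(b->data);	// hypothetical: modify the cached data
 *	bcache_put(b);		// writeback occurs after release
 */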

/*
 * flush() does not attempt to write back locked blocks. flush will fail
 * (return false) if any unlocked dirty data cannot be written back.
 */
bool bcache_flush(struct bcache *cache);

/*
 * Removes a block from the cache.
 *
 * If the block is dirty it will be written back first. If the writeback
 * fails, false will be returned.
 *
 * If the block is currently held, false will be returned.
 */
bool bcache_invalidate(struct bcache *cache, int fd, block_address index);

/*
 * Invalidates all blocks on the given descriptor. Call this before
 * closing the descriptor to make sure everything is written back.
 */
bool bcache_invalidate_fd(struct bcache *cache, int fd);
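
/*
 * Illustrative sketch only: write everything back before closing a
 * descriptor, as the comment above advises.
 *
 *	if (!bcache_invalidate_fd(cache, fd))
 *		return false;	// dirty data could not be written back
 *
 *	close(fd);
 */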

/*
 * Call this function if flush or invalidate fail and you do not wish to
 * retry the writes. This will throw away any dirty data that was not
 * written. If any blocks for fd are held, it will call abort().
 */
void bcache_abort_fd(struct bcache *cache, int fd);
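
/*
 * Illustrative sketch only: give up on a descriptor's dirty data when
 * writeback fails, rather than retrying. This assumes no blocks for fd
 * are still held, since bcache_abort_fd() would abort() in that case.
 *
 *	if (!bcache_invalidate_fd(cache, fd)) {
 *		bcache_abort_fd(cache, fd);	// discard unwritten dirty data
 *		close(fd);
 *		return false;
 *	}
 */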

//----------------------------------------------------------------

// The functions below are utilities written in terms of the above api.

// Prefetches the blocks necessary to satisfy a byte range.
void bcache_prefetch_bytes(struct bcache *cache, int fd, uint64_t start, size_t len);

// Reads, writes and zeroes bytes. Returns false if errors occur.
bool bcache_read_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data);
bool bcache_write_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data);
bool bcache_zero_bytes(struct bcache *cache, int fd, uint64_t start, size_t len);
bool bcache_set_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, uint8_t val);
bool bcache_invalidate_bytes(struct bcache *cache, int fd, uint64_t start, size_t len);
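
/*
 * Illustrative sketch only: reading several byte ranges, issuing all the
 * prefetches first so the underlying block reads can proceed concurrently.
 * The region array is a hypothetical structure for the example.
 *
 *	for (i = 0; i < nr_regions; i++)
 *		bcache_prefetch_bytes(cache, fd, region[i].start, region[i].len);
 *
 *	for (i = 0; i < nr_regions; i++)
 *		if (!bcache_read_bytes(cache, fd, region[i].start,
 *				       region[i].len, region[i].buf))
 *			return false;
 */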

void bcache_set_last_byte(struct bcache *cache, int fd, uint64_t offset, int sector_size);
void bcache_unset_last_byte(struct bcache *cache, int fd);

//----------------------------------------------------------------

#endif