/*
 * Copyright (C) 2018 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef BCACHE_H
#define BCACHE_H

#include <linux/fs.h>
#include <stdint.h>
#include <stdbool.h>

#include "libdevmapper.h"

/*----------------------------------------------------------------*/

// FIXME: move somewhere more sensible
#define container_of(v, t, head) \
	((t *)((const char *)(v) - (const char *)&((t *)0)->head))
/*----------------------------------------------------------------*/

/*
 * bcache-specific error numbers.
 * These supplement standard -EXXX error numbers and should not overlap.
 */
#define BCACHE_NO_BLOCK 201

enum dir {
	DIR_READ,
	DIR_WRITE
};

typedef uint64_t block_address;
typedef uint64_t sector_t;

typedef void io_complete_fn(void *context, int io_error);

struct io_engine {
	void (*destroy)(struct io_engine *e);
	bool (*issue)(struct io_engine *e, enum dir d, int fd,
		      sector_t sb, sector_t se, void *data, void *context);
	bool (*wait)(struct io_engine *e, io_complete_fn fn);
	unsigned (*max_io)(struct io_engine *e);
};

struct io_engine *create_async_io_engine(void);
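/*
 * Only the asynchronous engine is provided here.  Purely as an illustration
 * of the vtable contract above, a hypothetical synchronous engine might look
 * roughly like this (assuming sb/se are 512-byte sector offsets, as the
 * sector_t naming suggests, and that wait() reports each completed IO via
 * the supplied callback):
 *
 *	struct sync_engine {
 *		struct io_engine e;
 *		void *context;	// context of the single outstanding IO
 *		int error;	// 0 on success, negative errno on failure
 *	};
 *
 *	static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,
 *				sector_t sb, sector_t se, void *data, void *context)
 *	{
 *		struct sync_engine *e = container_of(ioe, struct sync_engine, e);
 *		size_t len = (se - sb) * 512;
 *		ssize_t r = (d == DIR_READ) ?
 *			pread(fd, data, len, (off_t) sb * 512) :
 *			pwrite(fd, data, len, (off_t) sb * 512);
 *
 *		e->context = context;
 *		e->error = (r == (ssize_t) len) ? 0 : -EIO;
 *		return true;
 *	}
 *
 *	static bool _sync_wait(struct io_engine *ioe, io_complete_fn fn)
 *	{
 *		struct sync_engine *e = container_of(ioe, struct sync_engine, e);
 *		fn(e->context, e->error);	// report the completed IO
 *		return true;
 *	}
 *
 * max_io() would return 1 and destroy() would simply free the structure.
 */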
2018-02-05 19:04:23 +03:00
/*----------------------------------------------------------------*/
struct bcache;

struct block {
	/* clients may only access these three fields */
	int fd;
	uint64_t index;
	void *data;

	struct bcache *cache;
	struct dm_list list;
	struct dm_list hash;
	unsigned flags;
	unsigned ref_count;
	int error;
	enum dir io_dir;
};

/*
 * Ownership of engine passes.  Engine will be destroyed even if this fails.
 */
struct bcache *bcache_create(sector_t block_size, unsigned nr_cache_blocks,
			     struct io_engine *engine);

void bcache_destroy(struct bcache *cache);
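/*
 * A minimal setup/teardown sketch.  The values are only examples; since
 * block_size is a sector_t it is assumed to be given in 512-byte sectors
 * (8 here for 4K blocks), and nr_cache_blocks bounds the cache's memory use.
 *
 *	struct io_engine *e = create_async_io_engine();
 *	struct bcache *cache;
 *
 *	if (!e)
 *		return false;
 *
 *	// No need to destroy the engine on failure: ownership has already
 *	// passed to bcache_create(), which destroys it for us.
 *	if (!(cache = bcache_create(8, 1024, e)))
 *		return false;
 *
 *	... use the cache ...
 *
 *	bcache_destroy(cache);
 */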
enum bcache_get_flags {
	/*
	 * The block will be zeroed before get_block returns it.  This
	 * potentially avoids a read if the block is not already in the cache.
	 * GF_DIRTY is implicit.
	 */
	GF_ZERO = (1 << 0),

	/*
	 * Indicates the caller intends to change the data in the block; a
	 * writeback will occur after the block is released.
	 */
	GF_DIRTY = (1 << 1)
};
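/*
 * A short sketch of how the flags are meant to be used with bcache_get()
 * and bcache_put(), both declared below.  GF_ZERO hands back an already
 * zeroed block, skipping the read, and implies GF_DIRTY so the new contents
 * are written back after the block is released (format_new_block() is a
 * hypothetical helper):
 *
 *	struct block *b;
 *	int error;
 *
 *	if (!bcache_get(cache, fd, index, GF_ZERO, &b, &error))
 *		return false;
 *
 *	format_new_block(b->data);
 *	bcache_put(b);
 */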
unsigned bcache_nr_cache_blocks(struct bcache *cache);
unsigned bcache_max_prefetches(struct bcache *cache);

/*
 * Use the prefetch method to take advantage of asynchronous IO.  For example,
 * if you wanted to read a block from many devices concurrently you'd do
 * something like this:
 *
 *	int error;
 *
 *	dm_list_iterate_items (dev, &devices)
 *		bcache_prefetch(cache, dev->fd, block);
 *
 *	dm_list_iterate_items (dev, &devices) {
 *		if (!bcache_get(cache, dev->fd, block, 0, &b, &error))
 *			fail();
 *
 *		process_block(b);
 *	}
 *
 * It's slightly suboptimal, since you may not run the gets in the order that
 * they complete.  But we're talking a very small difference, and it's worth it
 * to keep callbacks out of this interface.
 */
void bcache_prefetch(struct bcache *cache, int fd, block_address index);

/*
 * Returns true on success.
 */
bool bcache_get(struct bcache *cache, int fd, block_address index,
		unsigned flags, struct block **result, int *error);

void bcache_put(struct block *b);

/*
 * flush() does not attempt to write back locked blocks.  flush() will fail
 * (return false) if any unlocked dirty data cannot be written back.
 */
bool bcache_flush(struct bcache *cache);
/*
 * Removes a block from the cache.  If the block is dirty it will be written
 * back first.  If the block is currently held a warning will be issued, and it
 * will not be removed.
 */
void bcache_invalidate(struct bcache *cache, int fd, block_address index);

/*
 * Invalidates all blocks on the given descriptor.  Call this before closing
 * the descriptor to make sure everything is written back.
 */
void bcache_invalidate_fd(struct bcache *cache, int fd);
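/*
 * A typical teardown sequence for one device might look like this (sketch
 * only): write out any dirty data, drop the cached blocks for the
 * descriptor, then close it.
 *
 *	if (!bcache_flush(cache))
 *		... report that some dirty data could not be written back ...
 *
 *	bcache_invalidate_fd(cache, dev->fd);
 *	close(dev->fd);
 */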
/*
 * Prefetches the blocks necessary to satisfy a byte range.
 */
void bcache_prefetch_bytes(struct bcache *cache, int fd, off_t start, size_t len);

/*
 * Reads and writes the bytes.  Returns false if errors occur.
 */
bool bcache_read_bytes(struct bcache *cache, int fd, off_t start, size_t len, void *data);
bool bcache_write_bytes(struct bcache *cache, int fd, off_t start, size_t len, void *data);
bool bcache_write_zeros(struct bcache *cache, int fd, off_t start, size_t len);
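/*
 * A small usage sketch for the byte-range interface (the offset and length
 * are arbitrary examples): prefetch first so the IO can be issued
 * asynchronously, then read and update the range.
 *
 *	char buf[512];
 *
 *	bcache_prefetch_bytes(cache, fd, 4096, sizeof(buf));
 *	... prefetch ranges on other devices here to overlap the IO ...
 *
 *	if (!bcache_read_bytes(cache, fd, 4096, sizeof(buf), buf))
 *		return false;
 *
 *	update_header(buf);	// hypothetical helper
 *
 *	if (!bcache_write_bytes(cache, fd, 4096, sizeof(buf), buf))
 *		return false;
 */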
/*----------------------------------------------------------------*/
#endif