/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BKEY_H
#define _BCACHEFS_BKEY_H

#include <linux/bug.h>

#include "bcachefs_format.h"
#include "bkey_types.h"
#include "btree_types.h"
#include "util.h"
#include "vstructs.h"

enum bkey_invalid_flags {
	BKEY_INVALID_WRITE	= (1U << 0),
	BKEY_INVALID_COMMIT	= (1U << 1),
	BKEY_INVALID_JOURNAL	= (1U << 2),
};

#if 0
/*
 * compiled unpack functions are disabled, pending a new interface for
 * dynamically allocating executable memory:
 */
#ifdef CONFIG_X86_64
#define HAVE_BCACHEFS_COMPILED_UNPACK	1
#endif
#endif

void bch2_bkey_packed_to_binary_text(struct printbuf *,
				     const struct bkey_format *,
				     const struct bkey_packed *);

enum bkey_lr_packed {
	BKEY_PACKED_BOTH,
	BKEY_PACKED_RIGHT,
	BKEY_PACKED_LEFT,
	BKEY_PACKED_NONE,
};

#define bkey_lr_packed(_l, _r)						\
	((_l)->format + ((_r)->format << 1))

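/*
 * Both copy helpers copy the key and the value together: u64s counts the
 * combined size of key and value, in u64s.
 */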
static inline void bkey_p_copy(struct bkey_packed *dst, const struct bkey_packed *src)
{
	memcpy_u64s_small(dst, src, src->u64s);
}

static inline void bkey_copy(struct bkey_i *dst, const struct bkey_i *src)
{
	memcpy_u64s_small(dst, src, src->k.u64s);
}

struct btree;

__pure
unsigned bch2_bkey_greatest_differing_bit(const struct btree *,
					  const struct bkey_packed *,
					  const struct bkey_packed *);
__pure
unsigned bch2_bkey_ffs(const struct btree *, const struct bkey_packed *);

__pure
int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *,
					  const struct bkey_packed *,
					  const struct btree *);

__pure
int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *,
					       const struct bkey_packed *,
					       const struct bpos *);

__pure
int bch2_bkey_cmp_packed(const struct btree *,
			 const struct bkey_packed *,
			 const struct bkey_packed *);

__pure
int __bch2_bkey_cmp_left_packed(const struct btree *,
				const struct bkey_packed *,
				const struct bpos *);

static inline __pure
int bkey_cmp_left_packed(const struct btree *b,
			 const struct bkey_packed *l, const struct bpos *r)
{
	return __bch2_bkey_cmp_left_packed(b, l, r);
}

/*
 * The compiler generates better code when we pass bpos by ref, but it's often
 * enough terribly convenient to pass it by val... as much as I hate c++, const
 * ref would be nice here:
 */
__pure __flatten
static inline int bkey_cmp_left_packed_byval(const struct btree *b,
					     const struct bkey_packed *l,
					     struct bpos r)
{
	return bkey_cmp_left_packed(b, l, &r);
}

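/*
 * bpos comparisons: positions sort lexicographically by inode, then offset,
 * then snapshot. bpos_eq() ORs together the XORs of the three fields so that
 * the compiler can emit branchless code.
 */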
static __always_inline bool bpos_eq(struct bpos l, struct bpos r)
{
	return  !((l.inode	^ r.inode) |
		  (l.offset	^ r.offset) |
		  (l.snapshot	^ r.snapshot));
}

static __always_inline bool bpos_lt(struct bpos l, struct bpos r)
{
	return  l.inode != r.inode		? l.inode < r.inode :
		l.offset != r.offset		? l.offset < r.offset :
		l.snapshot != r.snapshot	? l.snapshot < r.snapshot : false;
}

static __always_inline bool bpos_le(struct bpos l, struct bpos r)
{
	return  l.inode != r.inode		? l.inode < r.inode :
		l.offset != r.offset		? l.offset < r.offset :
		l.snapshot != r.snapshot	? l.snapshot < r.snapshot : true;
}

static __always_inline bool bpos_gt(struct bpos l, struct bpos r)
{
	return bpos_lt(r, l);
}

static __always_inline bool bpos_ge(struct bpos l, struct bpos r)
{
	return bpos_le(r, l);
}

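/*
 * Three-way comparison (returns -1/0/1 via cmp_int()), for sort and search
 * code; the boolean helpers above are cheaper when only a single ordering
 * test is needed.
 */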
static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
{
	return  cmp_int(l.inode,    r.inode) ?:
		cmp_int(l.offset,   r.offset) ?:
		cmp_int(l.snapshot, r.snapshot);
}

static inline struct bpos bpos_min(struct bpos l, struct bpos r)
{
	return bpos_lt(l, r) ? l : r;
}

static inline struct bpos bpos_max(struct bpos l, struct bpos r)
{
	return bpos_gt(l, r) ? l : r;
}

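/*
 * The bkey_* comparisons below are like their bpos_* counterparts, except
 * that they ignore the snapshot field and compare only inode and offset.
 */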
static __always_inline bool bkey_eq(struct bpos l, struct bpos r)
{
	return  !((l.inode ^ r.inode) |
		  (l.offset ^ r.offset));
}

static __always_inline bool bkey_lt(struct bpos l, struct bpos r)
{
	return l.inode != r.inode
		? l.inode < r.inode
		: l.offset < r.offset;
}

static __always_inline bool bkey_le(struct bpos l, struct bpos r)
{
	return l.inode != r.inode
		? l.inode < r.inode
		: l.offset <= r.offset;
}

static __always_inline bool bkey_gt(struct bpos l, struct bpos r)
{
	return bkey_lt(r, l);
}

static __always_inline bool bkey_ge(struct bpos l, struct bpos r)
{
	return bkey_le(r, l);
}

static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
{
	return  cmp_int(l.inode,  r.inode) ?:
		cmp_int(l.offset, r.offset);
}

static inline struct bpos bkey_min(struct bpos l, struct bpos r)
{
	return bkey_lt(l, r) ? l : r;
}

static inline struct bpos bkey_max(struct bpos l, struct bpos r)
{
	return bkey_gt(l, r) ? l : r;
}

void bch2_bpos_swab(struct bpos *);
void bch2_bkey_swab_key(const struct bkey_format *, struct bkey_packed *);

static __always_inline int bversion_cmp(struct bversion l, struct bversion r)
{
	return  cmp_int(l.hi, r.hi) ?:
		cmp_int(l.lo, r.lo);
}

#define ZERO_VERSION	((struct bversion) { .hi = 0, .lo = 0 })
#define MAX_VERSION	((struct bversion) { .hi = ~0, .lo = ~0ULL })

static __always_inline int bversion_zero(struct bversion v)
{
	return !bversion_cmp(v, ZERO_VERSION);
}

#ifdef CONFIG_BCACHEFS_DEBUG
/* statement expressions confusing unlikely()? */
#define bkey_packed(_k)						\
	({ EBUG_ON((_k)->format > KEY_FORMAT_CURRENT);		\
	   (_k)->format != KEY_FORMAT_CURRENT; })
#else
#define bkey_packed(_k)		((_k)->format != KEY_FORMAT_CURRENT)
#endif

/*
 * It's safe to treat an unpacked bkey as a packed one, but not the reverse
 */
static inline struct bkey_packed *bkey_to_packed(struct bkey_i *k)
{
	return (struct bkey_packed *) k;
}

static inline const struct bkey_packed *bkey_to_packed_c(const struct bkey_i *k)
{
	return (const struct bkey_packed *) k;
}

static inline struct bkey_i *packed_to_bkey(struct bkey_packed *k)
{
	return bkey_packed(k) ? NULL : (struct bkey_i *) k;
}

static inline const struct bkey *packed_to_bkey_c(const struct bkey_packed *k)
{
	return bkey_packed(k) ? NULL : (const struct bkey *) k;
}

static inline unsigned bkey_format_key_bits(const struct bkey_format *format)
{
	return format->bits_per_field[BKEY_FIELD_INODE] +
	       format->bits_per_field[BKEY_FIELD_OFFSET] +
	       format->bits_per_field[BKEY_FIELD_SNAPSHOT];
}

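/*
 * bpos_successor()/bpos_predecessor() treat the snapshot field as part of the
 * key, carrying into offset and then inode; the _nosnap variants zero the
 * snapshot field and step only the inode:offset portion. Stepping past the
 * largest (or smallest) representable position is a bug.
 */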
static inline struct bpos bpos_successor(struct bpos p)
{
	if (!++p.snapshot &&
	    !++p.offset &&
	    !++p.inode)
		BUG();

	return p;
}

static inline struct bpos bpos_predecessor(struct bpos p)
{
	if (!p.snapshot-- &&
	    !p.offset-- &&
	    !p.inode--)
		BUG();

	return p;
}

static inline struct bpos bpos_nosnap_successor(struct bpos p)
{
	p.snapshot = 0;

	if (!++p.offset &&
	    !++p.inode)
		BUG();

	return p;
}

static inline struct bpos bpos_nosnap_predecessor(struct bpos p)
{
	p.snapshot = 0;

	if (!p.offset-- &&
	    !p.inode--)
		BUG();

	return p;
}

static inline u64 bkey_start_offset(const struct bkey *k)
{
	return k->p.offset - k->size;
}

static inline struct bpos bkey_start_pos(const struct bkey *k)
{
	return (struct bpos) {
		.inode		= k->p.inode,
		.offset		= bkey_start_offset(k),
		.snapshot	= k->p.snapshot,
	};
}

/* Packed helpers */

static inline unsigned bkeyp_key_u64s(const struct bkey_format *format,
				      const struct bkey_packed *k)
{
	return bkey_packed(k) ? format->key_u64s : BKEY_U64s;
}

static inline unsigned bkeyp_key_bytes(const struct bkey_format *format,
				       const struct bkey_packed *k)
{
	return bkeyp_key_u64s(format, k) * sizeof(u64);
}

static inline unsigned bkeyp_val_u64s(const struct bkey_format *format,
				      const struct bkey_packed *k)
{
	return k->u64s - bkeyp_key_u64s(format, k);
}

static inline size_t bkeyp_val_bytes(const struct bkey_format *format,
				     const struct bkey_packed *k)
{
	return bkeyp_val_u64s(format, k) * sizeof(u64);
}

static inline void set_bkeyp_val_u64s(const struct bkey_format *format,
				      struct bkey_packed *k, unsigned val_u64s)
{
	k->u64s = bkeyp_key_u64s(format, k) + val_u64s;
}

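/* The value starts immediately after the key, packed or unpacked: */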
#define bkeyp_val(_format, _k)						\
	((struct bch_val *) ((u64 *) (_k)->_data + bkeyp_key_u64s(_format, _k)))

extern const struct bkey_format bch2_bkey_format_current;

bool bch2_bkey_transform(const struct bkey_format *,
			 struct bkey_packed *,
			 const struct bkey_format *,
			 const struct bkey_packed *);

struct bkey __bch2_bkey_unpack_key(const struct bkey_format *,
				   const struct bkey_packed *);

#ifndef HAVE_BCACHEFS_COMPILED_UNPACK
struct bpos __bkey_unpack_pos(const struct bkey_format *,
			      const struct bkey_packed *);
#endif

bool bch2_bkey_pack_key(struct bkey_packed *, const struct bkey *,
			const struct bkey_format *);

enum bkey_pack_pos_ret {
	BKEY_PACK_POS_EXACT,
	BKEY_PACK_POS_SMALLER,
	BKEY_PACK_POS_FAIL,
};

enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *, struct bpos,
						const struct btree *);

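/*
 * Succeeds only if @in could be packed exactly; bch2_bkey_pack_pos_lossy()
 * may instead return BKEY_PACK_POS_SMALLER, meaning it packed the closest
 * smaller representable position:
 */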
static inline bool bkey_pack_pos(struct bkey_packed *out, struct bpos in,
				 const struct btree *b)
{
	return bch2_bkey_pack_pos_lossy(out, in, b) == BKEY_PACK_POS_EXACT;
}

void bch2_bkey_unpack(const struct btree *, struct bkey_i *,
		      const struct bkey_packed *);
bool bch2_bkey_pack(struct bkey_packed *, const struct bkey_i *,
		    const struct bkey_format *);

typedef void (*compiled_unpack_fn)(struct bkey *, const struct bkey_packed *);

static inline void
__bkey_unpack_key_format_checked(const struct btree *b,
				 struct bkey *dst,
				 const struct bkey_packed *src)
{
	if (IS_ENABLED(HAVE_BCACHEFS_COMPILED_UNPACK)) {
		/* unpack function compiled for this btree node's format: */
		compiled_unpack_fn unpack_fn = b->aux_data;
		unpack_fn(dst, src);

		if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
		    bch2_expensive_debug_checks) {
			struct bkey dst2 = __bch2_bkey_unpack_key(&b->format, src);

			BUG_ON(memcmp(dst, &dst2, sizeof(*dst)));
		}
	} else {
		*dst = __bch2_bkey_unpack_key(&b->format, src);
	}
}

static inline struct bkey
bkey_unpack_key_format_checked(const struct btree *b,
			       const struct bkey_packed *src)
{
	struct bkey dst;

	__bkey_unpack_key_format_checked(b, &dst, src);
	return dst;
}

static inline void __bkey_unpack_key(const struct btree *b,
				     struct bkey *dst,
				     const struct bkey_packed *src)
{
	if (likely(bkey_packed(src)))
		__bkey_unpack_key_format_checked(b, dst, src);
	else
		*dst = *packed_to_bkey_c(src);
}

/**
 * bkey_unpack_key -- unpack just the key, not the value
 */
static inline struct bkey bkey_unpack_key(const struct btree *b,
					  const struct bkey_packed *src)
{
	return likely(bkey_packed(src))
		? bkey_unpack_key_format_checked(b, src)
		: *packed_to_bkey_c(src);
}

static inline struct bpos
bkey_unpack_pos_format_checked(const struct btree *b,
			       const struct bkey_packed *src)
{
#ifdef HAVE_BCACHEFS_COMPILED_UNPACK
	return bkey_unpack_key_format_checked(b, src).p;
#else
	return __bkey_unpack_pos(&b->format, src);
#endif
}

static inline struct bpos bkey_unpack_pos(const struct btree *b,
					  const struct bkey_packed *src)
{
	return likely(bkey_packed(src))
		? bkey_unpack_pos_format_checked(b, src)
		: packed_to_bkey_c(src)->p;
}

/* Disassembled bkeys */

static inline struct bkey_s_c bkey_disassemble(const struct btree *b,
					       const struct bkey_packed *k,
					       struct bkey *u)
{
	__bkey_unpack_key(b, u, k);

	return (struct bkey_s_c) { u, bkeyp_val(&b->format, k), };
}

/* non const version: */
static inline struct bkey_s __bkey_disassemble(const struct btree *b,
					       struct bkey_packed *k,
					       struct bkey *u)
{
	__bkey_unpack_key(b, u, k);

	return (struct bkey_s) { .k = u, .v = bkeyp_val(&b->format, k), };
}

static inline u64 bkey_field_max(const struct bkey_format *f,
				 enum bch_bkey_fields nr)
{
	return f->bits_per_field[nr] < 64
		? (le64_to_cpu(f->field_offset[nr]) +
		   ~(~0ULL << f->bits_per_field[nr]))
		: U64_MAX;
}

#ifdef HAVE_BCACHEFS_COMPILED_UNPACK
int bch2_compile_bkey_format(const struct bkey_format *, void *);
#else
static inline int bch2_compile_bkey_format(const struct bkey_format *format,
					   void *out) { return 0; }
#endif

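/*
 * Unlike bkey_copy(), this copies the unpacked key and the value separately;
 * with a disassembled key, the value is not contiguous with the key:
 */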
static inline void bkey_reassemble(struct bkey_i *dst,
				   struct bkey_s_c src)
{
	dst->k = *src.k;
	memcpy_u64s_small(&dst->v, src.v, bkey_val_u64s(src.k));
}

/* byte order helpers */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
static inline unsigned high_word_offset(const struct bkey_format *f)
{
	return f->key_u64s - 1;
}

#define high_bit_offset		0
#define nth_word(p, n)		((p) - (n))

#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
static inline unsigned high_word_offset(const struct bkey_format *f)
{
	return 0;
}

#define high_bit_offset		KEY_PACKED_BITS_START
#define nth_word(p, n)		((p) + (n))

#else
#error edit for your odd byteorder.
#endif

#define high_word(f, k)		((u64 *) (k)->_data + high_word_offset(f))

#define next_word(p)		nth_word(p, 1)
#define prev_word(p)		nth_word(p, -1)

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_bkey_pack_test(void);
#else
static inline void bch2_bkey_pack_test(void) {}
#endif

#define bkey_fields()							\
	x(BKEY_FIELD_INODE,		p.inode)			\
	x(BKEY_FIELD_OFFSET,		p.offset)			\
	x(BKEY_FIELD_SNAPSHOT,		p.snapshot)			\
	x(BKEY_FIELD_SIZE,		size)				\
	x(BKEY_FIELD_VERSION_HI,	version.hi)			\
	x(BKEY_FIELD_VERSION_LO,	version.lo)

struct bkey_format_state {
	u64 field_min[BKEY_NR_FIELDS];
	u64 field_max[BKEY_NR_FIELDS];
};

void bch2_bkey_format_init(struct bkey_format_state *);

static inline void __bkey_format_add(struct bkey_format_state *s, unsigned field, u64 v)
{
	s->field_min[field] = min(s->field_min[field], v);
	s->field_max[field] = max(s->field_max[field], v);
}

/*
 * Expands the format state @s so that @k can be successfully packed by the
 * format that bch2_bkey_format_done() will produce:
 */
static inline void bch2_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k)
{
#define x(id, field) __bkey_format_add(s, id, k->field);
	bkey_fields()
#undef x
}

void bch2_bkey_format_add_pos(struct bkey_format_state *, struct bpos);
struct bkey_format bch2_bkey_format_done(struct bkey_format_state *);

int bch2_bkey_format_invalid(struct bch_fs *, struct bkey_format *,
			     enum bkey_invalid_flags, struct printbuf *);
void bch2_bkey_format_to_text(struct printbuf *, const struct bkey_format *);

#endif /* _BCACHEFS_BKEY_H */