for-6.11/block-post-20240722
-----BEGIN PGP SIGNATURE-----
iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmaeY00QHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpjPGD/9CPo93+V/ztfzY1J18KhA2CCUh1uuxZIjx
dLfi07Bo+gyLwB1vaSf0bNy9gM8SzGFSMszSIDTErNq9/F6RvWjXN0CchyQf1Wii
o2UyQg8JLjT2o1pJSsdJySZQRsG/daWUHzHaX1kD343Cd6OBV2YaVFdYTaXUGg4v
G1AVh7qFvQhAIg1jV8q2z7QC7PSeuTnvyvY65Z8/iVJe95FayOrtGmDPTaJab8r2
7uEFiWZk23erzNygVdcSoNIrwWFmRARz5o3IvwJJfEL08hkdoAqu6vD2oCUZspKU
3g4wU6JrN0QYQpVwIJ9WcwYcoOm6iMm9xwCVMsp8R3KRUU107HjaiEazFDGk4HW4
ozZTa7leTXnrRqnjVhcQpUvC+1uVLCFN8sSElNY7m2dg0IojnlMz+t3lMiTtaR9N
Rt6wy5alVQFlb2uhzALuUh6HM1zA98swWySNoP0arTkOT9kjXwwAgn0I+M1s9Uxo
FaQvM0YnAsb2C8LSpNtZWLaTlRSLTzUsGThLSJMBZueIJ9+BF23i7W7euklCNxjj
Jl6CykEkEkacOxU6b9PG6qSnUq9JJ+W7gcJVing+ugAFrZDutxy6eJZXVv8wuvCC
EOxaADpSs2xAaH9V0BMmwO51w0NDWySyGPHB5UBkhNjqOji/oG3FvAITiboQArgS
FES4jtU1TA==
=dn4l
-----END PGP SIGNATURE-----

Merge tag 'for-6.11/block-post-20240722' of git://git.kernel.dk/linux

Pull block integrity mapping updates from Jens Axboe:
 "A set of cleanups and fixes for the block integrity support. Sent
  separately from the main block changes from last week, as they
  depended on later fixes in the 6.10-rc cycle"

* tag 'for-6.11/block-post-20240722' of git://git.kernel.dk/linux:
  block: don't free the integrity payload in bio_integrity_unmap_free_user
  block: don't free submitter owned integrity payload on I/O completion
  block: call bio_integrity_unmap_free_user from blk_rq_unmap_user
  block: don't call bio_uninit from bio_endio
  block: also return bio_integrity_payload * from stubs
  block: split integrity support out of bio.h
commit 0256994887
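Taken together, the series moves ownership of user-mapped integrity payloads entirely to the submitter: blk_rq_unmap_user() now unpins or copies back the metadata via bio_integrity_unmap_user(), bio_uninit() frees the payload, and the completion path only touches payloads the block layer generated itself. The fragment below is a sketch of what that leaves a passthrough driver doing, modeled on the NVMe changes in this merge; example_unmap() is a hypothetical name, not kernel API.

/*
 * Sketch only -- not kernel API.  example_unmap() stands in for driver
 * teardown code such as the NVMe passthrough path changed below.
 */
#include <linux/bio-integrity.h>
#include <linux/blk-mq.h>

static void example_unmap(struct bio *bio)
{
	/*
	 * The submitter owns the user-mapped integrity payload.  After this
	 * series, blk_rq_unmap_user() unpins it (or copies the bounce buffer
	 * back for reads) via bio_integrity_unmap_user(), and the payload
	 * itself is freed when the bio is torn down in bio_uninit(), so no
	 * separate bio_integrity_unmap_free_user() call remains.
	 */
	if (bio)
		blk_rq_unmap_user(bio);
}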
block/bio-integrity.c

@@ -22,9 +22,17 @@ void blk_flush_integrity(void)
 	flush_workqueue(kintegrityd_wq);
 }
 
-static void __bio_integrity_free(struct bio_set *bs,
-				 struct bio_integrity_payload *bip)
+/**
+ * bio_integrity_free - Free bio integrity payload
+ * @bio:	bio containing bip to be freed
+ *
+ * Description: Free the integrity portion of a bio.
+ */
+void bio_integrity_free(struct bio *bio)
 {
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+	struct bio_set *bs = bio->bi_pool;
+
 	if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
 		if (bip->bip_vec)
 			bvec_free(&bs->bvec_integrity_pool, bip->bip_vec,
@@ -33,6 +41,8 @@ static void __bio_integrity_free(struct bio_set *bs,
 	} else {
 		kfree(bip);
 	}
+	bio->bi_integrity = NULL;
+	bio->bi_opf &= ~REQ_INTEGRITY;
 }
 
 /**
@@ -86,7 +96,10 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
 
 	return bip;
 err:
-	__bio_integrity_free(bs, bip);
+	if (bs && mempool_initialized(&bs->bio_integrity_pool))
+		mempool_free(bip, &bs->bio_integrity_pool);
+	else
+		kfree(bip);
 	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL(bio_integrity_alloc);
@@ -118,64 +131,27 @@ static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
 	bio_integrity_unpin_bvec(copy, nr_vecs, true);
 }
 
-static void bio_integrity_unmap_user(struct bio_integrity_payload *bip)
+/**
+ * bio_integrity_unmap_user - Unmap user integrity payload
+ * @bio:	bio containing bip to be unmapped
+ *
+ * Unmap the user mapped integrity portion of a bio.
+ */
+void bio_integrity_unmap_user(struct bio *bio)
 {
-	bool dirty = bio_data_dir(bip->bip_bio) == READ;
+	struct bio_integrity_payload *bip = bio_integrity(bio);
 
 	if (bip->bip_flags & BIP_COPY_USER) {
-		if (dirty)
+		if (bio_data_dir(bio) == READ)
 			bio_integrity_uncopy_user(bip);
 		kfree(bvec_virt(bip->bip_vec));
 		return;
 	}
 
-	bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt, dirty);
+	bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt,
+				 bio_data_dir(bio) == READ);
 }
 
-/**
- * bio_integrity_free - Free bio integrity payload
- * @bio:	bio containing bip to be freed
- *
- * Description: Used to free the integrity portion of a bio. Usually
- * called from bio_free().
- */
-void bio_integrity_free(struct bio *bio)
-{
-	struct bio_integrity_payload *bip = bio_integrity(bio);
-	struct bio_set *bs = bio->bi_pool;
-
-	if (bip->bip_flags & BIP_INTEGRITY_USER)
-		return;
-	if (bip->bip_flags & BIP_BLOCK_INTEGRITY)
-		kfree(bvec_virt(bip->bip_vec));
-
-	__bio_integrity_free(bs, bip);
-	bio->bi_integrity = NULL;
-	bio->bi_opf &= ~REQ_INTEGRITY;
-}
-
-/**
- * bio_integrity_unmap_free_user - Unmap and free bio user integrity payload
- * @bio:	bio containing bip to be unmapped and freed
- *
- * Description: Used to unmap and free the user mapped integrity portion of a
- * bio. Submitter attaching the user integrity buffer is responsible for
- * unmapping and freeing it during completion.
- */
-void bio_integrity_unmap_free_user(struct bio *bio)
-{
-	struct bio_integrity_payload *bip = bio_integrity(bio);
-	struct bio_set *bs = bio->bi_pool;
-
-	if (WARN_ON_ONCE(!(bip->bip_flags & BIP_INTEGRITY_USER)))
-		return;
-	bio_integrity_unmap_user(bip);
-	__bio_integrity_free(bs, bip);
-	bio->bi_integrity = NULL;
-	bio->bi_opf &= ~REQ_INTEGRITY;
-}
-EXPORT_SYMBOL(bio_integrity_unmap_free_user);
-
 /**
  * bio_integrity_add_page - Attach integrity metadata
  * @bio:	bio to update
@@ -274,7 +250,7 @@ static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
 		goto free_bip;
 	}
 
-	bip->bip_flags |= BIP_INTEGRITY_USER | BIP_COPY_USER;
+	bip->bip_flags |= BIP_COPY_USER;
 	bip->bip_iter.bi_sector = seed;
 	bip->bip_vcnt = nr_vecs;
 	return 0;
@@ -295,7 +271,6 @@ static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec,
 		return PTR_ERR(bip);
 
 	memcpy(bip->bip_vec, bvec, nr_vecs * sizeof(*bvec));
-	bip->bip_flags |= BIP_INTEGRITY_USER;
 	bip->bip_iter.bi_sector = seed;
 	bip->bip_iter.bi_size = len;
 	bip->bip_vcnt = nr_vecs;
@@ -503,6 +478,8 @@ static void bio_integrity_verify_fn(struct work_struct *work)
 	struct bio *bio = bip->bip_bio;
 
 	blk_integrity_verify(bio);
+
+	kfree(bvec_virt(bip->bip_vec));
 	bio_integrity_free(bio);
 	bio_endio(bio);
 }
@@ -523,13 +500,13 @@ bool __bio_integrity_endio(struct bio *bio)
 	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
 	struct bio_integrity_payload *bip = bio_integrity(bio);
 
-	if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
-	    (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->csum_type) {
+	if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && bi->csum_type) {
 		INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
 		queue_work(kintegrityd_wq, &bip->bip_work);
 		return false;
 	}
 
+	kfree(bvec_virt(bip->bip_vec));
 	bio_integrity_free(bio);
 	return true;
 }
block/bio.c (16 lines changed)

@@ -4,7 +4,7 @@
  */
 #include <linux/mm.h>
 #include <linux/swap.h>
-#include <linux/bio.h>
+#include <linux/bio-integrity.h>
 #include <linux/blkdev.h>
 #include <linux/uio.h>
 #include <linux/iocontext.h>
@@ -1630,8 +1630,18 @@ again:
 		goto again;
 	}
 
-	/* release cgroup info */
-	bio_uninit(bio);
+#ifdef CONFIG_BLK_CGROUP
+	/*
+	 * Release cgroup info. We shouldn't have to do this here, but quite
+	 * a few callers of bio_init fail to call bio_uninit, so we cover up
+	 * for that here at least for now.
+	 */
+	if (bio->bi_blkg) {
+		blkg_put(bio->bi_blkg);
+		bio->bi_blkg = NULL;
+	}
+#endif
+
 	if (bio->bi_end_io)
 		bio->bi_end_io(bio);
 }
block/blk-map.c

@@ -757,6 +757,9 @@ int blk_rq_unmap_user(struct bio *bio)
 			bio_release_pages(bio, bio_data_dir(bio) == READ);
 		}
 
+		if (bio_integrity(bio))
+			bio_integrity_unmap_user(bio);
+
 		next_bio = bio;
 		bio = bio->bi_next;
 		blk_mq_map_bio_put(next_bio);
block/blk.h (14 lines changed)

@@ -2,6 +2,7 @@
 #ifndef BLK_INTERNAL_H
 #define BLK_INTERNAL_H
 
+#include <linux/bio-integrity.h>
 #include <linux/blk-crypto.h>
 #include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
 #include <linux/sched/sysctl.h>
@@ -201,11 +202,20 @@ static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 void blk_flush_integrity(void);
-bool __bio_integrity_endio(struct bio *);
 void bio_integrity_free(struct bio *bio);
+
+/*
+ * Integrity payloads can either be owned by the submitter, in which case
+ * bio_uninit will free them, or owned and generated by the block layer,
+ * in which case we'll verify them here (for reads) and free them before
+ * the bio is handed back to the submitter.
+ */
+bool __bio_integrity_endio(struct bio *bio);
 static inline bool bio_integrity_endio(struct bio *bio)
 {
-	if (bio_integrity(bio))
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+
+	if (bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY))
 		return __bio_integrity_endio(bio);
 	return true;
 }
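Restating the ownership comment added to blk.h above in code form — an illustrative sketch only, not a kernel function; example_block_layer_owned() is a made-up name:

/*
 * Sketch: only payloads generated by the block layer itself (marked
 * BIP_BLOCK_INTEGRITY by bio_integrity_prep()) are verified and freed at
 * completion; anything else is submitter owned, is left alone by
 * bio_endio(), and is unmapped by the submitter and freed by bio_uninit().
 */
#include <linux/bio-integrity.h>

static bool example_block_layer_owned(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	return bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY);
}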
block/bounce.c

@@ -10,7 +10,7 @@
 #include <linux/export.h>
 #include <linux/swap.h>
 #include <linux/gfp.h>
-#include <linux/bio.h>
+#include <linux/bio-integrity.h>
 #include <linux/pagemap.h>
 #include <linux/mempool.h>
 #include <linux/blkdev.h>
drivers/md/dm.c

@@ -11,6 +11,7 @@
 #include "dm-uevent.h"
 #include "dm-ima.h"
 
+#include <linux/bio-integrity.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
drivers/nvme/host/ioctl.c

@@ -3,6 +3,7 @@
  * Copyright (c) 2011-2014, Intel Corporation.
  * Copyright (c) 2017-2021 Christoph Hellwig.
  */
+#include <linux/bio-integrity.h>
 #include <linux/ptrace.h>	/* for force_successful_syscall_return */
 #include <linux/nvme_ioctl.h>
 #include <linux/io_uring/cmd.h>
@@ -111,13 +112,6 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 	return req;
 }
 
-static void nvme_unmap_bio(struct bio *bio)
-{
-	if (bio_integrity(bio))
-		bio_integrity_unmap_free_user(bio);
-	blk_rq_unmap_user(bio);
-}
-
 static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
 		u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
@@ -164,7 +158,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 
 out_unmap:
 	if (bio)
-		nvme_unmap_bio(bio);
+		blk_rq_unmap_user(bio);
 out:
 	blk_mq_free_request(req);
 	return ret;
@@ -202,7 +196,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	if (result)
 		*result = le64_to_cpu(nvme_req(req)->result.u64);
 	if (bio)
-		nvme_unmap_bio(bio);
+		blk_rq_unmap_user(bio);
 	blk_mq_free_request(req);
 
 	if (effects)
@@ -413,7 +407,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
 
 	if (pdu->bio)
-		nvme_unmap_bio(pdu->bio);
+		blk_rq_unmap_user(pdu->bio);
 	io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
 }
 
@@ -439,7 +433,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
 	 */
 	if (blk_rq_is_poll(req)) {
 		if (pdu->bio)
-			nvme_unmap_bio(pdu->bio);
+			blk_rq_unmap_user(pdu->bio);
 		io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
 	} else {
 		io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
drivers/scsi/sd.c

@@ -33,11 +33,12 @@
  *	than the level indicated above to trigger output.
  */
 
+#include <linux/bio-integrity.h>
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/bio.h>
 #include <linux/hdreg.h>
 #include <linux/errno.h>
 #include <linux/idr.h>
include/linux/bio-integrity.h (new file, 152 lines)

@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BIO_INTEGRITY_H
+#define _LINUX_BIO_INTEGRITY_H
+
+#include <linux/bio.h>
+
+enum bip_flags {
+	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
+	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
+	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
+	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
+	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
+	BIP_COPY_USER		= 1 << 5, /* Kernel bounce buffer in use */
+};
+
+struct bio_integrity_payload {
+	struct bio		*bip_bio;	/* parent bio */
+
+	struct bvec_iter	bip_iter;
+
+	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
+	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
+	unsigned short		bip_flags;	/* control flags */
+
+	struct bvec_iter	bio_iter;	/* for rewinding parent bio */
+
+	struct work_struct	bip_work;	/* I/O completion */
+
+	struct bio_vec		*bip_vec;
+	struct bio_vec		bip_inline_vecs[];/* embedded bvec array */
+};
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+
+#define bip_for_each_vec(bvl, bip, iter)	\
+	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
+
+#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
+	for_each_bio(_bio)						\
+		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
+
+static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+{
+	if (bio->bi_opf & REQ_INTEGRITY)
+		return bio->bi_integrity;
+
+	return NULL;
+}
+
+static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+{
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+
+	if (bip)
+		return bip->bip_flags & flag;
+
+	return false;
+}
+
+static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
+{
+	return bip->bip_iter.bi_sector;
+}
+
+static inline void bip_set_seed(struct bio_integrity_payload *bip,
+				sector_t seed)
+{
+	bip->bip_iter.bi_sector = seed;
+}
+
+struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
+		unsigned int nr);
+int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len,
+		unsigned int offset);
+int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed);
+void bio_integrity_unmap_user(struct bio *bio);
+bool bio_integrity_prep(struct bio *bio);
+void bio_integrity_advance(struct bio *bio, unsigned int bytes_done);
+void bio_integrity_trim(struct bio *bio);
+int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask);
+int bioset_integrity_create(struct bio_set *bs, int pool_size);
+void bioset_integrity_free(struct bio_set *bs);
+void bio_integrity_init(void);
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+
+static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+{
+	return NULL;
+}
+
+static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
+{
+	return 0;
+}
+
+static inline void bioset_integrity_free(struct bio_set *bs)
+{
+}
+
+static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf,
+					 ssize_t len, u32 seed)
+{
+	return -EINVAL;
+}
+
+static inline void bio_integrity_unmap_user(struct bio *bio)
+{
+}
+
+static inline bool bio_integrity_prep(struct bio *bio)
+{
+	return true;
+}
+
+static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
+				      gfp_t gfp_mask)
+{
+	return 0;
+}
+
+static inline void bio_integrity_advance(struct bio *bio,
+					 unsigned int bytes_done)
+{
+}
+
+static inline void bio_integrity_trim(struct bio *bio)
+{
+}
+
+static inline void bio_integrity_init(void)
+{
+}
+
+static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+{
+	return false;
+}
+
+static inline struct bio_integrity_payload *
+bio_integrity_alloc(struct bio *bio, gfp_t gfp, unsigned int nr)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
+					 unsigned int len, unsigned int offset)
+{
+	return 0;
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+#endif /* _LINUX_BIO_INTEGRITY_H */
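One consequence of the new header worth noting (together with "block: also return bio_integrity_payload * from stubs"): the enum and struct are defined outside the #ifdef and the !CONFIG_BLK_DEV_INTEGRITY stubs return a typed pointer, so callers can be written once and build either way. A minimal sketch, with example_meta_bytes() as a hypothetical helper rather than kernel API:

#include <linux/bio-integrity.h>

static unsigned int example_meta_bytes(struct bio *bio)
{
	/*
	 * With CONFIG_BLK_DEV_INTEGRITY disabled, bio_integrity() is the
	 * stub above and always returns NULL, so this branch typically
	 * compiles away; the typed return value is what lets the
	 * dereference below compile in that configuration.
	 */
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_iter.bi_size;
	return 0;
}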
include/linux/bio.h

@@ -321,69 +321,6 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
 #define bio_for_each_folio_all(fi, bio)				\
 	for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
 
-enum bip_flags {
-	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
-	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
-	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
-	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
-	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
-	BIP_INTEGRITY_USER	= 1 << 5, /* Integrity payload is user address */
-	BIP_COPY_USER		= 1 << 6, /* Kernel bounce buffer in use */
-};
-
-/*
- * bio integrity payload
- */
-struct bio_integrity_payload {
-	struct bio		*bip_bio;	/* parent bio */
-
-	struct bvec_iter	bip_iter;
-
-	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
-	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
-	unsigned short		bip_flags;	/* control flags */
-
-	struct bvec_iter	bio_iter;	/* for rewinding parent bio */
-
-	struct work_struct	bip_work;	/* I/O completion */
-
-	struct bio_vec		*bip_vec;
-	struct bio_vec		bip_inline_vecs[];/* embedded bvec array */
-};
-
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
-{
-	if (bio->bi_opf & REQ_INTEGRITY)
-		return bio->bi_integrity;
-
-	return NULL;
-}
-
-static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
-{
-	struct bio_integrity_payload *bip = bio_integrity(bio);
-
-	if (bip)
-		return bip->bip_flags & flag;
-
-	return false;
-}
-
-static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
-{
-	return bip->bip_iter.bi_sector;
-}
-
-static inline void bip_set_seed(struct bio_integrity_payload *bip,
-				sector_t seed)
-{
-	bip->bip_iter.bi_sector = seed;
-}
-
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-
 void bio_trim(struct bio *bio, sector_t offset, sector_t size);
 extern struct bio *bio_split(struct bio *bio, int sectors,
 			     gfp_t gfp, struct bio_set *bs);
@@ -721,99 +658,6 @@ static inline bool bioset_initialized(struct bio_set *bs)
 	return bs->bio_slab != NULL;
 }
 
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-#define bip_for_each_vec(bvl, bip, iter)	\
-	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
-
-#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
-	for_each_bio(_bio)						\
-		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
-
-int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed);
-void bio_integrity_unmap_free_user(struct bio *bio);
-extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
-extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
-extern bool bio_integrity_prep(struct bio *);
-extern void bio_integrity_advance(struct bio *, unsigned int);
-extern void bio_integrity_trim(struct bio *);
-extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
-extern int bioset_integrity_create(struct bio_set *, int);
-extern void bioset_integrity_free(struct bio_set *);
-extern void bio_integrity_init(void);
-
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-
-static inline void *bio_integrity(struct bio *bio)
-{
-	return NULL;
-}
-
-static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
-{
-	return 0;
-}
-
-static inline void bioset_integrity_free (struct bio_set *bs)
-{
-	return;
-}
-
-static inline bool bio_integrity_prep(struct bio *bio)
-{
-	return true;
-}
-
-static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
-				      gfp_t gfp_mask)
-{
-	return 0;
-}
-
-static inline void bio_integrity_advance(struct bio *bio,
-					 unsigned int bytes_done)
-{
-	return;
-}
-
-static inline void bio_integrity_trim(struct bio *bio)
-{
-	return;
-}
-
-static inline void bio_integrity_init(void)
-{
-	return;
-}
-
-static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
-{
-	return false;
-}
-
-static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp,
-					unsigned int nr)
-{
-	return ERR_PTR(-EINVAL);
-}
-
-static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
-					 unsigned int len, unsigned int offset)
-{
-	return 0;
-}
-
-static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf,
-					 ssize_t len, u32 seed)
-{
-	return -EINVAL;
-}
-static inline void bio_integrity_unmap_free_user(struct bio *bio)
-{
-}
-
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-
 /*
  * Mark a bio as polled. Note that for async polled IO, the caller must
  * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
include/linux/blk-integrity.h

@@ -3,6 +3,7 @@
 #define _LINUX_BLK_INTEGRITY_H
 
 #include <linux/blk-mq.h>
+#include <linux/bio-integrity.h>
 
 struct request;
 