target: pass a block_device to target_configure_unmap_from_queue

The SCSI target drivers are consumers of the block layer and should
generally work on struct block_device.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220415045258.199825-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Christoph Hellwig 2022-04-15 06:52:33 +02:00 committed by Jens Axboe
parent 179d8609d8
commit 817e8b51eb
4 changed files with 10 additions and 8 deletions

View File

@@ -834,9 +834,10 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
* in ATA and we need to set TPE=1 * in ATA and we need to set TPE=1
*/ */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
struct request_queue *q) struct block_device *bdev)
{ {
int block_size = queue_logical_block_size(q); struct request_queue *q = bdev_get_queue(bdev);
int block_size = bdev_logical_block_size(bdev);
if (!blk_queue_discard(q)) if (!blk_queue_discard(q))
return false; return false;

View File

@@ -134,10 +134,11 @@ static int fd_configure_device(struct se_device *dev)
*/ */
inode = file->f_mapping->host; inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) { if (S_ISBLK(inode->i_mode)) {
struct request_queue *q = bdev_get_queue(I_BDEV(inode)); struct block_device *bdev = I_BDEV(inode);
struct request_queue *q = bdev_get_queue(bdev);
unsigned long long dev_size; unsigned long long dev_size;
fd_dev->fd_block_size = bdev_logical_block_size(I_BDEV(inode)); fd_dev->fd_block_size = bdev_logical_block_size(bdev);
/* /*
* Determine the number of bytes from i_size_read() minus * Determine the number of bytes from i_size_read() minus
* one (1) logical sector from underlying struct block_device * one (1) logical sector from underlying struct block_device
@@ -150,7 +151,7 @@ static int fd_configure_device(struct se_device *dev)
dev_size, div_u64(dev_size, fd_dev->fd_block_size), dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size); fd_dev->fd_block_size);
if (target_configure_unmap_from_queue(&dev->dev_attrib, q)) if (target_configure_unmap_from_queue(&dev->dev_attrib, bdev))
pr_debug("IFILE: BLOCK Discard support available," pr_debug("IFILE: BLOCK Discard support available,"
" disabled by default\n"); " disabled by default\n");
/* /*

View File

@@ -119,7 +119,7 @@ static int iblock_configure_device(struct se_device *dev)
dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests; dev->dev_attrib.hw_queue_depth = q->nr_requests;
if (target_configure_unmap_from_queue(&dev->dev_attrib, q)) if (target_configure_unmap_from_queue(&dev->dev_attrib, bd))
pr_debug("IBLOCK: BLOCK Discard support available," pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n"); " disabled by default\n");

View File

@@ -14,7 +14,7 @@
#define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2 #define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2
#define TRANSPORT_FLAG_PASSTHROUGH_PGR 0x4 #define TRANSPORT_FLAG_PASSTHROUGH_PGR 0x4
struct request_queue; struct block_device;
struct scatterlist; struct scatterlist;
struct target_backend_ops { struct target_backend_ops {
@@ -117,7 +117,7 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
bool target_sense_desc_format(struct se_device *dev); bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
struct request_queue *q); struct block_device *bdev);
static inline bool target_dev_configured(struct se_device *se_dev) static inline bool target_dev_configured(struct se_device *se_dev)
{ {