libnvdimm fixes for 5.9-rc6
Merge tag 'libnvdimm-fixes-5.9-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm fixes from Dan Williams:
 "A handful of fixes to address a string of mistakes in the mechanism
  for device-mapper to determine if its component devices are dax
  capable.

  - Fix an original bug in device-mapper table reference counting when
    interrogating dax capability in the component device. This bug was
    hidden by the following bug.

  - Fix device-mapper to use the proper helper (dax_supported() instead
    of the leaf helper generic_fsdax_supported()) to determine dax
    operation of a stacked block device configuration. The original
    implementation is only valid for one level of dax-capable block
    device stacking. This bug was discovered while fixing the below
    regression.

  - Fix an infinite recursion regression introduced by broken attempts
    to quiet the generic_fsdax_supported() path and make it bail out
    before logging "dax capability not found" errors"

* tag 'libnvdimm-fixes-5.9-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  dax: Fix stack overflow when mounting fsdax pmem device
  dm: Call proper helper to determine dax support
  dm/dax: Fix table reference counts
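As a rough user-space model of the dispatch the second fix relies on (this is not the kernel code; struct dax_ops, model_dax_supported(), leaf_supported() and stacked_supported() below are illustrative stand-ins): dax_supported() goes through the device's own dax_operations entry, so a stacked driver such as device-mapper can ask each component device in turn, whereas the leaf helper generic_fsdax_supported() can only answer for a single device.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dax_dev;

struct dax_ops {
	bool (*supported)(struct dax_dev *dev);
};

struct dax_dev {
	const struct dax_ops *ops;
	struct dax_dev **components;	/* non-NULL for a stacked (dm-like) device */
	size_t nr_components;
	bool leaf_is_dax;		/* what a leaf-only probe would report */
};

/* Leaf probe: answers only for this one device, like generic_fsdax_supported(). */
static bool leaf_supported(struct dax_dev *dev)
{
	return dev->leaf_is_dax;
}

/* Generic entry point: dispatch through the device's own op, like dax_supported(). */
static bool model_dax_supported(struct dax_dev *dev)
{
	if (!dev)
		return false;
	return dev->ops->supported(dev);
}

/* Stacked probe: a dm-like device asks each component through the generic
 * entry point, so arbitrarily deep stacks are handled. */
static bool stacked_supported(struct dax_dev *dev)
{
	for (size_t i = 0; i < dev->nr_components; i++)
		if (!model_dax_supported(dev->components[i]))
			return false;
	return dev->nr_components > 0;
}

int main(void)
{
	static const struct dax_ops leaf_ops = { .supported = leaf_supported };
	static const struct dax_ops stack_ops = { .supported = stacked_supported };

	struct dax_dev pmem0 = { .ops = &leaf_ops, .leaf_is_dax = true };
	struct dax_dev pmem1 = { .ops = &leaf_ops, .leaf_is_dax = true };
	struct dax_dev *parts[] = { &pmem0, &pmem1 };
	struct dax_dev dm = {
		.ops = &stack_ops,
		.components = parts,
		.nr_components = 2,
	};

	/* Prints 1: the stacked device recurses into both leaves. */
	printf("dm dax capable: %d\n", model_dax_supported(&dm));
	return 0;
}

In the same spirit, switching device_supports_dax() over to dax_supported() in the patch below is what lets device-mapper handle more than one level of dax-capable stacking.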
commit 4a123dbaf3
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -85,6 +85,12 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
 		return false;
 	}
 
+	if (!dax_dev) {
+		pr_debug("%s: error: dax unsupported by block device\n",
+				bdevname(bdev, buf));
+		return false;
+	}
+
 	err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
 	if (err) {
 		pr_info("%s: error: unaligned partition for dax\n",
@@ -100,12 +106,6 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
 		return false;
 	}
 
-	if (!dax_dev || !bdev_dax_supported(bdev, blocksize)) {
-		pr_debug("%s: error: dax unsupported by block device\n",
-				bdevname(bdev, buf));
-		return false;
-	}
-
 	id = dax_read_lock();
 	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
@@ -325,11 +325,15 @@ EXPORT_SYMBOL_GPL(dax_direct_access);
 bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
 		int blocksize, sector_t start, sector_t len)
 {
+	if (!dax_dev)
+		return false;
+
 	if (!dax_alive(dax_dev))
 		return false;
 
 	return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
 }
+EXPORT_SYMBOL_GPL(dax_supported);
 
 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 		size_t bytes, struct iov_iter *i)
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -860,10 +860,14 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
 int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
 			sector_t start, sector_t len, void *data)
 {
-	int blocksize = *(int *) data;
+	int blocksize = *(int *) data, id;
+	bool rc;
 
-	return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
-				       start, len);
+	id = dax_read_lock();
+	rc = dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
+	dax_read_unlock(id);
+
+	return rc;
 }
 
 /* Check devices support synchronous DAX */
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1136,15 +1136,16 @@ static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
 {
 	struct mapped_device *md = dax_get_private(dax_dev);
 	struct dm_table *map;
+	bool ret = false;
 	int srcu_idx;
-	bool ret;
 
 	map = dm_get_live_table(md, &srcu_idx);
 	if (!map)
-		return false;
+		goto out;
 
 	ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);
 
+out:
 	dm_put_live_table(md, srcu_idx);
 
 	return ret;
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -130,6 +130,8 @@ static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
 	return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
 			sectors);
 }
+bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
+		int blocksize, sector_t start, sector_t len);
 
 static inline void fs_put_dax(struct dax_device *dax_dev)
 {
@@ -157,6 +159,13 @@ static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
 	return false;
 }
 
+static inline bool dax_supported(struct dax_device *dax_dev,
+		struct block_device *bdev, int blocksize, sector_t start,
+		sector_t len)
+{
+	return false;
+}
+
 static inline void fs_put_dax(struct dax_device *dax_dev)
 {
 }
@@ -189,14 +198,23 @@ static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
 }
 #endif
 
+#if IS_ENABLED(CONFIG_DAX)
 int dax_read_lock(void);
 void dax_read_unlock(int id);
+#else
+static inline int dax_read_lock(void)
+{
+	return 0;
+}
+
+static inline void dax_read_unlock(int id)
+{
+}
+#endif /* CONFIG_DAX */
 bool dax_alive(struct dax_device *dax_dev);
 void *dax_get_private(struct dax_device *dax_dev);
 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
 		void **kaddr, pfn_t *pfn);
-bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
-		int blocksize, sector_t start, sector_t len);
 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 		size_t bytes, struct iov_iter *i);
 size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
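The drivers/md/dm.c hunk above is the table reference-count fix: dm_get_live_table() acquires an SRCU read-side reference even when it returns no live table, so the old early "return false" leaked that reference, and the fix routes the no-table case through the same "out:" label as the normal path. Below is a minimal user-space sketch of that acquire/release discipline; get_live_table(), put_live_table() and the "held" counter are stand-ins, not dm's API.

#include <stdbool.h>
#include <stdio.h>

static int held;	/* stand-in for the read-side references dm tracks */

/* Like dm_get_live_table(): always acquires, even when no table is live. */
static void *get_live_table(int *idx)
{
	*idx = held++;
	return NULL;		/* pretend no live table exists right now */
}

/* Like dm_put_live_table(): must run once for every get. */
static void put_live_table(int idx)
{
	(void)idx;
	held--;
}

/* Mirrors the fixed dm_dax_supported(): the early-exit path jumps to "out"
 * instead of returning, so the release always pairs with the acquire. */
static bool supports_dax(void)
{
	bool ret = false;
	int idx;
	void *map = get_live_table(&idx);

	if (!map)
		goto out;	/* a bare "return false" here would leak the reference */

	ret = true;		/* stand-in for dm_table_supports_dax() */
out:
	put_live_table(idx);
	return ret;
}

int main(void)
{
	bool ok = supports_dax();

	/* Prints "supported=0 held=0": not supported, but nothing leaked. */
	printf("supported=%d held=%d\n", ok, held);
	return 0;
}

Initializing ret to false, as the hunk also does, is what keeps the shared exit path correct for the no-table case.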