Merge tag 'ceph-for-5.17-rc2' of git://github.com/ceph/ceph-client

Pull ceph fixes from Ilya Dryomov:
 "A ZERO_SIZE_PTR dereference fix from Xiubo and two fixes for async
  creates interacting with pool namespace-constrained OSD permissions
  from Jeff (marked for stable)"

* tag 'ceph-for-5.17-rc2' of git://github.com/ceph/ceph-client:
  ceph: set pool_ns in new inode layout for async creates
  ceph: properly put ceph_string reference after async create attempt
  ceph: put the requests/sessions when it fails to alloc memory
commit 8157f47073
Author: Linus Torvalds
Date:   2022-01-28 18:36:42 +02:00

2 changed files with 46 additions and 18 deletions
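
Before the diffs, one piece of background for the first fix: in the kernel, kmalloc-family calls with a size of 0 return ZERO_SIZE_PTR (a small non-NULL sentinel) rather than NULL, so a zero-length allocation sails past the usual NULL check and faults on the first dereference. A minimal sketch of that hazard, with a hypothetical demo() standing in for the real code (the actual fix below guards the allocation with likely(max_sessions)):

#include <linux/slab.h>

/* Hypothetical illustration of the ZERO_SIZE_PTR hazard, not ceph code. */
static int demo(unsigned int max_sessions)
{
        void **sessions;

        /*
         * With max_sessions == 0 this returns ZERO_SIZE_PTR rather
         * than NULL, so the error check below does not trip ...
         */
        sessions = kzalloc(max_sessions * sizeof(*sessions), GFP_KERNEL);
        if (!sessions)
                return -ENOMEM;

        /* ... and this store faults when max_sessions was 0. */
        sessions[0] = NULL;

        kfree(sessions);        /* kfree(ZERO_SIZE_PTR) is harmless */
        return 0;
}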

--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2218,6 +2218,7 @@ static int unsafe_request_wait(struct inode *inode)
         struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
         struct ceph_inode_info *ci = ceph_inode(inode);
         struct ceph_mds_request *req1 = NULL, *req2 = NULL;
+        unsigned int max_sessions;
         int ret, err = 0;

         spin_lock(&ci->i_unsafe_lock);
@@ -2235,37 +2236,45 @@ static int unsafe_request_wait(struct inode *inode)
         }
         spin_unlock(&ci->i_unsafe_lock);

+        /*
+         * The mdsc->max_sessions is unlikely to be changed
+         * mostly, here we will retry it by reallocating the
+         * sessions array memory to get rid of the mdsc->mutex
+         * lock.
+         */
+retry:
+        max_sessions = mdsc->max_sessions;
+
         /*
         * Trigger to flush the journal logs in all the relevant MDSes
         * manually, or in the worst case we must wait at most 5 seconds
         * to wait the journal logs to be flushed by the MDSes periodically.
         */
-        if (req1 || req2) {
+        if ((req1 || req2) && likely(max_sessions)) {
                 struct ceph_mds_session **sessions = NULL;
                 struct ceph_mds_session *s;
                 struct ceph_mds_request *req;
-                unsigned int max;
                 int i;

-                /*
-                 * The mdsc->max_sessions is unlikely to be changed
-                 * mostly, here we will retry it by reallocating the
-                 * sessions arrary memory to get rid of the mdsc->mutex
-                 * lock.
-                 */
-retry:
-                max = mdsc->max_sessions;
-                sessions = krealloc(sessions, max * sizeof(s), __GFP_ZERO);
-                if (!sessions)
-                        return -ENOMEM;
+                sessions = kzalloc(max_sessions * sizeof(s), GFP_KERNEL);
+                if (!sessions) {
+                        err = -ENOMEM;
+                        goto out;
+                }

                 spin_lock(&ci->i_unsafe_lock);
                 if (req1) {
                         list_for_each_entry(req, &ci->i_unsafe_dirops,
                                             r_unsafe_dir_item) {
                                 s = req->r_session;
-                                if (unlikely(s->s_mds >= max)) {
+                                if (unlikely(s->s_mds >= max_sessions)) {
                                         spin_unlock(&ci->i_unsafe_lock);
+                                        for (i = 0; i < max_sessions; i++) {
+                                                s = sessions[i];
+                                                if (s)
+                                                        ceph_put_mds_session(s);
+                                        }
+                                        kfree(sessions);
                                         goto retry;
                                 }
                                 if (!sessions[s->s_mds]) {
@@ -2278,8 +2287,14 @@ retry:
                         list_for_each_entry(req, &ci->i_unsafe_iops,
                                             r_unsafe_target_item) {
                                 s = req->r_session;
-                                if (unlikely(s->s_mds >= max)) {
+                                if (unlikely(s->s_mds >= max_sessions)) {
                                         spin_unlock(&ci->i_unsafe_lock);
+                                        for (i = 0; i < max_sessions; i++) {
+                                                s = sessions[i];
+                                                if (s)
+                                                        ceph_put_mds_session(s);
+                                        }
+                                        kfree(sessions);
                                         goto retry;
                                 }
                                 if (!sessions[s->s_mds]) {
@@ -2300,7 +2315,7 @@ retry:
                 spin_unlock(&ci->i_ceph_lock);

                 /* send flush mdlog request to MDSes */
-                for (i = 0; i < max; i++) {
+                for (i = 0; i < max_sessions; i++) {
                         s = sessions[i];
                         if (s) {
                                 send_flush_mdlog(s);
@@ -2317,15 +2332,19 @@ retry:
                 ret = !wait_for_completion_timeout(&req1->r_safe_completion,
                                     ceph_timeout_jiffies(req1->r_timeout));
                 if (ret)
                         err = -EIO;
-                ceph_mdsc_put_request(req1);
         }
         if (req2) {
                 ret = !wait_for_completion_timeout(&req2->r_safe_completion,
                                     ceph_timeout_jiffies(req2->r_timeout));
                 if (ret)
                         err = -EIO;
-                ceph_mdsc_put_request(req2);
         }
+
+out:
+        if (req1)
+                ceph_mdsc_put_request(req1);
+        if (req2)
+                ceph_mdsc_put_request(req2);
         return err;
 }
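
Two details in the hunks above are easy to miss. First, max_sessions is now sampled at the retry: label without taking mdsc->mutex; if a request's session index turns out to be >= max_sessions once i_unsafe_lock is held, every stashed session reference is dropped, the array is freed, and the whole pass is retried with the freshly read size. Second, the ceph_mdsc_put_request() calls move to a common out: label, so req1/req2 are no longer leaked when the allocation fails. A generic sketch of that snapshot-and-retry shape, under hypothetical demo_* names (not the ceph code):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_ctx {                       /* hypothetical container */
        spinlock_t lock;
        unsigned int max_ids;           /* grows rarely, under a mutex */
        unsigned int cur_id;            /* index only stable under lock */
};

static int demo_snapshot(struct demo_ctx *ctx, void ***out)
{
        void **slots;
        unsigned int max;

retry:
        max = READ_ONCE(ctx->max_ids);  /* sampled without the mutex */
        if (!max)                       /* mirrors likely(max_sessions) */
                return -ENOENT;

        slots = kcalloc(max, sizeof(*slots), GFP_KERNEL);
        if (!slots)
                return -ENOMEM;

        spin_lock(&ctx->lock);
        if (ctx->cur_id >= max) {       /* lost a race with growth */
                spin_unlock(&ctx->lock);
                kfree(slots);           /* the real code also puts its refs */
                goto retry;
        }
        slots[ctx->cur_id] = ctx;       /* record the item by its id */
        spin_unlock(&ctx->lock);

        *out = slots;
        return 0;
}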

--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -583,6 +583,7 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
         struct ceph_inode_info *ci = ceph_inode(dir);
         struct inode *inode;
         struct timespec64 now;
+        struct ceph_string *pool_ns;
         struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
         struct ceph_vino vino = { .ino = req->r_deleg_ino,
                                   .snap = CEPH_NOSNAP };
@@ -632,6 +633,12 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
         in.max_size = cpu_to_le64(lo->stripe_unit);

         ceph_file_layout_to_legacy(lo, &in.layout);
+        /* lo is private, so pool_ns can't change */
+        pool_ns = rcu_dereference_raw(lo->pool_ns);
+        if (pool_ns) {
+                iinfo.pool_ns_len = pool_ns->len;
+                iinfo.pool_ns_data = pool_ns->str;
+        }

         down_read(&mdsc->snap_rwsem);
         ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
@@ -750,8 +757,10 @@ retry:
                                 restore_deleg_ino(dir, req->r_deleg_ino);
                                 ceph_mdsc_put_request(req);
                                 try_async = false;
+                                ceph_put_string(rcu_dereference_raw(lo.pool_ns));
                                 goto retry;
                         }
+                        ceph_put_string(rcu_dereference_raw(lo.pool_ns));
                         goto out_req;
                 }
         }
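
Context for the pool_ns hunks: when ceph_atomic_open() attempts an async create, it copies the directory's layout into a local struct ceph_file_layout, taking a reference on the pool namespace string with ceph_try_get_string(). Before these fixes that reference was never put, and the namespace was never carried into the new inode's layout, which broke clients whose OSD caps are restricted to a pool namespace (hence both patches being marked for stable). The reference discipline, reduced to a hedged sketch (attempt_async_create() is a hypothetical stand-in for the real submit-and-fallback path visible in the last hunk, and the ceph_inode_info access is simplified):

#include <linux/ceph/ceph_fs.h>         /* struct ceph_file_layout */
#include <linux/ceph/string_table.h>    /* ceph_try_get_string/put */
#include <linux/rcupdate.h>

/* Hypothetical stand-in: pretend the MDS refused the async create. */
static int attempt_async_create(struct ceph_file_layout *lo)
{
        return -EJUKEBOX;
}

/* struct ceph_inode_info lives in the private fs/ceph/super.h. */
static int demo_async_create(struct ceph_inode_info *ci)
{
        struct ceph_file_layout lo = { 0 };
        int err;

        rcu_read_lock();
        /* copy the layout; take a ref on the namespace string (+1) */
        lo.pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
        rcu_read_unlock();

        err = attempt_async_create(&lo);

        /* the second fix: put the reference on every path out */
        ceph_put_string(rcu_dereference_raw(lo.pool_ns));
        return err;
}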