ceph: maintain i_head_snapc when any caps are dirty, not just for data

We used to use i_head_snapc to keep track of which snapc the current epoch
of dirty data was dirtied under.  It is used by queue_cap_snap to set up
the cap_snap.  However, since we queue cap snaps for any dirty caps, not
just for dirty file data, we need to keep a valid i_head_snapc anytime
we have dirty|flushing caps.  This fixes a NULL pointer deref in
queue_cap_snap when writing back dirty caps without data (e.g.,
snaptest-authwb.sh).

Signed-off-by: Sage Weil <sage@newdream.net>
This commit is contained in:
Sage Weil 2010-08-24 08:44:16 -07:00
parent 07a27e226d
commit 7d8cb26d7d
4 changed files with 26 additions and 7 deletions

View File

@@ -87,7 +87,7 @@ static int ceph_set_page_dirty(struct page *page)
 	/* dirty the head */
 	spin_lock(&inode->i_lock);
-	if (ci->i_wrbuffer_ref_head == 0)
+	if (ci->i_head_snapc == NULL)
 		ci->i_head_snapc = ceph_get_snap_context(snapc);
 	++ci->i_wrbuffer_ref_head;
 	if (ci->i_wrbuffer_ref == 0)
@@ -346,7 +346,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
 			break;
 		}
 	}
-	if (!snapc && ci->i_head_snapc) {
+	if (!snapc && ci->i_wrbuffer_ref_head) {
 		snapc = ceph_get_snap_context(ci->i_head_snapc);
 		dout(" head snapc %p has %d dirty pages\n",
 		     snapc, ci->i_wrbuffer_ref_head);

View File

@@ -1143,6 +1143,10 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 		for (i = 0; i < CEPH_CAP_BITS; i++)
 			if (flushing & (1 << i))
 				ci->i_cap_flush_tid[i] = flush_tid;
+
+		follows = ci->i_head_snapc->seq;
+	} else {
+		follows = 0;
 	}
 
 	keep = cap->implemented;
@@ -1156,7 +1160,6 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 	mtime = inode->i_mtime;
 	atime = inode->i_atime;
 	time_warp_seq = ci->i_time_warp_seq;
-	follows = ci->i_snap_realm->cached_context->seq;
 	uid = inode->i_uid;
 	gid = inode->i_gid;
 	mode = inode->i_mode;
@@ -1332,7 +1335,11 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
 	     ceph_cap_string(was | mask));
 	ci->i_dirty_caps |= mask;
 	if (was == 0) {
-		dout(" inode %p now dirty\n", &ci->vfs_inode);
+		if (!ci->i_head_snapc)
+			ci->i_head_snapc = ceph_get_snap_context(
+				ci->i_snap_realm->cached_context);
+		dout(" inode %p now dirty snapc %p\n", &ci->vfs_inode,
+		     ci->i_head_snapc);
 		BUG_ON(!list_empty(&ci->i_dirty_item));
 		spin_lock(&mdsc->cap_dirty_lock);
 		list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
@@ -2190,7 +2197,9 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
 	if (ci->i_head_snapc == snapc) {
 		ci->i_wrbuffer_ref_head -= nr;
-		if (!ci->i_wrbuffer_ref_head) {
+		if (ci->i_wrbuffer_ref_head == 0 &&
+		    ci->i_dirty_caps == 0 && ci->i_flushing_caps == 0) {
+			BUG_ON(!ci->i_head_snapc);
 			ceph_put_snap_context(ci->i_head_snapc);
 			ci->i_head_snapc = NULL;
 		}
@@ -2483,6 +2492,11 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
 		dout(" inode %p now clean\n", inode);
 		BUG_ON(!list_empty(&ci->i_dirty_item));
 		drop = 1;
+		if (ci->i_wrbuffer_ref_head == 0) {
+			BUG_ON(!ci->i_head_snapc);
+			ceph_put_snap_context(ci->i_head_snapc);
+			ci->i_head_snapc = NULL;
+		}
 	} else {
 		BUG_ON(list_empty(&ci->i_dirty_item));
 	}

View File

@@ -458,6 +458,8 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 		       CEPH_CAP_FILE_EXCL|CEPH_CAP_FILE_WR))) {
 		struct ceph_snap_context *snapc = ci->i_head_snapc;
 
+		dout("queue_cap_snap %p cap_snap %p queuing under %p\n", inode,
+		     capsnap, snapc);
 		igrab(inode);
 		atomic_set(&capsnap->nref, 1);
@@ -489,7 +491,9 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 		capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
 		ci->i_wrbuffer_ref_head = 0;
 		capsnap->context = snapc;
-		ci->i_head_snapc = NULL;
+		ci->i_head_snapc =
+			ceph_get_snap_context(ci->i_snap_realm->cached_context);
+		dout(" new snapc is %p\n", ci->i_head_snapc);
 		list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
 
 		if (used & CEPH_CAP_FILE_WR) {

View File

@@ -344,7 +344,8 @@ struct ceph_inode_info {
 	unsigned i_cap_exporting_issued;
 	struct ceph_cap_reservation i_cap_migration_resv;
 	struct list_head i_cap_snaps;   /* snapped state pending flush to mds */
-	struct ceph_snap_context *i_head_snapc;  /* set if wr_buffer_head > 0 */
+	struct ceph_snap_context *i_head_snapc;  /* set if wr_buffer_head > 0 or
+						    dirty|flushing caps */
 	unsigned i_snap_caps;           /* cap bits for snapped files */
 	int i_nr_by_mode[CEPH_FILE_MODE_NUM];  /* open file counts */