mirror of
git://sourceware.org/git/lvm2.git
synced 2025-01-05 13:18:20 +03:00
89 lines
2.6 KiB
Plaintext
diff -urN 2.4.7pre6/fs/buffer.c bh_async/fs/buffer.c
--- 2.4.7pre6/fs/buffer.c	Wed Jul 11 06:03:18 2001
+++ bh_async/fs/buffer.c	Thu Jul 12 07:55:08 2001
@@ -827,10 +827,11 @@
 	 * that unlock the page..
 	 */
 	spin_lock_irqsave(&page_uptodate_lock, flags);
+	mark_buffer_async(bh, 0);
 	unlock_buffer(bh);
 	tmp = bh->b_this_page;
 	while (tmp != bh) {
-		if (tmp->b_end_io == end_buffer_io_async && buffer_locked(tmp))
+		if (buffer_async(tmp) && buffer_locked(tmp))
 			goto still_busy;
 		tmp = tmp->b_this_page;
 	}
@@ -862,8 +863,9 @@
 	return;
 }
 
-void set_buffer_async_io(struct buffer_head *bh) {
+inline void set_buffer_async_io(struct buffer_head *bh) {
 	bh->b_end_io = end_buffer_io_async ;
+	mark_buffer_async(bh, 1);
 }
 
 /*
@@ -1553,7 +1555,7 @@
 	/* Stage 2: lock the buffers, mark them clean */
 	do {
 		lock_buffer(bh);
-		bh->b_end_io = end_buffer_io_async;
+		set_buffer_async_io(bh);
 		get_bh(bh);
 		set_bit(BH_Uptodate, &bh->b_state);
 		clear_bit(BH_Dirty, &bh->b_state);
@@ -1755,7 +1757,7 @@
 	for (i = 0; i < nr; i++) {
 		struct buffer_head * bh = arr[i];
 		lock_buffer(bh);
-		bh->b_end_io = end_buffer_io_async;
+		set_buffer_async_io(bh);
 		get_bh(bh);
 	}
 
@@ -2200,7 +2202,7 @@
 		lock_buffer(bh);
 		bh->b_blocknr = *(b++);
 		set_bit(BH_Mapped, &bh->b_state);
-		bh->b_end_io = end_buffer_io_async;
+		set_buffer_async_io(bh);
 		get_bh(bh);
 		bh = bh->b_this_page;
 	} while (bh != head);
diff -urN 2.4.7pre6/include/linux/fs.h bh_async/include/linux/fs.h
--- 2.4.7pre6/include/linux/fs.h	Wed Jul 11 06:03:19 2001
+++ bh_async/include/linux/fs.h	Thu Jul 12 07:54:26 2001
@@ -215,6 +215,7 @@
 	BH_Mapped,	/* 1 if the buffer has a disk mapping */
 	BH_New,		/* 1 if the buffer is new and not yet written out */
 	BH_Protected,	/* 1 if the buffer is protected */
+	BH_Async,	/* 1 if the buffer is under end_buffer_io_async I/O */
 
 	BH_PrivateStart,/* not a state bit, but the first bit available
 			 * for private allocation by other entities
@@ -275,6 +276,7 @@
 #define buffer_mapped(bh)	__buffer_state(bh,Mapped)
 #define buffer_new(bh)		__buffer_state(bh,New)
 #define buffer_protected(bh)	__buffer_state(bh,Protected)
+#define buffer_async(bh)	__buffer_state(bh,Async)
 
 #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
 
@@ -1110,6 +1112,14 @@
 extern void FASTCALL(mark_buffer_dirty(struct buffer_head *bh));
 
 #define atomic_set_buffer_dirty(bh) test_and_set_bit(BH_Dirty, &(bh)->b_state)
+
+static inline void mark_buffer_async(struct buffer_head * bh, int on)
+{
+	if (on)
+		set_bit(BH_Async, &bh->b_state);
+	else
+		clear_bit(BH_Async, &bh->b_state);
+}
 
 /*
  * If an error happens during the make_request, this function