Old 02-23-2009, 06:22 PM
Mikulas Patocka
 
barriers

Merge the pushback and deferred lists into one list --- use the deferred list
for both deferred and pushed-back bios.

This will be needed for proper support of barrier bios: it is impossible to
support ordering correctly with two lists, because the requests on the two
lists would get mixed up.
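
For illustration, the single-list pattern this converges on looks like the
sketch below (not the actual dm code: the struct is abridged, and the
md->io_lock interaction and suspend handling are omitted; the bio_list
helpers live in drivers/md/dm-bio-list.h in this kernel era).

#include <linux/bio.h>
#include <linux/spinlock.h>

struct deferred_io {
	spinlock_t lock;	/* protects the list below */
	struct bio_list list;	/* holds deferred AND pushed-back bios */
};

/* Deferral from the submission path (process context). */
static void defer_bio(struct deferred_io *d, struct bio *bio)
{
	spin_lock_irq(&d->lock);
	bio_list_add(&d->list, bio);	/* append: keeps arrival order */
	spin_unlock_irq(&d->lock);
}

/* Push-back from the endio path, which may run in interrupt context. */
static void push_back_bio(struct deferred_io *d, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	/* Same list, same lock: both kinds of bio share one ordering. */
	bio_list_add(&d->list, bio);
	spin_unlock_irqrestore(&d->lock, flags);
}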

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>

---
drivers/md/dm.c | 38 ++++++++++++++++----------------------
1 file changed, 16 insertions(+), 22 deletions(-)

Index: linux-2.6.29-rc3-devel/drivers/md/dm.c
===================================================================
--- linux-2.6.29-rc3-devel.orig/drivers/md/dm.c 2009-02-05 05:52:23.000000000 +0100
+++ linux-2.6.29-rc3-devel/drivers/md/dm.c 2009-02-05 05:53:01.000000000 +0100
@@ -102,7 +102,6 @@ union map_info *dm_get_mapinfo(struct bi
struct mapped_device {
struct rw_semaphore io_lock;
struct mutex suspend_lock;
- spinlock_t pushback_lock;
rwlock_t map_lock;
atomic_t holders;
atomic_t open_count;
@@ -122,7 +121,7 @@ struct mapped_device {
wait_queue_head_t wait;
struct work_struct work;
struct bio_list deferred;
- struct bio_list pushback;
+ spinlock_t deferred_lock;

/*
* Processing queue (flush/barriers)
@@ -445,7 +444,9 @@ static int queue_io(struct mapped_device
return 1;
}

+ spin_lock_irq(&md->deferred_lock);
bio_list_add(&md->deferred, bio);
+ spin_unlock_irq(&md->deferred_lock);

up_write(&md->io_lock);
return 0; /* deferred successfully */
@@ -526,16 +527,14 @@ static void dec_pending(struct dm_io *io
if (io->error == DM_ENDIO_REQUEUE) {
/*
* Target requested pushing back the I/O.
- * This must be handled before the sleeper on
- * suspend queue merges the pushback list.
*/
- spin_lock_irqsave(&io->md->pushback_lock, flags);
+ spin_lock_irqsave(&io->md->deferred_lock, flags);
if (__noflush_suspending(io->md))
- bio_list_add(&io->md->pushback, io->bio);
+ bio_list_add(&io->md->deferred, io->bio);
else
/* noflush suspend was interrupted. */
io->error = -EIO;
- spin_unlock_irqrestore(&io->md->pushback_lock, flags);
+ spin_unlock_irqrestore(&io->md->deferred_lock, flags);
}

end_io_acct(io);
@@ -1090,7 +1089,7 @@ static struct mapped_device *alloc_dev(i

init_rwsem(&md->io_lock);
mutex_init(&md->suspend_lock);
- spin_lock_init(&md->pushback_lock);
+ spin_lock_init(&md->deferred_lock);
rwlock_init(&md->map_lock);
atomic_set(&md->holders, 1);
atomic_set(&md->open_count, 0);
@@ -1412,26 +1411,21 @@ static void dm_wq_work(struct work_struc

down_write(&md->io_lock);

- while ((c = bio_list_pop(&md->deferred)))
+next_bio:
+ spin_lock_irq(&md->deferred_lock);
+ c = bio_list_pop(&md->deferred);
+ spin_unlock_irq(&md->deferred_lock);
+
+ if (c) {
__process_bio(md, c);
+ goto next_bio;
+ }

clear_bit(DMF_BLOCK_IO, &md->flags);

up_write(&md->io_lock);
}

-static void __merge_pushback_list(struct mapped_device *md)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&md->pushback_lock, flags);
- clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
- bio_list_merge_head(&md->deferred, &md->pushback);
- bio_list_init(&md->pushback);
- spin_unlock_irqrestore(&md->pushback_lock, flags);
-}
-
-
static void dm_queue_flush(struct mapped_device *md)
{
queue_work(md->wq, &md->work);
@@ -1556,7 +1550,7 @@ int dm_suspend(struct mapped_device *md,
down_write(&md->io_lock);

if (noflush)
- __merge_pushback_list(md);
+ clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
up_write(&md->io_lock);

/* were we interrupted ? */

--
dm-devel mailing list
dm-devel@redhat.com
https://www.redhat.com/mailman/listinfo/dm-devel
 
Old 02-23-2009, 06:22 PM
Mikulas Patocka
 
barriers

Move the test for unsupported barriers to __make_request.

This test makes sense only for drivers that use requests (such as disk
drivers), not for drivers that use bios (such as device mapper).

It is better to fix this in generic code than to add a workaround for it
in device mapper.
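
The distinction matters because a bio-based driver installs its own
make_request_fn and never enters __make_request at all. A minimal sketch of
a hypothetical bio-based driver (2.6.29-era API assumed; not dm itself):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Barrier bios reach this hook directly, so the QUEUE_ORDERED_NONE
 * check in __make_request is never consulted for this driver. */
static int example_make_request(struct request_queue *q, struct bio *bio)
{
	if (bio_barrier(bio)) {
		/* the driver implements (or refuses) ordering itself */
	}
	bio_io_error(bio);	/* placeholder: no real mapping here */
	return 0;
}

static struct request_queue *example_alloc_queue(void)
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	if (q)
		blk_queue_make_request(q, example_make_request);
	return q;
}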

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>

---
block/blk-core.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)

Index: linux-2.6.29-rc6-devel/block/blk-core.c
===================================================================
--- linux-2.6.29-rc6-devel.orig/block/blk-core.c 2009-02-23 18:43:37.000000000 +0100
+++ linux-2.6.29-rc6-devel/block/blk-core.c 2009-02-23 18:44:27.000000000 +0100
@@ -1145,6 +1145,12 @@ static int __make_request(struct request
const int unplug = bio_unplug(bio);
int rw_flags;

+ if (bio_barrier(bio) && bio_has_data(bio) &&
+ (q->next_ordered == QUEUE_ORDERED_NONE)) {
+ bio_endio(bio, -EOPNOTSUPP);
+ return 0;
+ }
+
nr_sectors = bio_sectors(bio);

/*
@@ -1450,11 +1456,6 @@ static inline void __generic_make_reques
err = -EOPNOTSUPP;
goto end_io;
}
- if (bio_barrier(bio) && bio_has_data(bio) &&
- (q->next_ordered == QUEUE_ORDERED_NONE)) {
- err = -EOPNOTSUPP;
- goto end_io;
- }

ret = q->make_request_fn(q, bio);
} while (ret);

--
dm-devel mailing list
dm-devel@redhat.com
https://www.redhat.com/mailman/listinfo/dm-devel
 
Old 02-23-2009, 06:22 PM
Mikulas Patocka
 
barriers

From mbroz@redhat.com Thu Jan 8 11:14:02 2009
Date: Thu, 08 Jan 2009 17:13:12 +0100
From: Milan Broz <mbroz@redhat.com>
Reply-To: device-mapper development <dm-devel@redhat.com>
To: device-mapper development <dm-devel@redhat.com>
Subject: [dm-devel] [PATCH 1/4] dm-core: remove current partial barrier implementation

Prepare for the full barrier implementation - remove the simple one.

Signed-off-by: Milan Broz <mbroz@redhat.com>

---
drivers/md/dm-linear.c | 1 -
drivers/md/dm-table.c | 19 -------------------
drivers/md/dm.c | 15 ++++++++++-----
drivers/md/dm.h | 1 -
include/linux/device-mapper.h | 1 -
5 files changed, 10 insertions(+), 27 deletions(-)

Index: linux-2.6.29-rc3-devel/drivers/md/dm-linear.c
===================================================================
--- linux-2.6.29-rc3-devel.orig/drivers/md/dm-linear.c 2009-02-05 05:33:47.000000000 +0100
+++ linux-2.6.29-rc3-devel/drivers/md/dm-linear.c 2009-02-05 05:33:52.000000000 +0100
@@ -142,7 +142,6 @@ static struct target_type linear_target
.status = linear_status,
.ioctl = linear_ioctl,
.merge = linear_merge,
- .features = DM_TARGET_SUPPORTS_BARRIERS,
};

int __init dm_linear_init(void)
Index: linux-2.6.29-rc3-devel/drivers/md/dm-table.c
===================================================================
--- linux-2.6.29-rc3-devel.orig/drivers/md/dm-table.c 2009-02-05 05:33:48.000000000 +0100
+++ linux-2.6.29-rc3-devel/drivers/md/dm-table.c 2009-02-05 05:33:52.000000000 +0100
@@ -52,8 +52,6 @@ struct dm_table {
sector_t *highs;
struct dm_target *targets;

- unsigned barriers_supported:1;
-
/*
* Indicates the rw permissions for the new logical
* device. This should be a combination of FMODE_READ
@@ -240,7 +238,6 @@ int dm_table_create(struct dm_table **re

INIT_LIST_HEAD(&t->devices);
atomic_set(&t->holders, 0);
- t->barriers_supported = 1;

if (!num_targets)
num_targets = KEYS_PER_NODE;
@@ -741,10 +738,6 @@ int dm_table_add_target(struct dm_table
/* FIXME: the plan is to combine high here and then have
* the merge fn apply the target level restrictions. */
combine_restrictions_low(&t->limits, &tgt->limits);
-
- if (!(tgt->type->features & DM_TARGET_SUPPORTS_BARRIERS))
- t->barriers_supported = 0;
-
return 0;

bad:
@@ -789,12 +782,6 @@ int dm_table_complete(struct dm_table *t

check_for_valid_limits(&t->limits);

- /*
- * We only support barriers if there is exactly one underlying device.
- */
- if (!list_is_singular(&t->devices))
- t->barriers_supported = 0;
-
/* how many indexes will the btree have ? */
leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
@@ -1008,12 +995,6 @@ struct mapped_device *dm_table_get_md(st
return t->md;
}

-int dm_table_barrier_ok(struct dm_table *t)
-{
- return t->barriers_supported;
-}
-EXPORT_SYMBOL(dm_table_barrier_ok);
-
EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
Index: linux-2.6.29-rc3-devel/drivers/md/dm.c
===================================================================
--- linux-2.6.29-rc3-devel.orig/drivers/md/dm.c 2009-02-05 05:33:51.000000000 +0100
+++ linux-2.6.29-rc3-devel/drivers/md/dm.c 2009-02-05 05:33:52.000000000 +0100
@@ -831,11 +831,7 @@ static void __process_bio(struct mapped_
bio_io_error(bio);
return;
}
- if (unlikely(bio_barrier(bio) && !dm_table_barrier_ok(ci.map))) {
- dm_table_put(ci.map);
- bio_endio(bio, -EOPNOTSUPP);
- return;
- }
+
ci.md = md;
ci.bio = bio;
ci.io = alloc_io(md);
@@ -917,6 +913,15 @@ static int dm_request(struct request_que
struct mapped_device *md = q->queuedata;
int cpu;

+ /*
+ * There is no use in forwarding any barrier request since we can't
+ * guarantee it is (or can be) handled by the targets correctly.
+ */
+ if (unlikely(bio_barrier(bio))) {
+ bio_endio(bio, -EOPNOTSUPP);
+ return 0;
+ }
+
down_read(&md->io_lock);

cpu = part_stat_lock();
Index: linux-2.6.29-rc3-devel/drivers/md/dm.h
===================================================================
--- linux-2.6.29-rc3-devel.orig/drivers/md/dm.h 2009-02-05 05:33:48.000000000 +0100
+++ linux-2.6.29-rc3-devel/drivers/md/dm.h 2009-02-05 05:33:52.000000000 +0100
@@ -52,7 +52,6 @@ int dm_table_any_congested(struct dm_tab
* To check the return value from dm_table_find_target().
*/
#define dm_target_is_valid(t) ((t)->table)
-int dm_table_barrier_ok(struct dm_table *t);

/*-----------------------------------------------------------------
* A registry of target types.
Index: linux-2.6.29-rc3-devel/include/linux/device-mapper.h
===================================================================
--- linux-2.6.29-rc3-devel.orig/include/linux/device-mapper.h 2009-02-05 05:33:48.000000000 +0100
+++ linux-2.6.29-rc3-devel/include/linux/device-mapper.h 2009-02-05 05:33:52.000000000 +0100
@@ -116,7 +116,6 @@ void dm_put_device(struct dm_target *ti,
/*
* Target features
*/
-#define DM_TARGET_SUPPORTS_BARRIERS 0x00000001

struct target_type {
uint64_t features;

--
dm-devel mailing list
dm-devel@redhat.com
https://www.redhat.com/mailman/listinfo/dm-devel
 
Old 02-23-2009, 06:22 PM
Mikulas Patocka
 
barriers

Rework the helper thread.

IO may be submitted to a worker thread with queue_io().
queue_io() sets DMF_BLOCK_IO so that all further IO goes to the thread.
When the thread finishes its work, it clears DMF_BLOCK_IO, and from that
point on requests are submitted from dm_request() again.

Add a new flag, DMF_BLOCK_FOR_SUSPEND, that is set when IO must be blocked
because of an ongoing suspend (DMF_BLOCK_IO had this meaning before this patch).
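
In outline, the protocol is the sketch below (condensed from the patch;
md->io_lock and the DMF_BLOCK_FOR_SUSPEND path are omitted):

static void queue_io(struct mapped_device *md, struct bio *bio)
{
	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	/* The first deferred bio kicks the worker; while DMF_BLOCK_IO
	 * is set, dm_request() keeps routing bios here, so order is
	 * preserved. */
	if (!test_and_set_bit(DMF_BLOCK_IO, &md->flags))
		queue_work(md->wq, &md->work);
}

static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md =
		container_of(work, struct mapped_device, work);

	for (;;) {
		struct bio *c;

		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c) {
			/* Drained: dm_request() may submit directly
			 * again. */
			clear_bit(DMF_BLOCK_IO, &md->flags);
			break;
		}
		__process_bio(md, c);
	}
}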

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>

---
drivers/md/dm.c | 77 +++++++++++++++++++++++++-------------------------------
1 file changed, 35 insertions(+), 42 deletions(-)

Index: linux-2.6.29-rc6-devel/drivers/md/dm.c
===================================================================
--- linux-2.6.29-rc6-devel.orig/drivers/md/dm.c 2009-02-23 16:27:41.000000000 +0100
+++ linux-2.6.29-rc6-devel/drivers/md/dm.c 2009-02-23 17:55:37.000000000 +0100
@@ -90,11 +90,12 @@ union map_info *dm_get_mapinfo(struct bi
* Bits for the md->flags field.
*/
#define DMF_BLOCK_IO 0
-#define DMF_SUSPENDED 1
-#define DMF_FROZEN 2
-#define DMF_FREEING 3
-#define DMF_DELETING 4
-#define DMF_NOFLUSH_SUSPENDING 5
+#define DMF_BLOCK_FOR_SUSPEND 1
+#define DMF_SUSPENDED 2
+#define DMF_FROZEN 3
+#define DMF_FREEING 4
+#define DMF_DELETING 5
+#define DMF_NOFLUSH_SUSPENDING 6

/*
* Work processed by per-device workqueue.
@@ -435,21 +436,15 @@ static void end_io_acct(struct dm_io *io
/*
* Add the bio to the list of deferred io.
*/
-static int queue_io(struct mapped_device *md, struct bio *bio)
+static void queue_io(struct mapped_device *md, struct bio *bio)
{
down_write(&md->io_lock);
-
- if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
- up_write(&md->io_lock);
- return 1;
- }
-
spin_lock_irq(&md->deferred_lock);
bio_list_add(&md->deferred, bio);
spin_unlock_irq(&md->deferred_lock);
-
+ if (!test_and_set_bit(DMF_BLOCK_IO, &md->flags))
+ queue_work(md->wq, &md->work);
up_write(&md->io_lock);
- return 0; /* deferred successfully */
}

/*
@@ -908,7 +903,6 @@ out:
*/
static int dm_request(struct request_queue *q, struct bio *bio)
{
- int r = -EIO;
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
int cpu;
@@ -933,31 +927,23 @@ static int dm_request(struct request_que
* If we're suspended we have to queue
* this io for later.
*/
- while (test_bit(DMF_BLOCK_IO, &md->flags)) {
+ if (unlikely(test_bit(DMF_BLOCK_IO, &md->flags))) {
up_read(&md->io_lock);

- if (bio_rw(bio) != READA)
- r = queue_io(md, bio);
+ if (unlikely(test_bit(DMF_BLOCK_FOR_SUSPEND, &md->flags)) &&
+ bio_rw(bio) == READA) {
+ bio_io_error(bio);
+ return 0;
+ }

- if (r <= 0)
- goto out_req;
+ queue_io(md, bio);

- /*
- * We're in a while loop, because someone could suspend
- * before we get to the following read lock.
- */
- down_read(&md->io_lock);
+ return 0;
}

__process_bio(md, bio);
up_read(&md->io_lock);
return 0;
-
-out_req:
- if (r < 0)
- bio_io_error(bio);
-
- return 0;
}

static void dm_unplug_all(struct request_queue *q)
@@ -977,7 +963,7 @@ static int dm_any_congested(void *conges
struct mapped_device *md = congested_data;
struct dm_table *map;

- if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
+ if (!test_bit(DMF_BLOCK_FOR_SUSPEND, &md->flags)) {
map = dm_get_table(md);
if (map) {
r = dm_table_any_congested(map, bdi_bits);
@@ -1412,29 +1398,34 @@ static int dm_wait_for_completion(struct
static void dm_wq_work(struct work_struct *work)
{
struct mapped_device *md = container_of(work, struct mapped_device, work);
- struct bio *c;
-
down_write(&md->io_lock);

-next_bio:
- spin_lock_irq(&md->deferred_lock);
- c = bio_list_pop(&md->deferred);
- spin_unlock_irq(&md->deferred_lock);
+ while (!test_bit(DMF_BLOCK_FOR_SUSPEND, &md->flags)) {
+ struct bio *c;
+
+ spin_lock_irq(&md->deferred_lock);
+ c = bio_list_pop(&md->deferred);
+ spin_unlock_irq(&md->deferred_lock);
+
+ if (!c) {
+ clear_bit(DMF_BLOCK_IO, &md->flags);
+ break;
+ }
+ up_write(&md->io_lock);

- if (c) {
__process_bio(md, c);
- goto next_bio;
+
+ down_write(&md->io_lock);
}

- clear_bit(DMF_BLOCK_IO, &md->flags);

up_write(&md->io_lock);
}

static void dm_queue_flush(struct mapped_device *md)
{
+ clear_bit(DMF_BLOCK_FOR_SUSPEND, &md->flags);
queue_work(md->wq, &md->work);
- flush_workqueue(md->wq);
}

/*
@@ -1543,6 +1534,7 @@ int dm_suspend(struct mapped_device *md,
* First we set the BLOCK_IO flag so no more ios will be mapped.
*/
down_write(&md->io_lock);
+ set_bit(DMF_BLOCK_FOR_SUSPEND, &md->flags);
set_bit(DMF_BLOCK_IO, &md->flags);

up_write(&md->io_lock);
@@ -1550,6 +1542,7 @@ int dm_suspend(struct mapped_device *md,
/*
* Wait for the already-mapped ios to complete.
*/
+ flush_workqueue(md->wq);
r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

down_write(&md->io_lock);

--
dm-devel mailing list
dm-devel@redhat.com
https://www.redhat.com/mailman/listinfo/dm-devel
 
Old 02-23-2009, 06:23 PM
Mikulas Patocka
 
barriers

Barrier support.

Barriers are submitted to a worker thread that issues them in order.

The __process_bio function is modified so that when it sees a barrier
request, it waits for all pending IO issued before the request, then
submits the barrier and waits for it.

DM_ENDIO_REQUEUE doesn't work and I doubt that it can be made to work
with barriers.
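
In outline, the worker's handling of one barrier bio is the sketch below
(condensed from the dm_wq_work() changes in this patch; the error plumbing
and the DM_ENDIO_REQUEUE case are simplified):

static void process_barrier(struct mapped_device *md, struct bio *bio)
{
	/* 1. Drain every bio issued before the barrier. */
	dm_flush(md);

	if (bio_empty_barrier(bio)) {
		/* An empty barrier is purely a flush point. */
		bio_endio(bio, 0);
		return;
	}

	/* 2. Submit the barrier payload through the targets;
	 * dec_pending() records any failure in md->barrier_error. */
	md->barrier_error = 0;
	__process_bio(md, bio);

	/* 3. Wait for the barrier itself, then complete it with
	 * whatever error was recorded. */
	dm_flush(md);
	bio_endio(bio, md->barrier_error);
}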

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>

---
drivers/md/dm.c | 75 +++++++++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 61 insertions(+), 14 deletions(-)

Index: linux-2.6.29-rc6-devel/drivers/md/dm.c
===================================================================
--- linux-2.6.29-rc6-devel.orig/drivers/md/dm.c 2009-02-23 17:55:37.000000000 +0100
+++ linux-2.6.29-rc6-devel/drivers/md/dm.c 2009-02-23 17:55:45.000000000 +0100
@@ -125,6 +125,11 @@ struct mapped_device {
spinlock_t deferred_lock;

/*
+ * An error from the barrier request currently being processed.
+ */
+ int barrier_error;
+
+ /*
* Processing queue (flush/barriers)
*/
struct workqueue_struct *wq;
@@ -425,6 +430,10 @@ static void end_io_acct(struct dm_io *io
part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
part_stat_unlock();

+ /*
+ * after this is decremented, the bio must not be touched if it is
+ * a barrier bio
+ */
dm_disk(md)->part0.in_flight = pending =
atomic_dec_return(&md->pending);

@@ -525,19 +534,29 @@ static void dec_pending(struct dm_io *io
*/
spin_lock_irqsave(&io->md->deferred_lock, flags);
if (__noflush_suspending(io->md))
- bio_list_add(&io->md->deferred, io->bio);
+ bio_list_add_head(&io->md->deferred, io->bio);
else
/* noflush suspend was interrupted. */
io->error = -EIO;
spin_unlock_irqrestore(&io->md->deferred_lock, flags);
}

- end_io_acct(io);
+ if (bio_barrier(io->bio)) {
+ /*
+ * There can be only one barrier request in flight, so using a
+ * per-device variable for error reporting is OK.
+ * Note that you must not touch the bio after end_io_acct.
+ */
+ io->md->barrier_error = io->error;
+ end_io_acct(io);
+ } else {
+ end_io_acct(io);

- if (io->error != DM_ENDIO_REQUEUE) {
- trace_block_bio_complete(io->md->queue, io->bio);
+ if (io->error != DM_ENDIO_REQUEUE) {
+ trace_block_bio_complete(io->md->queue, io->bio);

- bio_endio(io->bio, io->error);
+ bio_endio(io->bio, io->error);
+ }
}

free_io(io->md, io);
@@ -682,7 +701,7 @@ static struct bio *split_bvec(struct bio

clone->bi_sector = sector;
clone->bi_bdev = bio->bi_bdev;
- clone->bi_rw = bio->bi_rw;
+ clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
clone->bi_vcnt = 1;
clone->bi_size = to_bytes(len);
clone->bi_io_vec->bv_offset = offset;
@@ -703,6 +722,7 @@ static struct bio *clone_bio(struct bio

clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
__bio_clone(clone, bio);
+ clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
clone->bi_destructor = dm_bio_destructor;
clone->bi_sector = sector;
clone->bi_idx = idx;
@@ -823,7 +843,10 @@ static void __process_bio(struct mapped_

ci.map = dm_get_table(md);
if (unlikely(!ci.map)) {
- bio_io_error(bio);
+ if (!bio_barrier(bio))
+ bio_io_error(bio);
+ else
+ md->barrier_error = -EIO;
return;
}

@@ -911,11 +934,6 @@ static int dm_request(struct request_que
* There is no use in forwarding any barrier request since we can't
* guarantee it is (or can be) handled by the targets correctly.
*/
- if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, -EOPNOTSUPP);
- return 0;
- }
-
down_read(&md->io_lock);

cpu = part_stat_lock();
@@ -927,7 +945,8 @@ static int dm_request(struct request_que
* If we're suspended we have to queue
* this io for later.
*/
- if (unlikely(test_bit(DMF_BLOCK_IO, &md->flags))) {
+ if (unlikely(test_bit(DMF_BLOCK_IO, &md->flags)) ||
+ unlikely(bio_barrier(bio))) {
up_read(&md->io_lock);

if (unlikely(test_bit(DMF_BLOCK_FOR_SUSPEND, &md->flags)) &&
@@ -1392,6 +1411,12 @@ static int dm_wait_for_completion(struct
return r;
}

+static int dm_flush(struct mapped_device *md)
+{
+ dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
+ return 0;
+}
+
/*
* Process the deferred bios
*/
@@ -1413,8 +1438,30 @@ static void dm_wq_work(struct work_struc
}
up_write(&md->io_lock);

- __process_bio(md, c);
+ if (!bio_barrier(c))
+ __process_bio(md, c);
+ else {
+ int error = dm_flush(md);
+ if (unlikely(error)) {
+ bio_endio(c, error);
+ goto next_bio;
+ }
+ if (bio_empty_barrier(c)) {
+ bio_endio(c, 0);
+ goto next_bio;
+ }
+
+ __process_bio(md, c);
+
+ error = dm_flush(md);
+ if (!error && md->barrier_error)
+ error = md->barrier_error;
+
+ if (md->barrier_error != DM_ENDIO_REQUEUE)
+ bio_endio(c, error);
+ }

+next_bio:
down_write(&md->io_lock);
}


--
dm-devel mailing list
dm-devel@redhat.com
https://www.redhat.com/mailman/listinfo/dm-devel
 
Old 02-24-2009, 09:11 AM
Nikanth K
 
barriers

On Tue, Feb 24, 2009 at 12:53 AM, Mikulas Patocka <mpatocka@redhat.com> wrote:

<snip>

>
> +static int dm_flush(struct mapped_device *md)
> +{
> +	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
> +	return 0;
> +}
> +

Always returns zero! Why not void?
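
I.e. (sketch of the suggested change):

static void dm_flush(struct mapped_device *md)
{
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}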

<snip>

Thanks
Nikanth

--
dm-devel mailing list
dm-devel@redhat.com
https://www.redhat.com/mailman/listinfo/dm-devel
 
Old 03-24-2009, 01:24 PM
Alasdair G Kergon
 
barriers

On Mon, Feb 23, 2009 at 02:23:00PM -0500, Mikulas Patocka wrote:
> DM_ENDIO_REQUEUE doesn't work and I doubt that it can be made to work
> with barriers.

Didn't we discuss this before?
We *require* DM_ENDIO_REQUEUE functionality - I can't apply a patch that
breaks it. But it is not something that can be used arbitrarily - it is
only used in situations where core dm is firmly in control (and I/O cannot get
through to the device e.g. all paths down in multipath) and I don't see why it
can't continue to work with barriers.

I hope it's just the comment that is wrong here - or does the patch need
updating?

Alasdair
--
agk@redhat.com

--
dm-devel mailing list
dm-devel@redhat.com
https://www.redhat.com/mailman/listinfo/dm-devel
 
Old 03-24-2009, 01:30 PM
Mikulas Patocka
 
barriers

On Tue, 24 Mar 2009, Alasdair G Kergon wrote:

> On Mon, Feb 23, 2009 at 02:23:00PM -0500, Mikulas Patocka wrote:
> > DM_ENDIO_REQUEUE doesn't work and I doubt that it can be made to work
> > with barriers.
>
> Didn't we discuss this before?
> We *require* DM_ENDIO_REQUEUE functionality - I can't apply a patch that
> breaks it. But it is not something that can be used arbitrarily - it is
> only used in situations where core dm is firmly in control (and I/O cannot get
> through to the device e.g. all paths down in multipath) and I don't see why it
> can't continue to work with barriers.
>
> I hope it's just the comment that is wrong here - or does the patch need
> updating?

Oh, sorry, that is an old comment from an old version of the patch. I fixed
the code but forgot to update the comment. The code in the patch should be
correct w.r.t. DM_ENDIO_REQUEUE.

Mikulas

> Alasdair
> --
> agk@redhat.com
>

--
dm-devel mailing list
dm-devel@redhat.com
https://www.redhat.com/mailman/listinfo/dm-devel
 
