FAQ Search Today's Posts Mark Forums Read
» Video Reviews

» Linux Archive

Linux-archive is a website aiming to archive linux email lists and to make them easily accessible for linux users/developers.


» Sponsor

» Partners

» Sponsor

Go Back   Linux Archive > Redhat > Device-mapper Development

 
 
LinkBack Thread Tools
 
Old 04-22-2010, 06:03 PM
Milan Broz
 
Default md: dm-crypt: Add option to re-use a new global work-queue.

On 04/22/2010 07:48 PM, San Mehat wrote:
> Typically, dm-crypt instances each have their own set of kcrypt/kcrypt_io
> work-queues. This patch adds an option which will create one set of
> work-queues on init, and re-uses them for all dm-crypt target instances.

Hi,

I'll take a look, but you are basically re-introducing previous
logic and it was removed because of deadlock possibility.

(Imagine stacked dm-crypt - Truecrypt uses that - and a low-memory situation.
The mempool is exhausted and the only possibility to free memory is finishing some
request in another queue - which is the same (blocked) queue with your patch!)

We must allocate bio clones there and using per-device mempools and queues
to avoid these problems.

And how will this work with asynchronous crypto processing when we
must wait if the crypto queue is full? (In the same device-stacked situation.)

Can you explain the real reason for this patch?

(cc: Alasdair - I think he will not accept the patch anyway.)

Milan


>
> Cc: Milan Broz <mbroz@redhat.com>
> Cc: Brian Swetland <swetland@google.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Christophe Saout <christophe@saout.de>
> Signed-off-by: San Mehat <san@google.com>
> ---
> drivers/md/Kconfig | 10 ++++++++++
> drivers/md/dm-crypt.c | 42 +++++++++++++++++++++++++++++++++++++++---
> 2 files changed, 49 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
> index 2158377..8d82dfc 100644
> --- a/drivers/md/Kconfig
> +++ b/drivers/md/Kconfig
> @@ -244,6 +244,16 @@ config DM_CRYPT
>
> If unsure, say N.
>
> +config DM_CRYPT_GLOBAL_WORKQUEUES
> + boolean "Use global instead of per-device work-queues"
> + depends on DM_CRYPT
> + ---help---
> + Normally 2 kernel work-queue threads are created for every
> + dm-crypt target. This option creates only 1 set of work-queues
> + on init, and re-uses them.
> +
> + If unsure, say N.
> +
> config DM_SNAPSHOT
> tristate "Snapshot target"
> depends on BLK_DEV_DM
> diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
> index 959d6d1..875ad9a 100644
> --- a/drivers/md/dm-crypt.c
> +++ b/drivers/md/dm-crypt.c
> @@ -104,8 +104,10 @@ struct crypt_config {
> mempool_t *page_pool;
> struct bio_set *bs;
>
> +#ifndef CONFIG_DM_CRYPT_GLOBAL_WORKQUEUES
> struct workqueue_struct *io_queue;
> struct workqueue_struct *crypt_queue;
> +#endif
>
> /*
> * crypto related data
> @@ -148,6 +150,10 @@ struct crypt_config {
> #define MIN_BIO_PAGES 8
>
> static struct kmem_cache *_crypt_io_pool;
> +#ifdef CONFIG_DM_CRYPT_GLOBAL_WORKQUEUES
> +static struct workqueue_struct *_io_queue;
> +static struct workqueue_struct *_crypt_queue;
> +#endif
>
> static void clone_init(struct dm_crypt_io *, struct bio *);
> static void kcryptd_queue_crypt(struct dm_crypt_io *io);
> @@ -730,7 +736,11 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
> struct crypt_config *cc = io->target->private;
>
> INIT_WORK(&io->work, kcryptd_io);
> +#ifdef CONFIG_DM_CRYPT_GLOBAL_WORKQUEUES
> + queue_work(_io_queue, &io->work);
> +#else
> queue_work(cc->io_queue, &io->work);
> +#endif
> }
>
> static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
> @@ -914,7 +924,11 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
> struct crypt_config *cc = io->target->private;
>
> INIT_WORK(&io->work, kcryptd_crypt);
> +#ifdef CONFIG_DM_CRYPT_GLOBAL_WORKQUEUES
> + queue_work(_crypt_queue, &io->work);
> +#else
> queue_work(cc->crypt_queue, &io->work);
> +#endif
> }
>
> /*
> @@ -1165,6 +1179,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
> } else
> cc->iv_mode = NULL;
>
> +#ifndef CONFIG_DM_CRYPT_GLOBAL_WORKQUEUES
> cc->io_queue = create_singlethread_workqueue("kcryptd_io");
> if (!cc->io_queue) {
> ti->error = "Couldn't create kcryptd io queue";
> @@ -1174,15 +1189,15 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
> cc->crypt_queue = create_singlethread_workqueue("kcryptd");
> if (!cc->crypt_queue) {
> ti->error = "Couldn't create kcryptd queue";
> - goto bad_crypt_queue;
> + destroy_workqueue(cc->io_queue);
> + goto bad_io_queue;
> }
> +#endif
>
> ti->num_flush_requests = 1;
> ti->private = cc;
> return 0;
>
> -bad_crypt_queue:
> - destroy_workqueue(cc->io_queue);
> bad_io_queue:
> kfree(cc->iv_mode);
> bad_ivmode_string:
> @@ -1210,8 +1225,10 @@ static void crypt_dtr(struct dm_target *ti)
> {
> struct crypt_config *cc = (struct crypt_config *) ti->private;
>
> +#ifndef CONFIG_DM_CRYPT_GLOBAL_WORKQUEUES
> destroy_workqueue(cc->io_queue);
> destroy_workqueue(cc->crypt_queue);
> +#endif
>
> if (cc->req)
> mempool_free(cc->req, cc->req_pool);
> @@ -1399,6 +1416,21 @@ static int __init dm_crypt_init(void)
> {
> int r;
>
> +#ifdef CONFIG_DM_CRYPT_GLOBAL_WORKQUEUES
> + _io_queue = create_singlethread_workqueue("kcryptd_io");
> + if (!_io_queue) {
> + DMERR("couldn't create kcryptd io queue");
> + return -ENOMEM;
> + }
> +
> + _crypt_queue = create_singlethread_workqueue("kcryptd");
> + if (!_crypt_queue) {
> + DMERR("couldn't create kcryptd queue");
> + destroy_workqueue(_io_queue);
> + return -ENOMEM;
> + }
> +#endif
> +
> _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
> if (!_crypt_io_pool)
> return -ENOMEM;
> @@ -1416,6 +1448,10 @@ static void __exit dm_crypt_exit(void)
> {
> dm_unregister_target(&crypt_target);
> kmem_cache_destroy(_crypt_io_pool);
> +#ifdef CONFIG_DM_CRYPT_GLOBAL_WORKQUEUES
> + destroy_workqueue(_io_queue);
> + destroy_workqueue(_crypt_queue);
> +#endif
> }
>
> module_init(dm_crypt_init);

--
dm-devel mailing list
dm-devel@redhat.com
https://www.redhat.com/mailman/listinfo/dm-devel
 
Old 04-22-2010, 06:08 PM
San Mehat
 
Default md: dm-crypt: Add option to re-use a new global work-queue.

On Thu, Apr 22, 2010 at 11:03 AM, Milan Broz <mbroz@redhat.com> wrote:
> On 04/22/2010 07:48 PM, San Mehat wrote:
>> Typically, dm-crypt instances each have their own set of kcrypt/kcrypt_io
>> work-queues. This patch adds an option which will create one set of
>> work-queues on init, and re-uses them for all dm-crypt target instances.
>
> Hi,
>
> I'll take a look, but you are basically re-introducing previous
> logic and it was removed because of deadlock possibility.
>
> (Imagine stacked dm-crypt - Truecrypt uses that - and low memory situation.
> The mempool is exhausted and only possibility to free memory is finishing some
> request in another queue - which is the same (blocked) queue with your patch!
>
> We must allocate bio clones there and using per-device mempools and queues
> to avoid these problems.
>
> And how this will work with asynchronous crypto processing when weh
> must wait if crypto queue is full? (In the same device stacked situation.)
>
> Can you explain the real reason for this patch?
>

Sure, I'd be happy to explain.

Upcoming versions of android are about to start using dm/dm-crypt
heavily, having
a large number of small dm-crypt instances running on the device (hard
to tell just
how many, but i've seen cases where up to 50 or 60 instances may be
running). This ends up creating 100 - 120 kernel threads, and I was
simply trying to cut that down.

I'd be more than happy to discuss alternatives; but do we *really*
need 2 work-queue threads per instance?

> (cc: Alasdair - I think he will not accept the patch anyway.)

Probably not, but at least we can get the discussion going

>
> Milan
>
>
>>
>> Cc: Milan Broz <mbroz@redhat.com>
>> Cc: Brian Swetland <swetland@google.com>
>> Cc: Andrew Morton <akpm@linux-foundation.org>
>> Cc: Christophe Saout <christophe@saout.de>
>> Signed-off-by: San Mehat <san@google.com>
>> ---
>> *drivers/md/Kconfig * *| * 10 ++++++++++
>> *drivers/md/dm-crypt.c | * 42 +++++++++++++++++++++++++++++++++++++++---
>> *2 files changed, 49 insertions(+), 3 deletions(-)
>>
>> diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
>> index 2158377..8d82dfc 100644
>> --- a/drivers/md/Kconfig
>> +++ b/drivers/md/Kconfig
>> @@ -244,6 +244,16 @@ config DM_CRYPT
>>
>> * * * * If unsure, say N.
>>
>> +config DM_CRYPT_GLOBAL_WORKQUEUES
>> + * * boolean "Use global instead of per-device work-queues"
>> + * * depends on DM_CRYPT
>> + * * ---help---
>> + * * * Normally 2 kernel work-queue threads are created for every
>> + * * * dm-crypt target. This option creates only 1 set of work-queues
>> + * * * on init, and re-uses them.
>> +
>> + * * * If unsure, say N.
>> +
>> *config DM_SNAPSHOT
>> * * * * tristate "Snapshot target"
>> * * * * depends on BLK_DEV_DM
>> diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
>> index 959d6d1..875ad9a 100644
>> --- a/drivers/md/dm-crypt.c
>> +++ b/drivers/md/dm-crypt.c
>> @@ -104,8 +104,10 @@ struct crypt_config {
>> * * * mempool_t *page_pool;
>> * * * struct bio_set *bs;
>>
>> +#ifndef CONFIG_DM_CRYPT_GLOBAL_WORKQUEUES
>> * * * struct workqueue_struct *io_queue;
>> * * * struct workqueue_struct *crypt_queue;
>> +#endif
>>
>> * * * /*
>> * * * ** crypto related data
>> @@ -148,6 +150,10 @@ struct crypt_config {
>> *#define MIN_BIO_PAGES *8
>>
>> *static struct kmem_cache *_crypt_io_pool;
>> +#ifdef CONFIG_DM_CRYPT_GLOBAL_WORKQUEUES
>> +static struct workqueue_struct *_io_queue;
>> +static struct workqueue_struct *_crypt_queue;
>> +#endif
>>
>> *static void clone_init(struct dm_crypt_io *, struct bio *);
>> *static void kcryptd_queue_crypt(struct dm_crypt_io *io);
>> @@ -730,7 +736,11 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
>> * * * struct crypt_config *cc = io->target->private;
>>
>> * * * INIT_WORK(&io->work, kcryptd_io);
>> +#ifdef CONFIG_DM_CRYPT_GLOBAL_WORKQUEUES
>> + * * queue_work(_io_queue, &io->work);
>> +#else
>> * * * queue_work(cc->io_queue, &io->work);
>> +#endif
>> *}
>>
>> *static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
>> @@ -914,7 +924,11 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
>> * * * struct crypt_config *cc = io->target->private;
>>
>> * * * INIT_WORK(&io->work, kcryptd_crypt);
>> +#ifdef CONFIG_DM_CRYPT_GLOBAL_WORKQUEUES
>> + * * queue_work(_crypt_queue, &io->work);
>> +#else
>> * * * queue_work(cc->crypt_queue, &io->work);
>> +#endif
>> *}
>>
>> */*
>> @@ -1165,6 +1179,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
>> * * * } else
>> * * * * * * * cc->iv_mode = NULL;
>>
>> +#ifndef CONFIG_DM_CRYPT_GLOBAL_WORKQUEUES
>> * * * cc->io_queue = create_singlethread_workqueue("kcryptd_io");
>> * * * if (!cc->io_queue) {
>> * * * * * * * ti->error = "Couldn't create kcryptd io queue";
>> @@ -1174,15 +1189,15 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
>> * * * cc->crypt_queue = create_singlethread_workqueue("kcryptd");
>> * * * if (!cc->crypt_queue) {
>> * * * * * * * ti->error = "Couldn't create kcryptd queue";
>> - * * * * * * goto bad_crypt_queue;
>> + * * * * * * destroy_workqueue(cc->io_queue);
>> + * * * * * * goto bad_io_queue;
>> * * * }
>> +#endif
>>
>> * * * ti->num_flush_requests = 1;
>> * * * ti->private = cc;
>> * * * return 0;
>>
>> -bad_crypt_queue:
>> - * * destroy_workqueue(cc->io_queue);
>> *bad_io_queue:
>> * * * kfree(cc->iv_mode);
>> *bad_ivmode_string:
>> @@ -1210,8 +1225,10 @@ static void crypt_dtr(struct dm_target *ti)
>> *{
>> * * * struct crypt_config *cc = (struct crypt_config *) ti->private;
>>
>> +#ifndef CONFIG_DM_CRYPT_GLOBAL_WORKQUEUES
>> * * * destroy_workqueue(cc->io_queue);
>> * * * destroy_workqueue(cc->crypt_queue);
>> +#endif
>>
>> * * * if (cc->req)
>> * * * * * * * mempool_free(cc->req, cc->req_pool);
>> @@ -1399,6 +1416,21 @@ static int __init dm_crypt_init(void)
>> *{
>> * * * int r;
>>
>> +#ifdef CONFIG_DM_CRYPT_GLOBAL_WORKQUEUES
>> + * * _io_queue = create_singlethread_workqueue("kcryptd_io");
>> + * * if (!_io_queue) {
>> + * * * * * * DMERR("couldn't create kcryptd io queue");
>> + * * * * * * return -ENOMEM;
>> + * * }
>> +
>> + * * _crypt_queue = create_singlethread_workqueue("kcryptd");
>> + * * if (!_crypt_queue) {
>> + * * * * * * DMERR("couldn't create kcryptd queue");
>> + * * * * * * destroy_workqueue(_io_queue);
>> + * * * * * * return -ENOMEM;
>> + * * }
>> +#endif
>> +
>> * * * _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
>> * * * if (!_crypt_io_pool)
>> * * * * * * * return -ENOMEM;
>> @@ -1416,6 +1448,10 @@ static void __exit dm_crypt_exit(void)
>> *{
>> * * * dm_unregister_target(&crypt_target);
>> * * * kmem_cache_destroy(_crypt_io_pool);
>> +#ifdef CONFIG_DM_CRYPT_GLOBAL_WORKQUEUES
>> + * * destroy_workqueue(_io_queue);
>> + * * destroy_workqueue(_crypt_queue);
>> +#endif
>> *}
>>
>> *module_init(dm_crypt_init);
>



--
San Mehat *| *Staff Software Engineer *| *Android *| *Google Inc.
415.366.6172 (san@google.com)

--
dm-devel mailing list
dm-devel@redhat.com
https://www.redhat.com/mailman/listinfo/dm-devel
 
Old 04-22-2010, 06:47 PM
Milan Broz
 
Default md: dm-crypt: Add option to re-use a new global work-queue.

On 04/22/2010 08:08 PM, San Mehat wrote:
> On Thu, Apr 22, 2010 at 11:03 AM, Milan Broz <mbroz@redhat.com> wrote:
>> On 04/22/2010 07:48 PM, San Mehat wrote:
>>> Typically, dm-crypt instances each have their own set of kcrypt/kcrypt_io
>>> work-queues. This patch adds an option which will create one set of
>>> work-queues on init, and re-uses them for all dm-crypt target instances.

>> Can you explain the real reason for this patch?
>>
>
> Sure, I'd be happy to explain.

(Please add this always to patch header.)

>
> Upcoming versions of android are about to start using dm/dm-crypt
> heavily, having
> a large number of small dm-crypt instances running on the device (hard
> to tell just
> how many, but i've seen cases where up to 50 or 60 instances may be
> running). This ends up creating 100 - 120 kernel threads, and I was
> simply trying to cut that down.

Sorry, but I don't take this argument. "Too many notes!" :-)

So the problem is with memory allocation? Scheduler? Or where?
Kernel threads should be cheap.

If you need 60 crypt devices, you almost surely hit at least starvation
problem with one global queue!
(Just curious - what are these crypt devices doing?)

> I'd be more than happy to discuss alternatives; but do we *really*
> need 2 work-queue threads per instance?

yes.

For separate io queue - see commit cabf08e4d3d1181d7c408edae97fb4d1c31518af

| Add post-processing queue (per crypt device) for read operations.

| Current implementation uses only one queue for all operations
| and this can lead to starvation caused by many requests waiting
| for memory allocation. But the needed memory-releasing operation
| is queued after these requests (in the same queue).


(and there were another problem with async crypt - callback is called
in interrupt context, bio must be submitted from separate workqueue IIRC)

>> (cc: Alasdair - I think he will not accept the patch anyway.)
>
> Probably not, but at least we can get the discussion going

I am not saying that I do not want to discuss this - but we must know
the real problems many queues are causing first.
And then think about possible solutions.

Milan

--
dm-devel mailing list
dm-devel@redhat.com
https://www.redhat.com/mailman/listinfo/dm-devel
 
Old 04-22-2010, 07:42 PM
San Mehat
 
Default md: dm-crypt: Add option to re-use a new global work-queue.

On Thu, Apr 22, 2010 at 11:47 AM, Milan Broz <mbroz@redhat.com> wrote:
> On 04/22/2010 08:08 PM, San Mehat wrote:
>> On Thu, Apr 22, 2010 at 11:03 AM, Milan Broz <mbroz@redhat.com> wrote:
>>> On 04/22/2010 07:48 PM, San Mehat wrote:
>>>> Typically, dm-crypt instances each have their own set of kcrypt/kcrypt_io
>>>> work-queues. This patch adds an option which will create one set of
>>>> work-queues on init, and re-uses them for all dm-crypt target instances.
>
>>> Can you explain the real reason for this patch?
>>>
>>
>> Sure, I'd be happy to explain.
>
> (Please add this always to patch header.)
>

Will do - thanks.

>>
>> * Upcoming versions of android are about to start using dm/dm-crypt
>> heavily, having
>> a large number of small dm-crypt instances running on the device (hard
>> to tell just
>> how many, but i've seen cases where up to 50 or 60 instances may be
>> running). This ends up creating 100 - 120 kernel threads, and I was
>> simply trying to cut that down.
>
> Sorry, but I don't take this argument. "Too many notes!" :-)
>
> So the problem is with memory allocation? Scheduler? Or where?
> Kernel threads should be cheap.
>

Well the initial consideration was towards memory overhead with so
many threads that don't do much (in our use-case) on an embedded
device.

> If you need 60 crypt devices, you almost surely hit at least starvation
> problem with one global queue!
> (Just curious - what are these crypt devices doing?)

The crypt devices are providing small read-only encrypted file-systems
- whose backing files exist on an external FAT file-system, and are
created on-demand as needed. In this usage scenario, we'll only see
typically a few of these devices being simultaneously accessed, (and
the sd-card throughput is definitely the long-pole in the performance
profile, so even when I beat on 80 or 90 concurrent instances, we're
mainly waiting for mmcqd to complete transactions).

>
>> I'd be more than happy to discuss alternatives; but do we *really*
>> need 2 work-queue threads per instance?
>
> yes.

What if we made a note in the Kconfig advising against using the option in
stacked configurations? (Or even make it depend on CONFIG_EMBEDDED)

Thanks for your time,

-san

>
> For separate io queue - see commit cabf08e4d3d1181d7c408edae97fb4d1c31518af
>
> | Add post-processing queue (per crypt device) for read operations.
>
> | Current implementation uses only one queue for all operations
> | and this can lead to starvation caused by many requests waiting
> | for memory allocation. But the needed memory-releasing operation
> | is queued after these requests (in the same queue).
>
>
> (and there were another problem with async crypt - callback is called
> in interrupt context, bio must be submitted from separate workqueue IIRC)
>
>>> (cc: Alasdair - I think he will not accept the patch anyway.)
>>
>> Probably not, but at least we can get the discussion going
>
> I am not saying that I do not want to discuss this - but we must know
> the real problems many queues are causing first.
> And then think about possible solutions.
>
> Milan
>



--
San Mehat *| *Staff Software Engineer *| *Android *| *Google Inc.
415.366.6172 (san@google.com)

--
dm-devel mailing list
dm-devel@redhat.com
https://www.redhat.com/mailman/listinfo/dm-devel
 
Old 04-27-2010, 08:58 PM
San Mehat
 
Default md: dm-crypt: Add option to re-use a new global work-queue.

*ping* Any word on my previous counter-proposal? Shall I prepare
another patch for consideration?

-san


On Thu, Apr 22, 2010 at 12:42 PM, San Mehat <san@google.com> wrote:
> On Thu, Apr 22, 2010 at 11:47 AM, Milan Broz <mbroz@redhat.com> wrote:
>> On 04/22/2010 08:08 PM, San Mehat wrote:
>>> On Thu, Apr 22, 2010 at 11:03 AM, Milan Broz <mbroz@redhat.com> wrote:
>>>> On 04/22/2010 07:48 PM, San Mehat wrote:
>>>>> Typically, dm-crypt instances each have their own set of kcrypt/kcrypt_io
>>>>> work-queues. This patch adds an option which will create one set of
>>>>> work-queues on init, and re-uses them for all dm-crypt target instances.
>>
>>>> Can you explain the real reason for this patch?
>>>>
>>>
>>> Sure, I'd be happy to explain.
>>
>> (Please add this always to patch header.)
>>
>
> Will do - thanks.
>
>>>
>>> * Upcoming versions of android are about to start using dm/dm-crypt
>>> heavily, having
>>> a large number of small dm-crypt instances running on the device (hard
>>> to tell just
>>> how many, but i've seen cases where up to 50 or 60 instances may be
>>> running). This ends up creating 100 - 120 kernel threads, and I was
>>> simply trying to cut that down.
>>
>> Sorry, but I don't take this argument. "Too many notes!" :-)
>>
>> So the problem is with memory allocation? Scheduler? Or where?
>> Kernel threads should be cheap.
>>
>
> Well the initial consideration was towards memory overhead with so
> many threads that don't do much (in our use-case) on an embedded
> device.
>
>> If you need 60 crypt devices, you almost surely hit at least starvation
>> problem with one global queue!
>> (Just curious - what are these crypt devices doing?)
>
> The crypt devices are providing small read-only encrypted file-systems
> - whose backing files exist on an external FAT file-system, and are
> created on-demand as needed. In this usage scenario, we'll only see
> typically a few of these devices being simultaneously accessed, (and
> the sd-card throughput is definitely the long-pole in the performance
> profile, so even when I beat on 80 or 90 concurrent instances, we're
> mainly waiting for mmcqd to complete transactions).
>
>>
>>> I'd be more than happy to discuss alternatives; but do we *really*
>>> need 2 work-queue threads per instance?
>>
>> yes.
>
> What if we made a note in the Kconfig advising against using the option in
> stacked configurations? (Or even make it depend on CONFIG_EMBEDDED)
>
> Thanks for your time,
>
> -san
>
>>
>> For separate io queue - see commit cabf08e4d3d1181d7c408edae97fb4d1c31518af
>>
>> | Add post-processing queue (per crypt device) for read operations.
>>
>> | Current implementation uses only one queue for all operations
>> | and this can lead to starvation caused by many requests waiting
>> | for memory allocation. But the needed memory-releasing operation
>> | is queued after these requests (in the same queue).
>>
>>
>> (and there were another problem with async crypt - callback is called
>> in interrupt context, bio must be submitted from separate workqueue IIRC)
>>
>>>> (cc: Alasdair - I think he will not accept the patch anyway.)
>>>
>>> Probably not, but at least we can get the discussion going
>>
>> I am not saying that I do not want to discuss this - but we must know
>> the real problems many queues are causing first.
>> And then think about possible solutions.
>>
>> Milan
>>
>
>
>
> --
> San Mehat *| *Staff Software Engineer *| *Android *| *Google Inc.
> 415.366.6172 (san@google.com)
>



--
San Mehat *| *Staff Software Engineer *| *Android *| *Google Inc.
415.366.6172 (san@google.com)

--
dm-devel mailing list
dm-devel@redhat.com
https://www.redhat.com/mailman/listinfo/dm-devel
 

Thread Tools




All times are GMT. The time now is 07:29 AM.

VBulletin, Copyright ©2000 - 2014, Jelsoft Enterprises Ltd.
Content Relevant URLs by vBSEO ©2007, Crawlability, Inc.
Copyright 2007 - 2008, www.linux-archive.org