05-20-2008, 03:06 PM

Cluster Project branch, STABLE2, updated. cluster-2.03.02-26-gb88e25c

This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "Cluster Project".

http://sources.redhat.com/git/gitweb.cgi?p=cluster.git;a=commitdiff;h=b88e25c2d3679cf06f3b9c896365b359a29bba0f

The branch, STABLE2 has been updated
via b88e25c2d3679cf06f3b9c896365b359a29bba0f (commit)
from 07df53861cd45ac2f74fdec37a85a830280f37ab (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
commit b88e25c2d3679cf06f3b9c896365b359a29bba0f
Author: Bob Peterson <rpeterso@redhat.com>
Date: Tue May 20 09:37:53 2008 -0500

bz 446085: Back-port faster bitfit algorithm from gfs2 for better
performance.

-----------------------------------------------------------------------

Summary of changes:
gfs-kernel/src/gfs/bits.c | 85 +++++++++++++++++++++++++++++++-------------
gfs-kernel/src/gfs/bits.h | 3 +-
gfs-kernel/src/gfs/rgrp.c | 3 +-
3 files changed, 62 insertions(+), 29 deletions(-)
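
The interesting part of the patch below is the search strategy: rather than testing every 2-bit block state one at a time, the new gfs_bitfit scans byte by byte only until it reaches an unsigned-long boundary, then compares whole machine words against a skip pattern so that 16 (32-bit) or 32 (64-bit) block states can be ruled out with a single compare. Here is a minimal user-space sketch of that two-phase idea; the names (bitfit, NBBY_2BIT, the test bitmap in main) are illustrative stand-ins rather than the GFS kernel definitions, memcpy stands in for the kernel's aligned pointer walk, and the prefetch and misaligned-head bookkeeping of the real patch are omitted.

/* bitfit_sketch.c - a user-space sketch of the two-phase bitmap search.
 * Each block has a 2-bit state, four blocks per byte; the low state bit
 * means "used". This is an illustration, not the committed GFS code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NBBY_2BIT 4		/* four 2-bit block states per byte */
#define BIT_SIZE  2
#define BIT_MASK  0x3
#define NOENT     ((uint32_t)~0)

#if __SIZEOF_LONG__ == 4
#define LBITMASK   0x55555555UL
#define LBITSKIP55 0x55555555UL
#define LBITSKIP00 0x00000000UL
#else
#define LBITMASK   0x5555555555555555UL
#define LBITSKIP55 0x5555555555555555UL
#define LBITSKIP00 0x0000000000000000UL
#endif

/* Return the first block at or after goal whose state equals `state`,
 * or NOENT if the bitmap holds no such block. */
static uint32_t bitfit(const unsigned char *buf, unsigned int buflen,
		       uint32_t goal, unsigned char state)
{
	/* Words that cannot contain a match: all low bits clear when we
	 * want a used block, all low bits set when we want a free one. */
	unsigned long skip = (state & 1) ? LBITSKIP00 : LBITSKIP55;
	uint32_t nblocks = buflen * NBBY_2BIT;
	uint32_t blk = goal;

	while (blk < nblocks) {
		uint32_t byteidx = blk / NBBY_2BIT;

		/* Phase 2: at a long-aligned byte offset, swallow whole
		 * words whose masked value proves they hold no candidate. */
		if (blk % NBBY_2BIT == 0 &&
		    byteidx % sizeof(unsigned long) == 0) {
			while (byteidx + sizeof(unsigned long) <= buflen) {
				unsigned long w;

				memcpy(&w, buf + byteidx, sizeof(w));
				if ((w & LBITMASK) != skip)
					break;
				byteidx += sizeof(unsigned long);
				blk += sizeof(unsigned long) * NBBY_2BIT;
			}
			if (blk >= nblocks)
				break;
		}

		/* Phase 1: byte-at-a-time check of a single 2-bit field. */
		if (((buf[blk / NBBY_2BIT] >> ((blk % NBBY_2BIT) * BIT_SIZE))
		     & BIT_MASK) == state)
			return blk;
		blk++;
	}
	return NOENT;
}

int main(void)
{
	unsigned char bitmap[32];

	memset(bitmap, 0x55, sizeof(bitmap));	/* every block "used" (01) */
	bitmap[29] &= ~(BIT_MASK << 4);		/* free block 118 (00) */

	/* With 64-bit longs, the first three words are each rejected
	 * with a single compare before the byte-wise scan finds 118. */
	printf("first free block: %u\n",
	       (unsigned)bitfit(bitmap, sizeof(bitmap), 0, 0));
	return 0;
}

On x86_64 this turns up to 32 per-block tests into one 64-bit compare per word of bitmap, which is presumably where the better performance cited in the commit message comes from.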

diff --git a/gfs-kernel/src/gfs/bits.c b/gfs-kernel/src/gfs/bits.c
index da08a23..55427bb 100644
--- a/gfs-kernel/src/gfs/bits.c
+++ b/gfs-kernel/src/gfs/bits.c
@@ -31,6 +31,16 @@
 #include "gfs.h"
 #include "bits.h"
 
+#if BITS_PER_LONG == 32
+#define LBITMASK   (0x55555555UL)
+#define LBITSKIP55 (0x55555555UL)
+#define LBITSKIP00 (0x00000000UL)
+#else
+#define LBITMASK   (0x5555555555555555UL)
+#define LBITSKIP55 (0x5555555555555555UL)
+#define LBITSKIP00 (0x0000000000000000UL)
+#endif
+
 static const char valid_change[16] = {
 	        /* current */
 	/* n */ 0, 1, 1, 1,
@@ -115,41 +125,66 @@ gfs_testbit(struct gfs_rgrpd *rgd,
  */
 
 uint32_t
-gfs_bitfit(struct gfs_rgrpd *rgd,
-	   unsigned char *buffer, unsigned int buflen,
+gfs_bitfit(unsigned char *buffer, unsigned int buflen,
 	   uint32_t goal, unsigned char old_state)
 {
-	unsigned char *byte, *end, alloc;
-	uint32_t blk = goal;
-	unsigned int bit;
-
-	byte = buffer + (goal / GFS_NBBY);
-	bit = (goal % GFS_NBBY) * GFS_BIT_SIZE;
-	end = buffer + buflen;
-	alloc = (old_state & 1) ? 0 : 0x55;
-
+	const u8 *byte, *start, *end;
+	int bit, startbit;
+	u32 g1, g2, misaligned;
+	unsigned long *plong;
+	unsigned long lskipval;
+
+	lskipval = (old_state & GFS_BLKST_USED) ? LBITSKIP00 : LBITSKIP55;
+	g1 = (goal / GFS_NBBY);
+	start = buffer + g1;
+	byte = start;
+	end = buffer + buflen;
+	g2 = ALIGN(g1, sizeof(unsigned long));
+	plong = (unsigned long *)(buffer + g2);
+	startbit = bit = (goal % GFS_NBBY) * GFS_BIT_SIZE;
+	misaligned = g2 - g1;
+	if (!misaligned)
+		goto ulong_aligned;
+/* parse the bitmap a byte at a time */
+misaligned:
 	while (byte < end) {
-		if ((*byte & 0x55) == alloc) {
-			blk += (8 - bit) >> 1;
-
-			bit = 0;
-			byte++;
-
-			continue;
+		if (((*byte >> bit) & GFS_BIT_MASK) == old_state) {
+			return goal +
+				(((byte - start) * GFS_NBBY) +
+				 ((bit - startbit) >> 1));
 		}
-
-		if (((*byte >> bit) & GFS_BIT_MASK) == old_state)
-			return blk;
-
 		bit += GFS_BIT_SIZE;
-		if (bit >= 8) {
+		if (bit >= GFS_NBBY * GFS_BIT_SIZE) {
 			bit = 0;
 			byte++;
+			misaligned--;
+			if (!misaligned) {
+				plong = (unsigned long *)byte;
+				goto ulong_aligned;
+			}
 		}
-
-		blk++;
 	}
+	return BFITNOENT;
 
+/* parse the bitmap a unsigned long at a time */
+ulong_aligned:
+	/* Stop at "end - 1" or else prefetch can go past the end and segfault.
+	   We could "if" it but we'd lose some of the performance gained.
+	   This way will only slow down searching the very last 4/8 bytes
+	   depending on architecture.  I've experimented with several ways
+	   of writing this section such as using an else before the goto
+	   but this one seems to be the fastest. */
+	while ((unsigned char *)plong < end - 1) {
+		prefetch(plong + 1);
+		if (((*plong) & LBITMASK) != lskipval)
+			break;
+		plong++;
+	}
+	if ((unsigned char *)plong < end) {
+		byte = (const u8 *)plong;
+		misaligned += sizeof(unsigned long) - 1;
+		goto misaligned;
+	}
 	return BFITNOENT;
 }

diff --git a/gfs-kernel/src/gfs/bits.h b/gfs-kernel/src/gfs/bits.h
index ed38102..9990bae 100644
--- a/gfs-kernel/src/gfs/bits.h
+++ b/gfs-kernel/src/gfs/bits.h
@@ -22,8 +22,7 @@ void gfs_setbit(struct gfs_rgrpd *rgd,
 unsigned char gfs_testbit(struct gfs_rgrpd *rgd,
 			  unsigned char *buffer, unsigned int buflen,
 			  uint32_t block);
-uint32_t gfs_bitfit(struct gfs_rgrpd *rgd,
-		    unsigned char *buffer, unsigned int buflen,
+uint32_t gfs_bitfit(unsigned char *buffer, unsigned int buflen,
 		    uint32_t goal, unsigned char old_state);
 uint32_t gfs_bitcount(struct gfs_rgrpd *rgd,
 		      unsigned char *buffer, unsigned int buflen,
diff --git a/gfs-kernel/src/gfs/rgrp.c b/gfs-kernel/src/gfs/rgrp.c
index dd7fdcb..f0a4e3f 100644
--- a/gfs-kernel/src/gfs/rgrp.c
+++ b/gfs-kernel/src/gfs/rgrp.c
@@ -1444,8 +1444,7 @@ blkalloc_internal(struct gfs_rgrpd *rgd,
 	   allocatable block anywhere else, we want to be able wrap around and
 	   search in the first part of our first-searched bit block. */
 	for (x = 0; x <= length; x++) {
-		blk = gfs_bitfit(rgd,
-				 rgd->rd_bh[buf]->b_data + bits->bi_offset,
+		blk = gfs_bitfit(rgd->rd_bh[buf]->b_data + bits->bi_offset,
 				 bits->bi_len, goal, old_state);
 		if (blk != BFITNOENT)
 			break;

hooks/post-receive
--
Cluster Project
 
