redis/deps/jemalloc/src/bitmap.c

#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"

/******************************************************************************/

#ifdef BITMAP_USE_TREE

void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
    unsigned i;
    size_t group_count;

    assert(nbits > 0);
    assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));

    /*
     * Compute the number of groups necessary to store nbits bits, and
     * progressively work upward through the levels until reaching a level
     * that requires only one group.
     */
    binfo->levels[0].group_offset = 0;
    group_count = BITMAP_BITS2GROUPS(nbits);
    for (i = 1; group_count > 1; i++) {
        assert(i < BITMAP_MAX_LEVELS);
        binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
            + group_count;
        group_count = BITMAP_BITS2GROUPS(group_count);
    }
    binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
        + group_count;
    assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
    binfo->nlevels = i;
    binfo->nbits = nbits;
}
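
/*
 * Illustrative example (an addition, not part of the original source):
 * assuming 64-bit groups (BITMAP_GROUP_NBITS == 64), nbits == 1024 gives
 *
 *   levels[0].group_offset = 0    (16 leaf groups hold the 1024 bits)
 *   levels[1].group_offset = 16   (1 summary group, one bit per leaf group)
 *   levels[2].group_offset = 17   (total group count)
 *   nlevels = 2
 *
 * so bitmap_size() reports 17 groups, i.e. 136 bytes. Each level summarizes
 * the one below it, so a first-fit search can descend one group per level
 * instead of scanning every leaf group.
 */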

static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) {
    return binfo->levels[binfo->nlevels].group_offset;
}

void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
    size_t extra;
    unsigned i;

    /*
     * Bits are actually inverted with regard to the external bitmap
     * interface.
     */

    if (fill) {
        /* The "filled" bitmap starts out with all 0 bits. */
        memset(bitmap, 0, bitmap_size(binfo));
        return;
    }

    /*
     * The "empty" bitmap starts out with all 1 bits, except for trailing
     * unused bits (if any).  Note that each group uses bit 0 to correspond
     * to the first logical bit in the group, so extra bits are the most
     * significant bits of the last group.
     */
    memset(bitmap, 0xffU, bitmap_size(binfo));
    extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
        & BITMAP_GROUP_NBITS_MASK;
    if (extra != 0) {
        bitmap[binfo->levels[1].group_offset - 1] >>= extra;
    }
    for (i = 1; i < binfo->nlevels; i++) {
        size_t group_count = binfo->levels[i].group_offset -
            binfo->levels[i-1].group_offset;
        extra = (BITMAP_GROUP_NBITS - (group_count &
            BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
        if (extra != 0) {
            bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
        }
    }
}
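
/*
 * Worked trace (illustrative, assuming 64-bit groups): initializing an
 * "empty" bitmap with nbits == 1000 uses 16 leaf groups, so
 * extra = (64 - (1000 & 63)) & 63 = 24 and the high 24 bits of the last
 * leaf group are cleared. The loop then handles level 1, whose single
 * group carries only 16 valid summary bits, clearing its unused high 48
 * bits. Every remaining 1 bit denotes a logically unset position.
 */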

#else /* BITMAP_USE_TREE */

void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
    assert(nbits > 0);
    assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));

    binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
    binfo->nbits = nbits;
}

static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) {
    return binfo->ngroups;
}

void
bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
    size_t extra;

    if (fill) {
        memset(bitmap, 0, bitmap_size(binfo));
        return;
    }
    memset(bitmap, 0xffU, bitmap_size(binfo));
    extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
        & BITMAP_GROUP_NBITS_MASK;
    if (extra != 0) {
        bitmap[binfo->ngroups - 1] >>= extra;
    }
}
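
/*
 * Worked example (illustrative, assuming 64-bit groups): for nbits == 100
 * the flat bitmap occupies ngroups == 2 groups, and
 * extra = (64 - (100 & 63)) & 63 = 28, so the high 28 bits of bitmap[1]
 * are shifted away, leaving exactly 100 usable bits across both groups.
 */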

#endif /* BITMAP_USE_TREE */

size_t
bitmap_size(const bitmap_info_t *binfo) {
    return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
}
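
/*
 * Usage sketch (an illustrative addition, not part of jemalloc): a caller
 * sizes the bitmap via bitmap_info_init()/bitmap_size(), allocates backing
 * storage, then initializes it. malloc() stands in here for whatever
 * allocator the embedding code really uses; bitmap_sfu() and bitmap_set()
 * are the inline helpers declared in jemalloc/internal/bitmap.h.
 */
#if 0
static void
bitmap_usage_example(void) {
    bitmap_info_t binfo;
    bitmap_info_init(&binfo, 1024);
    bitmap_t *bm = (bitmap_t *)malloc(bitmap_size(&binfo));
    bitmap_init(bm, &binfo, false);         /* "empty": every bit starts unset. */
    size_t bit = bitmap_sfu(bm, &binfo);    /* claim the lowest free bit (0). */
    bitmap_set(bm, &binfo, 5);              /* mark logical bit 5 as in use. */
    (void)bit;
    free(bm);
}
#endif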