remove minmax heap usage

This commit is contained in:
Anthony Pesch 2016-12-17 23:25:12 -08:00
parent c84d92fcd3
commit b7d4f88ea6
5 changed files with 115 additions and 538 deletions

View File

@ -149,7 +149,6 @@ set(REDREAM_SOURCES
src/core/interval_tree.c
src/core/list.c
src/core/log.c
src/core/mm_heap.c
src/core/option.c
src/core/profiler.cc
src/core/ringbuf.cc

View File

@ -1,207 +0,0 @@
#include "core/mm_heap.h"
#include "core/assert.h"
#include "core/core.h"
/* swap two heap slots through a temporary. wrapped in do/while (0) so the
   macro expands to a single statement and is safe inside an unbraced if */
#define SWAP_NODE(a, b) \
  do {                  \
    mm_type tmp = (a);  \
    (a) = (b);          \
    (b) = tmp;          \
  } while (0)
/* a min-max heap alternates min and max levels; the root (depth 0) and other
   even depths are min levels, odd depths are max levels. the depth of node
   `index` is the bit width of its 1-based position minus one */
static inline bool mm_is_max_level(int index) {
  int depth = -1;
  for (int n = index + 1; n; n >>= 1) {
    depth++;
  }
  return (depth & 1) == 1;
}
/* index of a node's parent. note, C's truncation toward zero means the root
   (index 0) maps to itself: (0 - 1) / 2 == 0 */
static inline int mm_parent(int index) {
  int off = index - 1;
  return off / 2;
}
/* index of a node's grandparent: the parent of its parent */
static inline int mm_grandparent(int index) {
  int parent = (index - 1) / 2;
  return (parent - 1) / 2;
}
/* a node has a grandparent exactly when its parent is not the root */
static inline bool mm_has_grandparent(int index) {
  return (index - 1) / 2 != 0;
}
/* index of a node's first (left) child */
static inline int mm_left_child(int index) {
  return index * 2 + 1;
}
/* index of a node's first grandchild: the left child of the left child,
   i.e. 2 * (2 * index + 1) + 1 == 4 * index + 3 */
static inline int mm_left_grandchild(int index) {
  return 4 * index + 3;
}
static inline int mm_is_child(int parent, int child) {
return parent == ((child - 1) / 2);
}
/* restore the min-max order property after a new element is placed at
   `index` (typically the end of the array). `cmp` is a strict less-than
   comparator; `size` is unused here but kept for a signature consistent
   with mm_sift_down */
static void mm_sift_up(mm_type *begin, int size, int index, mm_cmp cmp) {
  /* can't sift up past the root */
  if (!index) {
    return;
  }

  int ancestor_index = mm_parent(index);
  bool max_level = mm_is_max_level(ancestor_index);

  /* if the node is smaller (greater) than its parent, then it is smaller
     (greater) than all other nodes at max (min) levels up to the root. swap
     the node with its parent and check min (max) levels up to the root until
     the min-max order property is satisfied */
  if (cmp(*(begin + index), *(begin + ancestor_index)) ^ max_level) {
    SWAP_NODE(*(begin + ancestor_index), *(begin + index));
    index = ancestor_index;
  }
  /* if the node is greater (smaller) than its parent, then it is greater
     (smaller) than all other nodes at min (max) levels up to the root. the
     node is in the correct order with regards to its parent, but check max
     (min) levels up to the root until the min-max order property is satisfied */
  else {
    max_level = !max_level;
  }

  /* bubble the node up two levels at a time, through ancestors on the same
     (min or max) level parity */
  while (mm_has_grandparent(index)) {
    ancestor_index = mm_grandparent(index);

    /* once node is greater (smaller) than its grandparent, the min-max order
       property is satisfied */
    if (!(cmp(*(begin + index), *(begin + ancestor_index)) ^ max_level)) {
      break;
    }

    /* swap node with its grandparent */
    SWAP_NODE(*(begin + ancestor_index), *(begin + index));
    index = ancestor_index;
  }
}
/* restore the min-max order property for the subtree rooted at `index`,
   assuming only the node at `index` may be out of order (e.g. after a pop
   moved an arbitrary element into that slot) */
static void mm_sift_down(mm_type *begin, int size, int index, mm_cmp cmp) {
  bool max_level = mm_is_max_level(index);

  while (index < size) {
    /* get the smallest (largest) child or grandchild; only descendants
       inside [0, size) are considered, so partially-filled levels are safe */
    int smallest = index;

    int i = mm_left_child(index);
    int end = MIN(i + 2, size);
    for (; i < end; i++) {
      if (cmp(*(begin + i), *(begin + smallest)) ^ max_level) {
        smallest = i;
      }
    }

    i = mm_left_grandchild(index);
    end = MIN(i + 4, size);
    for (; i < end; i++) {
      if (cmp(*(begin + i), *(begin + smallest)) ^ max_level) {
        smallest = i;
      }
    }

    /* already the smallest (largest) node, nothing to do */
    if (smallest == index) {
      break;
    }

    /* swap the node with the smallest (largest) descendant */
    SWAP_NODE(*(begin + index), *(begin + smallest));

    /* if the swapped node was a child, then the current node, its child, and
       its grandchild are all ordered correctly at this point satisfying the
       min-max order property */
    if (mm_is_child(index, smallest)) {
      break;
    }

    /* the node landed in a grandchild slot, whose parent sits on the opposite
       (max/min) level; if that parent now violates the order, swap them */
    int parent = mm_parent(smallest);
    if (cmp(*(begin + parent), *(begin + smallest)) ^ max_level) {
      SWAP_NODE(*(begin + parent), *(begin + smallest));
    }

    /* if the swapped node was a grandchild, iteration must continue to
       ensure it's now ordered with regard to its descendants */
    index = smallest;
  }
}
/* verify the min-max order property over [begin, begin + size): values at
   nodes on even (odd) levels must be smaller (greater) than the values at
   all of their children and grandchildren. returns false on the first
   violation found */
bool mm_validate(mm_type *begin, int size, mm_cmp cmp) {
  for (int node = 0; node < size; node++) {
    bool flip_compare = mm_is_max_level(node);

    /* check the node against its (up to two) children */
    int desc = MIN(mm_left_child(node), size);
    int stop = MIN(desc + 2, size);
    while (desc < stop) {
      if (!(cmp(begin[node], begin[desc]) ^ flip_compare)) {
        return false;
      }
      desc++;
    }

    /* check the node against its (up to four) grandchildren */
    desc = MIN(mm_left_grandchild(node), size);
    stop = MIN(desc + 4, size);
    while (desc < stop) {
      if (!(cmp(begin[node], begin[desc]) ^ flip_compare)) {
        return false;
      }
      desc++;
    }
  }

  return true;
}
/* insert the element the caller has already appended at begin[size - 1] by
   sifting it up into heap order */
void mm_push(mm_type *begin, int size, mm_cmp cmp) {
  mm_sift_up(begin, size, size - 1, cmp);
}
/* the minimum always lives at the root (index 0 is a min level). `size` and
   `cmp` are unused, kept so the find functions share a signature.
   note: the returned pointer is only dereferenceable when size >= 1 */
mm_type *mm_find_min(mm_type *begin, int size, mm_cmp cmp) {
  return begin;
}
/* returns a pointer to the maximum element; expects size >= 1. in a min-max
   heap the max is the root for a single element, otherwise the larger of the
   root's children (indices 1 and 2) */
mm_type *mm_find_max(mm_type *begin, int size, mm_cmp cmp) {
  if (size == 1) {
    /* root must be the max */
    return begin;
  }

  if (size == 2) {
    /* root's only child must be the max */
    return begin + 1;
  }

  /* must be the larger of the two children */
  return cmp(begin[1], begin[2]) ? begin + 2 : begin + 1;
}
/* remove the minimum by swapping it into begin[size - 1] and restoring heap
   order over the remaining size - 1 elements; the caller shrinks the array
   afterwards. no-op on an empty heap */
void mm_pop_min(mm_type *begin, int size, mm_cmp cmp) {
  if (!size) {
    return;
  }

  mm_type *min = mm_find_min(begin, size, cmp);
  int min_index = (int)(min - begin);

  /* move the min to the back */
  mm_type last = begin[size - 1];
  begin[size - 1] = *min;
  *min = last;

  mm_sift_down(begin, size - 1, min_index, cmp);
}
/* remove the maximum by swapping it into begin[size - 1] and restoring heap
   order over the remaining size - 1 elements; the caller shrinks the array
   afterwards. no-op on an empty heap */
void mm_pop_max(mm_type *begin, int size, mm_cmp cmp) {
  if (!size) {
    return;
  }

  mm_type *max = mm_find_max(begin, size, cmp);
  int max_index = (int)(max - begin);

  /* move the max to the back */
  mm_type last = begin[size - 1];
  begin[size - 1] = *max;
  *max = last;

  mm_sift_down(begin, size - 1, max_index, cmp);
}

View File

@ -1,19 +0,0 @@
#ifndef MM_HEAP_H
#define MM_HEAP_H

// Min-max heap implementation, based on
// http://www.akira.ruc.dk/~keld/teaching/algoritmedesign_f03/Artikler/02../Atkinson86.pdf
// (Atkinson et al., "Min-Max Heaps and Generalized Priority Queues").
// The heap is laid out as an implicit binary tree over a caller-owned array,
// alternating min and max levels, so both the minimum and the maximum can be
// located in constant time.

#include <stdbool.h>

// element type stored in the heap
typedef void *mm_type;

// strict less-than comparator: returns true when lhs orders before rhs
typedef bool (*mm_cmp)(mm_type lhs, mm_type rhs);

// returns true if [begin, begin + size) satisfies the min-max order property
bool mm_validate(mm_type *begin, int size, mm_cmp cmp);

// sifts the element at begin[size - 1] into position; the caller appends the
// new element to the end of the array before calling
void mm_push(mm_type *begin, int size, mm_cmp cmp);

// return a pointer to the minimum (maximum) element; expects size >= 1
mm_type *mm_find_min(mm_type *begin, int size, mm_cmp cmp);
mm_type *mm_find_max(mm_type *begin, int size, mm_cmp cmp);

// move the minimum (maximum) element to begin[size - 1] and restore heap
// order over the remaining elements; the caller shrinks the array afterwards
void mm_pop_min(mm_type *begin, int size, mm_cmp cmp);
void mm_pop_max(mm_type *begin, int size, mm_cmp cmp);

#endif

View File

@ -1,5 +1,5 @@
#include "jit/passes/register_allocation_pass.h"
#include "core/mm_heap.h"
#include "core/list.h"
#include "jit/backend/jit_backend.h"
#include "jit/ir/ir.h"
#include "jit/pass_stats.h"
@ -10,20 +10,15 @@ DEFINE_STAT(fprs_spilled, "fprs spilled");
#define MAX_REGISTERS 32
struct interval {
/* register assigned to this interval */
const struct jit_register *reg;
struct ir_instr *instr;
struct ir_instr *reused;
struct ir_use *start;
struct ir_use *end;
struct ir_use *first;
struct ir_use *last;
struct ir_use *next;
int reg;
};
struct register_set {
int free_regs[MAX_REGISTERS];
int num_free_regs;
struct interval *live_intervals[MAX_REGISTERS];
int num_live_intervals;
struct list_node it;
};
struct ra {
@ -31,13 +26,14 @@ struct ra {
const struct jit_register *registers;
int num_registers;
/* allocation state */
struct register_set int_registers;
struct register_set float_registers;
struct register_set vector_registers;
/* intervals, keyed by register */
/* all intervals, keyed by register */
struct interval intervals[MAX_REGISTERS];
/* list of registers available for allocation */
struct list dead_intervals;
/* list of registers currently in use, sorted by next use */
struct list live_intervals;
};
static int ra_get_ordinal(const struct ir_instr *i) {
@ -48,90 +44,46 @@ static void ra_set_ordinal(struct ir_instr *i, int ordinal) {
i->tag = (intptr_t)ordinal;
}
static int ra_pop_register(struct register_set *set) {
if (!set->num_free_regs) {
return NO_REGISTER;
/* non-zero if `reg` supports the value type produced by `instr`'s result,
   i.e. the bit for that type is set in the register's type mask */
static int ra_reg_can_store(const struct jit_register *reg,
                            const struct ir_instr *instr) {
  return (reg->value_types & (1 << instr->result->type)) != 0;
}
/* return an interval (and the register it wraps) to the pool of registers
   available for allocation */
static void ra_add_dead_interval(struct ra *ra, struct interval *interval) {
  list_add(&ra->dead_intervals, &interval->it);
}
static void ra_add_live_interval(struct ra *ra, struct interval *interval) {
/* add interval to the live list, which is sorted by each interval's next
use */
struct list_node *after = NULL;
list_for_each_entry(it, &ra->live_intervals, struct interval, it) {
if (ra_get_ordinal(it->next->instr) >
ra_get_ordinal(interval->next->instr)) {
break;
}
after = &it->it;
}
return set->free_regs[--set->num_free_regs];
list_add_after(&ra->live_intervals, after, &interval->it);
}
static void ra_push_register(struct register_set *set, int reg) {
set->free_regs[set->num_free_regs++] = reg;
}
static bool ra_interval_cmp(const struct interval *lhs,
const struct interval *rhs) {
return !lhs->next ||
ra_get_ordinal(lhs->next->instr) < ra_get_ordinal(rhs->next->instr);
};
static struct interval *ra_head_interval(struct register_set *set) {
if (!set->num_live_intervals) {
return NULL;
static const struct jit_register *ra_alloc_blocked_register(
struct ra *ra, struct ir *ir, struct ir_instr *instr) {
/* spill the register whose next use is furthest away */
struct interval *interval = NULL;
list_for_each_entry_reverse(it, &ra->live_intervals, struct interval, it) {
if (ra_reg_can_store(it->reg, instr)) {
interval = it;
break;
}
}
mm_type *it = mm_find_min((mm_type *)set->live_intervals,
set->num_live_intervals, (mm_cmp)&ra_interval_cmp);
return *it;
}
CHECK_NOTNULL(interval);
static struct interval *ra_tail_interval(struct register_set *set) {
if (!set->num_live_intervals) {
return NULL;
}
mm_type *it = mm_find_max((mm_type *)set->live_intervals,
set->num_live_intervals, (mm_cmp)&ra_interval_cmp);
return *it;
}
static void ra_pop_head_interval(struct register_set *set) {
mm_pop_min((mm_type *)set->live_intervals, set->num_live_intervals,
(mm_cmp)&ra_interval_cmp);
set->num_live_intervals--;
}
static void ra_pop_tail_interval(struct register_set *set) {
mm_pop_max((mm_type *)set->live_intervals, set->num_live_intervals,
(mm_cmp)&ra_interval_cmp);
set->num_live_intervals--;
}
static void ra_insert_interval(struct register_set *set,
struct interval *interval) {
set->live_intervals[set->num_live_intervals++] = interval;
mm_push((mm_type *)set->live_intervals, set->num_live_intervals,
(mm_cmp)&ra_interval_cmp);
}
static struct register_set *ra_get_register_set(struct ra *ra,
enum ir_type type) {
if (ir_is_int(type)) {
return &ra->int_registers;
}
if (ir_is_float(type)) {
return &ra->float_registers;
}
if (ir_is_vector(type)) {
return &ra->vector_registers;
}
LOG_FATAL("Unexpected value type");
}
static int ra_alloc_blocked_register(struct ra *ra, struct ir *ir,
struct ir_instr *instr) {
struct ir_instr *insert_point = ir->current_instr;
struct register_set *set = ra_get_register_set(ra, instr->result->type);
/* spill the register who's next use is furthest away from start */
struct interval *interval = ra_tail_interval(set);
ra_pop_tail_interval(set);
/* the interval's value needs to be filled back from the stack before
/* the register's value needs to be filled back from the stack before
its next use */
struct ir_instr *insert_point = ir->current_instr;
struct ir_use *next_use = interval->next;
struct ir_use *prev_use = list_prev_entry(next_use, struct ir_use, it);
CHECK(next_use,
@ -199,16 +151,14 @@ static int ra_alloc_blocked_register(struct ra *ra, struct ir *ir,
ir_store_local(ir, local, interval->instr->result);
}
/* since the interval that this store belongs to has now expired, there's no
need to assign an ordinal to it */
/* reuse the old interval */
/* register's previous value is now spilled, reuse the interval for the new
value */
interval->instr = instr;
interval->reused = NULL;
interval->start = list_first_entry(&instr->result->uses, struct ir_use, it);
interval->end = list_last_entry(&instr->result->uses, struct ir_use, it);
interval->next = interval->start;
ra_insert_interval(set, interval);
interval->first = list_first_entry(&instr->result->uses, struct ir_use, it);
interval->last = list_last_entry(&instr->result->uses, struct ir_use, it);
interval->next = interval->first;
list_remove(&ra->live_intervals, &interval->it);
ra_add_live_interval(ra, interval);
/* reset insert point */
ir->current_instr = insert_point;
@ -222,26 +172,30 @@ static int ra_alloc_blocked_register(struct ra *ra, struct ir *ir,
return interval->reg;
}
static int ra_alloc_free_register(struct ra *ra, struct ir_instr *instr) {
struct register_set *set = ra_get_register_set(ra, instr->result->type);
/* get the first free register for this value type */
int reg = ra_pop_register(set);
if (reg == NO_REGISTER) {
return NO_REGISTER;
static const struct jit_register *ra_alloc_free_register(
struct ra *ra, struct ir_instr *instr) {
/* try to allocate the first free interval for this value type */
struct interval *interval = NULL;
list_for_each_entry(it, &ra->dead_intervals, struct interval, it) {
if (ra_reg_can_store(it->reg, instr)) {
interval = it;
break;
}
}
/* add interval */
struct interval *interval = &ra->intervals[reg];
interval->instr = instr;
interval->reused = NULL;
interval->start = list_first_entry(&instr->result->uses, struct ir_use, it);
interval->end = list_last_entry(&instr->result->uses, struct ir_use, it);
interval->next = interval->start;
interval->reg = reg;
ra_insert_interval(set, interval);
if (!interval) {
return NULL;
}
return reg;
/* make the interval live */
interval->instr = instr;
interval->first = list_first_entry(&instr->result->uses, struct ir_use, it);
interval->last = list_last_entry(&instr->result->uses, struct ir_use, it);
interval->next = interval->first;
list_remove(&ra->dead_intervals, &interval->it);
ra_add_live_interval(ra, interval);
return interval->reg;
}
/* if the first argument isn't used after this instruction, its register
@ -249,49 +203,42 @@ static int ra_alloc_free_register(struct ra *ra, struct ir_instr *instr) {
operations where the destination is the first argument.
TODO could reorder arguments for commutative binary ops and do this
with the second argument as well */
static int ra_reuse_arg_register(struct ra *ra, struct ir *ir,
struct ir_instr *instr) {
if (!instr->arg[0]) {
return NO_REGISTER;
static const struct jit_register *ra_reuse_arg_register(
struct ra *ra, struct ir *ir, struct ir_instr *instr) {
if (!instr->arg[0] || ir_is_constant(instr->arg[0])) {
return NULL;
}
int prefered = instr->arg[0]->reg;
if (prefered == NO_REGISTER) {
return NO_REGISTER;
}
/* make sure the register can hold the result type */
const struct jit_register *r = &ra->registers[prefered];
if (!(r->value_types & (1 << instr->result->type))) {
return NO_REGISTER;
return NULL;
}
/* if the argument's register is used after this instruction, it's not
trivial to reuse */
struct interval *interval = &ra->intervals[prefered];
if (list_next_entry(interval->next, struct ir_use, it)) {
return NO_REGISTER;
return NULL;
}
/* the argument's register is not used after the current instruction, so the
register can be reused for the result. note, since the interval min/max
heap does not support removal of an arbitrary interval, the interval
removal must be deferred. since there are no more uses, the interval will
expire on the next call to ra_expire_intervals, and then immediately
requeued by setting the reused property */
interval->reused = instr;
/* make sure the register can hold the result type */
if (!ra_reg_can_store(interval->reg, instr)) {
return NULL;
}
return prefered;
/* argument is no longer used, reuse its interval */
interval->instr = instr;
interval->first = list_first_entry(&instr->result->uses, struct ir_use, it);
interval->last = list_last_entry(&instr->result->uses, struct ir_use, it);
interval->next = interval->first;
list_remove(&ra->live_intervals, &interval->it);
ra_add_live_interval(ra, interval);
return interval->reg;
}
static void ra_expire_set(struct ra *ra, struct register_set *set,
struct ir_instr *instr) {
while (true) {
struct interval *interval = ra_head_interval(set);
if (!interval) {
break;
}
static void ra_expire_intervals(struct ra *ra, struct ir_instr *instr) {
list_for_each_entry_safe(interval, &ra->live_intervals, struct interval, it) {
/* intervals are sorted by their next use, once one fails to expire or
advance, they all will */
if (interval->next &&
@ -300,41 +247,23 @@ static void ra_expire_set(struct ra *ra, struct register_set *set,
}
/* remove interval from the sorted set */
ra_pop_head_interval(set);
list_remove(&ra->live_intervals, &interval->it);
/* if there are more uses, advance the next use and reinsert the interval
into the correct position */
if (interval->next && list_next_entry(interval->next, struct ir_use, it)) {
interval->next = list_next_entry(interval->next, struct ir_use, it);
ra_insert_interval(set, interval);
ra_add_live_interval(ra, interval);
}
/* if there are no more uses, but the register has been reused by
ra_reuse_arg_register, requeue the interval at this time */
else if (interval->reused) {
struct ir_instr *reused = interval->reused;
interval->instr = reused;
interval->reused = NULL;
interval->start =
list_first_entry(&reused->result->uses, struct ir_use, it);
interval->end = list_last_entry(&reused->result->uses, struct ir_use, it);
interval->next = interval->start;
ra_insert_interval(set, interval);
}
/* if there are no other uses, free the register assigned to this
interval */
/* if there are no other uses, free the register */
else {
ra_push_register(set, interval->reg);
ra_add_dead_interval(ra, interval);
}
}
}
static void ra_expire_intervals(struct ra *ra, struct ir_instr *instr) {
ra_expire_set(ra, &ra->int_registers, instr);
ra_expire_set(ra, &ra->float_registers, instr);
ra_expire_set(ra, &ra->vector_registers, instr);
}
static int use_cmp(const struct list_node *a_it, const struct list_node *b_it) {
static int ra_use_cmp(const struct list_node *a_it,
const struct list_node *b_it) {
struct ir_use *a = list_entry(a_it, struct ir_use, it);
struct ir_use *b = list_entry(b_it, struct ir_use, it);
return ra_get_ordinal(a->instr) - ra_get_ordinal(b->instr);
@ -355,23 +284,16 @@ static void ra_assign_ordinals(struct ir *ir) {
}
}
static void ra_init_sets(struct ra *ra, const struct jit_register *registers,
int num_registers) {
static void ra_reset(struct ra *ra, const struct jit_register *registers,
int num_registers) {
ra->registers = registers;
ra->num_registers = num_registers;
/* add a dead interval for each available register */
for (int i = 0; i < ra->num_registers; i++) {
const struct jit_register *r = &ra->registers[i];
if (r->value_types == VALUE_INT_MASK) {
ra_push_register(&ra->int_registers, i);
} else if (r->value_types == VALUE_FLOAT_MASK) {
ra_push_register(&ra->float_registers, i);
} else if (r->value_types == VALUE_VECTOR_MASK) {
ra_push_register(&ra->vector_registers, i);
} else {
LOG_FATAL("Unsupported register value mask");
}
struct interval *interval = &ra->intervals[i];
interval->reg = &ra->registers[i];
ra_add_dead_interval(ra, interval);
}
}
@ -379,8 +301,7 @@ void ra_run(struct ir *ir, const struct jit_register *registers,
int num_registers) {
struct ra ra = {0};
ra_init_sets(&ra, registers, num_registers);
ra_reset(&ra, registers, num_registers);
ra_assign_ordinals(ir);
list_for_each_entry(instr, &ir->instrs, struct ir_instr, it) {
@ -394,23 +315,23 @@ void ra_run(struct ir *ir, const struct jit_register *registers,
}
/* sort the instruction's use list */
list_sort(&result->uses, &use_cmp);
list_sort(&result->uses, &ra_use_cmp);
/* expire any old intervals, freeing up the registers they claimed */
ra_expire_intervals(&ra, instr);
/* first, try and reuse the register of one of the incoming arguments */
int reg = ra_reuse_arg_register(&ra, ir, instr);
if (reg == NO_REGISTER) {
const struct jit_register *reg = ra_reuse_arg_register(&ra, ir, instr);
if (!reg) {
/* else, allocate a new register for the result */
reg = ra_alloc_free_register(&ra, instr);
if (reg == NO_REGISTER) {
if (!reg) {
/* if a register couldn't be allocated, spill and try again */
reg = ra_alloc_blocked_register(&ra, ir, instr);
}
}
CHECK_NE(reg, NO_REGISTER, "Failed to allocate register");
result->reg = reg;
CHECK_NOTNULL(reg, "Failed to allocate register");
result->reg = (int)(reg - ra.registers);
}
}

View File

@ -1,117 +0,0 @@
#include <gtest/gtest.h>
#include "core/minmax_heap.h"
// popping either end of an empty heap must be a harmless no-op
TEST(MinMaxHeap, PopEmpty) {
  std::vector<int> elements;

  // shouldn't do anything, just sanity checking that it doesn't crash
  re::mmheap_pop_min(elements.begin(), elements.end());
  ASSERT_TRUE(re::mmheap_validate(elements.begin(), elements.end()));

  re::mmheap_pop_max(elements.begin(), elements.end());
  ASSERT_TRUE(re::mmheap_validate(elements.begin(), elements.end()));
}
// pop_min on a single-element heap moves the root to the back of the range
TEST(MinMaxHeap, PopMinRoot) {
  std::vector<int> elements = {1};

  re::mmheap_pop_min(elements.begin(), elements.end());
  ASSERT_EQ(elements.back(), 1);
  elements.pop_back();
  ASSERT_TRUE(re::mmheap_validate(elements.begin(), elements.end()));
}
// pop_max on a single-element heap: the root is also the max.
// note: this test previously called mmheap_pop_min (copy-paste from
// PopMinRoot), so the max path was never exercised here; fixed to call
// mmheap_pop_max as the test name intends.
TEST(MinMaxHeap, PopMaxRoot) {
  std::vector<int> elements = {1};

  re::mmheap_pop_max(elements.begin(), elements.end());
  ASSERT_EQ(elements.back(), 1);
  elements.pop_back();
  ASSERT_TRUE(re::mmheap_validate(elements.begin(), elements.end()));
}
// pop_max with one element removes the root
TEST(MinMaxHeap, PopMax1) {
  std::vector<int> elements = {1};

  re::mmheap_pop_max(elements.begin(), elements.end());
  ASSERT_EQ(elements.back(), 1);
  elements.pop_back();
  ASSERT_TRUE(re::mmheap_validate(elements.begin(), elements.end()));
}
// pop_max with two elements: the root's only child holds the max
TEST(MinMaxHeap, PopMax2) {
  std::vector<int> elements = {1, 2};

  re::mmheap_pop_max(elements.begin(), elements.end());
  ASSERT_EQ(elements.back(), 2);
  elements.pop_back();
  ASSERT_TRUE(re::mmheap_validate(elements.begin(), elements.end()));
}
// pop_max with three elements: the max is one of the root's two children;
// verify it is found regardless of which child slot holds it
TEST(MinMaxHeap, PopMax3) {
  const std::vector<std::vector<int>> heaps = {{1, 2, 3}, {1, 3, 2}};

  for (const auto &initial : heaps) {
    std::vector<int> elements = initial;

    re::mmheap_pop_max(elements.begin(), elements.end());
    ASSERT_EQ(elements.back(), 3);
    elements.pop_back();
    ASSERT_TRUE(re::mmheap_validate(elements.begin(), elements.end()));
  }
}
// push 0..N-1 in order, then verify pop_min yields them back in ascending
// order, validating the heap invariant after every single operation
TEST(MinMaxHeap, PushPopMinN) {
  static const int N = 1337;

  std::vector<int> elements;

  for (int value = 0; value < N; value++) {
    elements.push_back(value);
    re::mmheap_push(elements.begin(), elements.end());
    ASSERT_TRUE(re::mmheap_validate(elements.begin(), elements.end()));
  }

  for (int expected = 0; expected < N; expected++) {
    re::mmheap_pop_min(elements.begin(), elements.end());
    ASSERT_EQ(elements.back(), expected);
    elements.pop_back();
    ASSERT_TRUE(re::mmheap_validate(elements.begin(), elements.end()));
  }
}
// push 0..N-1 in order, then verify pop_max yields N-1..0 in descending
// order, validating the heap invariant after every single operation
TEST(MinMaxHeap, PushPopMaxN) {
  static const int N = 1337;

  std::vector<int> elements = {};

  for (int i = 0; i < N; i++) {
    elements.push_back(i);
    re::mmheap_push(elements.begin(), elements.end());
    ASSERT_TRUE(re::mmheap_validate(elements.begin(), elements.end()));
  }

  for (int i = N - 1; i >= 0; i--) {
    re::mmheap_pop_max(elements.begin(), elements.end());
    ASSERT_EQ(elements.back(), i);
    elements.pop_back();
    ASSERT_TRUE(re::mmheap_validate(elements.begin(), elements.end()));
  }
}