#include <assert.h>
#include <limits.h>
#include <stdio.h> // temporary
#include <stdlib.h>
#include "pool.h"
#define MOVEABLE_POOL 1
#define TRY_BLOCKER 1
-#if 1
+#ifndef NDEBUG
void check_invariants(struct pool_head *head) {
int avail;
struct pool_item *p;
head->avail = limit - base;
head->move = move;
head->move_up = move_up;
+#ifndef NDEBUG
check_invariants(head);
-}
-
-bool pool_alloc(
- struct pool_head *head,
- struct pool_item *item,
- int size
-) {
- struct pool_item *p, *q;
- int avail, gap;
-
- // item must not be allocated yet
- assert(item->prev == NULL && item->next == NULL);
-
- // check size, disregarding fragmentation
- avail = head->avail - size;
- if (avail < 0)
- return false;
-
- // first-fit algorithm
- q = &head->item;
- while (true) {
- p = q;
- q = p->next;
-
- // calculate gap at current position
- gap = q->base - p->limit;
- //printf("p %p q %p gap %d\n", p, q, gap);
-
- // see if request fits in gap
- if (size <= gap)
- goto found;
-
- // see if gap can be left alone
- avail -= gap;
- if (avail < 0) {
-#ifdef MOVEABLE_POOL
- // move block after gap down into gap
- if (head->move == NULL || head->move_up == NULL)
- return false;
- avail += gap;
-
- head->move(q, p->limit);
- q->limit += p->limit - q->base;
- q->base = p->limit;
-#else
- return false;
#endif
- }
-
- assert(q != &head->item);
- }
-
-found:
- item->base = p->limit;
- item->limit = p->limit + size;
-
- // insert into list at found position
- item->prev = p;
- p->next = item;
- item->next = q;
- q->prev = item;
-
- // keep track of total allocation
- head->avail -= size;
- check_invariants(head);
- return true;
}
-
+
bool pool_realloc(
struct pool_head *head,
struct pool_item *item,
- int size
+ int size,
+ int offset,
+ bool dir
) {
- struct pool_item *p, *q;
+ int old_size, new_base, new_limit;
+ int end_offset;
int size_change, avail, gap;
+ bool orig_dir;
+ struct pool_item *p, *q;
#ifdef TRY_BLOCKER
- struct pool_item *blocker;
- int blocker_size;
+ struct pool_item *r, *target_item;
+ int target_size;
+ bool prev_next;
#endif
-#ifdef MOVEABLE_POOL
- int avail_save;
-#endif
-
- // item must be already allocated
- assert(item->prev && item->next);
-
- // realloc requires at least the move-down routine
- // (even though some requests succeed without move)
- assert(head->move);
-
- // try to change current block size without copying
- size_change = size + item->base - item->limit;
+ int old_base_truncated, new_base_offset;
+ int old_limit_truncated, new_limit_offset;
+
+ // see if request is for "alloc" or "realloc"
+ old_size = 0;
+ if (offset != INT_MIN) {
+ // realloc, item must be already allocated
+ assert(item->prev && item->next);
+ old_size = item->limit - item->base;
+
+ // calculate new spot if block is extended not moved
+ new_base = item->base - offset;
+ new_limit = new_base + size;
#ifndef TRY_BLOCKER
- if (item->base + size <= item->next->base)
- goto resize;
+ if (new_base >= item->prev->limit && new_limit <= item->next->base)
+ goto resize;
+#endif
+ }
+#ifndef NDEBUG
+ else {
+ // alloc, item must not be already allocated
+ assert(item->prev == NULL && item->next == NULL);
+ }
#endif
// check size, disregarding fragmentation
+ size_change = size - old_size;
avail = head->avail - size_change;
if (avail < 0)
return false;
- // first-fit algorithm, until our own list entry
- q = &head->item;
-
-#ifdef TRY_BLOCKER
-blocker_test:
- // enter here when blocker has changed
- blocker = item->next;
-
- // see if we fit, given the new blocker
- if (item->base + size <= blocker->base)
- goto resize;
-
- // otherwise, see if blocker is moveable
- blocker_size =
- blocker == &head->item ? INT_MAX : blocker->limit - blocker->base;
-#endif
-
+ // work in specified direction, then reverse
+ orig_dir = dir;
while (true) {
- p = q;
- q = p->next;
- if (q == item)
- break;
-
- // calculate gap at current position
- gap = q->base - p->limit;
-
- // see if request fits in gap
- if (size <= gap)
- goto found;
+ p = &head->item;
+ q = &head->item;
#ifdef TRY_BLOCKER
- // see if blocker fits in gap
- if (blocker_size <= gap && head->move_up) {
- // move blocker down into gap
- head->move(blocker, p->limit);
- blocker->limit += p->limit - blocker->base;
- blocker->base = p->limit;
-
- // remove from list
- blocker->prev->next = blocker->next;
- blocker->next->prev = blocker->prev;
-
- // insert into list at gap position
- blocker->prev = p;
- p->next = blocker;
- blocker->next = q;
- q->prev = blocker;
-
- // calculate new blocker
- q = blocker;
- goto blocker_test;
- }
-#endif
-
- // see if gap can be left alone
- avail -= gap;
- if (avail < 0) {
- // move block after gap down into gap
- if (head->move_up == NULL)
- return false;
- avail += gap;
-
- head->move(q, p->limit);
- q->limit += p->limit - q->base;
- q->base = p->limit;
- }
-
- assert(q != &head->item);
- }
-
-#ifdef MOVEABLE_POOL
- // optional step: try an ordinary realloc in rest of list
- // this is because if the pool consists of a tightly packed region
- // followed by a mainly free region, it's best to move us to there
- avail_save = avail;
-#endif
-
- // first-fit algorithm, including and after our own list entry,
- // the first iteration (ours) is different because q != p->next,
- // this causes us to ignore our own entry in the gap calculation
- q = q->next;
- goto loop_entry;
- do {
- p = q;
- q = p->next;
- loop_entry:
- gap = q->base - p->limit;
- if (size <= gap)
- goto found;
- if (q == &head->item)
- break;
- avail -= gap;
- } while (avail >= 0);
-#ifdef MOVEABLE_POOL
- if (head->move_up == NULL)
- return false;
-
- avail = avail_save;
- p = item->prev;
-
- // calculate gap that would be left if we didn't move ourself
- gap = item->base - p->limit;
-
- // see if gap can be left alone
- avail -= gap;
- if (avail < 0) {
- // move block after gap (ourself) down into gap
- avail += gap;
-
- head->move(item, p->limit);
- item->limit += p->limit - item->base;
- item->base = p->limit;
-
- // if didn't do ordinary realloc above, see if we now fit
- //if (item->base + size <= item->next->base)
- // goto resize;
- }
-
- // at this point we will not move our own block anymore (because
- // a last-fit approach is risky, given the likelihood that we will
- // soon want to resize again), compact the remainder down from top
- p = &head->item;
- while (true) {
- q = p;
- p = q->prev;
- if (p == item) {
- assert(item->base + size <= item->next->base);
- goto resize;
- }
-
- // calculate gap at current position
- gap = q->base - p->limit;
-
-#ifdef TRY_BLOCKER
- // see if blocker fits in gap
- if (blocker_size <= gap && blocker != p) {
- // if moveable realloc is used, then user must provide move_up routine
- assert(head->move_up);
- head->move_up(blocker, q->base);
- blocker->base += q->base - blocker->limit;
- blocker->limit = q->base;
-
- // remove from list
- blocker->prev->next = blocker->next;
- blocker->next->prev = blocker->prev;
-
- // insert into list at gap position
- blocker->prev = p;
- p->next = blocker;
- blocker->next = q;
- q->prev = blocker;
-
- // calculate new blocker
- p = blocker;
- blocker = item->next;
-
- // see if we fit, given the new blocker
- if (item->base + size <= blocker->base)
+blocker_test:
+ // enter here when either blocker has changed
+ target_item = item;
+ target_size = size;
+ //printf("a target_size %d\n", target_size);
+ if (offset != INT_MIN) {
+ if (new_base >= item->prev->limit && new_limit <= item->next->base)
goto resize;
- // otherwise, see if blocker is moveable
- blocker_size =
- blocker == &head->item ? INT_MAX : blocker->limit - blocker->base;
- continue;
+ r = item->prev;
+ prev_next = false;
+ do {
+ if (new_base < r->limit && r != &head->item) {
+ gap = r->limit - r->base;
+ if (gap < target_size) {
+ target_item = r;
+ target_size = gap;
+ //printf("b target_size %d\n", target_size);
+ }
+ }
+ r = item->next;
+ prev_next = !prev_next;
+ } while (prev_next);
}
#endif
- // if gap too large, move block before gap up into gap
- avail -= gap;
- if (avail < 0) {
- avail += gap;
-
- // if moveable realloc is used, user must provide move_up routine
- assert(head->move_up);
- head->move_up(p, q->base);
- p->base += q->base - p->limit;
- p->limit = q->base;
- }
-
- assert(p != &head->item);
- }
+ while (true) {
+ //printf("avail %d\n", avail);
+ // advance, so that p, q are before/after gap
+ if (!dir) {
+ p = q;
+ q = p->next;
+//#ifdef TRY_BLOCKER
+// if (q == item || q == target_item) {
+//#else
+ if (q == item) {
+//#endif
+ q = q->next;
+ break;
+ }
+ //printf("forw %p(%d)->%p(%d)\n", p, p->limit, q, q->base);
+ }
+ else {
+ q = p;
+ p = q->prev;
+//#ifdef TRY_BLOCKER
+// if (p == item || p == target_item) {
+//#else
+ if (p == item) {
+//#endif
+ p = p->prev;
+ break;
+ }
+ //printf("back %p(%d)->%p(%d)\n", p, p->limit, q, q->base);
+ }
+
+ // see if target fits in current gap
+ gap = q->base - p->limit;
+#ifdef TRY_BLOCKER
+ if (target_size <= gap) {
+ //printf("target\n");
+ if (target_item == item)
+ goto found;
+ if (!dir) {
+ if (!head->move)
+ goto no_blocker;
+ head->move(target_item, p->limit);
+ target_item->limit += p->limit - target_item->base;
+ target_item->base = p->limit;
+ }
+ else {
+ if (!head->move_up)
+ goto no_blocker;
+ head->move_up(target_item, q->base);
+ target_item->base += q->base - target_item->limit;
+ target_item->limit = q->base;
+ }
+
+ // remove from list
+ target_item->prev->next = target_item->next;
+ target_item->next->prev = target_item->prev;
+ if (!dir)
+ q = p->next;
+ else
+ p = q->prev;
+
+ // insert into list at found position
+ target_item->prev = p;
+ p->next = target_item;
+ target_item->next = q;
+ q->prev = target_item;
+
+ p = target_item;
+ q = target_item;
+ goto blocker_test;
+
+ no_blocker:
+ ;
+ }
#else
- return false;
+ if (size <= gap)
+ goto found;
#endif
-found:
- // move ourself into found spot, always non-overlapping or downwards
- assert(head->move);
- head->move(item, p->limit);
- //item->limit += p->limit - item->base;
- item->base = p->limit;
-
- // remove from list
- item->prev->next = item->next;
- item->next->prev = item->prev;
-
- // insert into list at found position
- item->prev = p;
- p->next = item;
- item->next = q;
- q->prev = item;
-
-resize:
- // resize in place (or do the commented code above if moved ourself)
- item->limit = item->base + size;
-
- // keep track of total allocation
- head->avail -= size_change;
- check_invariants(head);
- return true;
-}
-
-bool pool_realloc_base(
- struct pool_head *head,
- struct pool_item *item,
- int size
-) {
- struct pool_item *p, *q;
- int size_change, avail, gap;
-#ifdef TRY_BLOCKER
- struct pool_item *blocker;
- int blocker_size;
-#endif
+ // see if gap can be left alone
+ avail -= gap;
+ if (avail < 0) {
#ifdef MOVEABLE_POOL
- int avail_save;
+ //printf("compact\n");
+ if (!dir) {
+ if (!head->move)
+#ifndef NDEBUG
+ {
+ check_invariants(head);
#endif
-
- // item must be already allocated
- assert(item->prev && item->next);
-
- // realloc_base requires at least the move-up routine
- // (even though some requests succeed without move)
- assert(head->move_up);
-
- // try to change current block size without copying
- size_change = size + item->base - item->limit;
-#ifndef TRY_BLOCKER
- if (item->limit - size >= item->prev->limit)
- goto resize;
+ return false;
+#ifndef NDEBUG
+ }
#endif
-
- // check size, disregarding fragmentation
- avail = head->avail - size_change;
- if (avail < 0)
- return false;
-
- // last-fit algorithm, until our own list entry
- p = &head->item;
-
-#ifdef TRY_BLOCKER
-blocker_test:
- // enter here when blocker has changed
- blocker = item->prev;
-
- // see if we fit, given the new blocker
- if (item->limit - size >= blocker->limit)
- goto resize;
-
- // otherwise, see if blocker is moveable
- blocker_size =
- blocker == &head->item ? INT_MAX : blocker->limit - blocker->base;
+ head->move(q, p->limit);
+ q->limit += p->limit - q->base;
+ q->base = p->limit;
+ }
+ else {
+ if (!head->move_up)
+#ifndef NDEBUG
+ {
+ check_invariants(head);
#endif
-
- while (true) {
- q = p;
- p = q->prev;
- if (p == item)
- break;
-
- // calculate gap at current position
- gap = q->base - p->limit;
-
- // see if request fits in gap
- if (size <= gap)
- goto found;
-
-#ifdef TRY_BLOCKER
- // see if blocker fits in gap
- if (blocker_size <= gap && head->move) {
- // move blocker up into gap
- head->move_up(blocker, q->base);
- blocker->base += q->base - blocker->limit;
- blocker->limit = q->base;
-
- // remove from list
- blocker->prev->next = blocker->next;
- blocker->next->prev = blocker->prev;
-
- // insert into list at gap position
- blocker->prev = p;
- p->next = blocker;
- blocker->next = q;
- q->prev = blocker;
-
- // calculate new blocker
- p = blocker;
- goto blocker_test;
- }
+ return false;
+#ifndef NDEBUG
+ }
#endif
-
- // see if gap can be left alone
- avail -= gap;
- if (avail < 0) {
- // move block before gap up into gap
- if (head->move == NULL)
+ head->move_up(p, q->base);
+ p->base += q->base - p->limit;
+ p->limit = q->base;
+ }
+ avail += gap;
+
+ // if we have just moved the target, then the target is
+ // no longer valid and we should go to blocker_test, but
+ // the loop is about to terminate anyway => don't bother
+#else
return false;
-
- avail += gap;
+#endif
+ }
- head->move_up(p, q->base);
- p->base += q->base - p->limit;
- p->limit = q->base;
+ assert((dir ? p : q) != &head->item);
}
+ //printf("done %p(%d)->%p(%d)\n", p, p->limit, q, q->base);
- assert(p != &head->item);
- }
-
-#ifdef MOVEABLE_POOL
- // optional step: try an ordinary realloc in rest of list
- // this is because if the pool consists of a tightly packed region
- // preceded by a mainly free region, it's best to move us to there
- avail_save = avail;
-#endif
-
- // last-fit algorithm, including and before our own list entry,
- // the first iteration (ours) is different because p != q->prev,
- // this causes us to ignore our own entry in the gap calculation
- p = p->prev;
- goto loop_entry;
- do {
- q = p;
- p = q->prev;
- loop_entry:
+ // have compacted up from bottom and reached our item,
+ // or compacted down from top and reached our item
+ // (test is slightly different here, as we will ignore
+ // our item for the purpose of calculating the gap)
gap = q->base - p->limit;
if (size <= gap)
goto found;
- if (p == &head->item)
- break;
- avail -= gap;
- } while (avail >= 0);
-#ifdef MOVEABLE_POOL
- if (head->move == NULL)
- return false;
-
- avail = avail_save;
- q = item->next;
-
- // calculate gap that would be left if we didn't move ourself
- gap = q->base - item->limit;
-
- // see if gap can be left alone
- avail -= gap;
- if (avail < 0) {
- // move block before gap (ourself) up into gap
- avail += gap;
- head->move_up(item, q->base);
- item->base += q->base - item->limit;
- item->limit = q->base;
-
- // if didn't do ordinary realloc above, see if we now fit
- //if (item->limit - size >= item->prev->limit)
- // goto resize;
+ dir = !dir;
+ //printf("dir changed to %d\n", dir);
+ assert(dir != orig_dir);
}
- // at this point we will not move our own block anymore (because
- // a first-fit approach is risky, given the likelihood that we will
- // soon want to resize again), compact the remainder up from bottom
- q = &head->item;
- while (true) {
- p = q;
- q = p->next;
- if (q == item) {
- assert(item->limit - size >= item->prev->limit);
- goto resize;
- }
-
- // calculate gap at current position
- gap = q->base - p->limit;
-
-#ifdef TRY_BLOCKER
- // see if blocker fits in gap
- if (blocker_size <= gap && blocker != q) {
- // if moveable realloc is used, then user must provide move routine
- assert(head->move);
- head->move(blocker, p->limit);
- blocker->limit += p->limit - blocker->base;
- blocker->base = p->limit;
-
- // remove from list
- blocker->prev->next = blocker->next;
- blocker->next->prev = blocker->prev;
-
- // insert into list at gap position
- blocker->prev = p;
- p->next = blocker;
- blocker->next = q;
- q->prev = blocker;
-
- // calculate new blocker
- q = blocker;
- blocker = item->prev;
-
- // see if we fit, given the new blocker
- if (item->limit - size >= blocker->limit)
- goto resize;
+found:
+ //printf("found\n");
+ // align ourself in found spot
+ // (try to allow future extension in desired direction)
+ if (orig_dir == false) {
+ new_base = p->limit;
+ new_limit = new_base + size;
+ }
+ else {
+ new_limit = q->base;
+ new_base = new_limit - size;
+ }
- // otherwise, see if blocker is moveable
- blocker_size =
- blocker == &head->item ? INT_MAX : blocker->limit - blocker->base;
- continue;
+ if (offset != INT_MIN) {
+ // move ourself into found spot
+
+ // we have "offset", which is position of start of existing data
+ // relative to start of new block, now calculate symmetric one:
+ // position of end of existing data relative to end of new block
+ // (new_limit + end_offset) - (new_base + offset) = old_size
+ // (new_limit - new_base) + (end_offset - offset) = old_size
+ // size + end_offset - offset = old_size
+ // end_offset = offset - (size - old_size)
+ end_offset = offset - size_change;
+
+ old_base_truncated = item->base;
+ new_base_offset = new_base;
+ if (offset < 0)
+ // copy truncated start of old block to base of new block
+ old_base_truncated -= offset;
+ else
+ // copy original start of old block to offset in new block
+ new_base_offset += offset;
+
+ old_limit_truncated = item->limit;
+ new_limit_offset = new_limit;
+ if (end_offset >= 0)
+ // copy truncated end of old block to limit of new block
+ old_limit_truncated -= end_offset;
+ else
+ // copy original end of old block to offset in new block
+ new_limit_offset += end_offset;
+
+ offset = new_base_offset - old_base_truncated; // amount to move by
+ if (offset < 0) {
+ if (!head->move)
+#ifndef NDEBUG
+ {
+ check_invariants(head);
+#endif
+ return false;
+#ifndef NDEBUG
+ }
+#endif
+ item->base = old_base_truncated;
+ item->limit = old_limit_truncated;
+ head->move(item, new_base_offset);
}
+ else if (offset > 0) {
+ if (!head->move_up)
+#ifndef NDEBUG
+ {
+ check_invariants(head);
#endif
-
- // if gap too large, move block after gap down into gap
- avail -= gap;
- if (avail < 0) {
- avail += gap;
-
- // if moveable realloc is used, user must provide move routine
- assert(head->move);
- head->move(q, p->limit);
- q->limit += p->limit - q->base;
- q->base = p->limit;
+ return false;
+#ifndef NDEBUG
+ }
+#endif
+ item->base = old_base_truncated;
+ item->limit = old_limit_truncated;
+ head->move_up(item, new_limit_offset);
}
- assert(q != &head->item);
+ // remove from list
+ item->prev->next = item->next;
+ item->next->prev = item->prev;
}
-#else
- return false;
-#endif
-
-found:
- // move ourself into found spot, always non-overlapping or downwards
- assert(head->move_up);
- head->move_up(item, q->base);
- //item->base += q->base - item->limit;
- item->limit = q->base;
-
- // remove from list
- item->prev->next = item->next;
- item->next->prev = item->prev;
// insert into list at found position
item->prev = p;
q->prev = item;
resize:
- // resize in place (or do the commented code above if moved ourself)
- item->base = item->limit - size;
+ //printf("fits %p(%d)->%p(%d) [%d,%d)\n", item->prev, item->prev->limit, item->next, item->next->base, new_base, new_limit);
+ // resize in place
+ item->base = new_base;
+ item->limit = new_limit;
// keep track of total allocation
head->avail -= size_change;
+#ifndef NDEBUG
check_invariants(head);
+#endif
return true;
}
// keep track of total allocation
head->avail += item->limit - item->base;
+#ifndef NDEBUG
check_invariants(head);
+#endif
}
#endif
#if !defined(INODE_SWAP) && !defined(PREALLOCATE_SWAP)
rassert(
- pool_realloc(&swap_table, &victim->swap_item, victim_swap_blocks + blocks)
+ pool_realloc(
+ &swap_table,
+ &victim->swap_item,
+ victim_swap_blocks + blocks,
+ 0,
+ false
+ )
);
#endif
goto loop_entry;
// add to swap pool
victim_swap_blocks = 0;
#if !defined(INODE_SWAP) && !defined(PREALLOCATE_SWAP)
- rassert(pool_alloc(&swap_table, &victim->swap_item, blocks));
+ rassert(pool_alloc(&swap_table, &victim->swap_item, blocks, false));
#endif
// remove from LRU list
#ifndef PREALLOCATE_CORE
// no, reduce core allocation
rassert(
- pool_realloc_base(&core_table, &victim->core_item, victim_core_blocks)
+ pool_realloc(
+ &core_table,
+ &victim->core_item,
+ victim_core_blocks,
+ -blocks,
+ true
+ )
);
#endif
if (
process_avail < blocks
#if defined(PREALLOCATE_SWAP) && !defined(MOVEABLE_SWAP)
- || !pool_alloc(&swap_table, &process->swap_item, blocks)
+ || !pool_alloc(&swap_table, &process->swap_item, blocks, false)
#endif
)
return false;
// allocate core and possible swap
#ifdef MOVEABLE_CORE
- rassert(pool_alloc(&core_table, &process->core_item, blocks));
+ rassert(pool_alloc(&core_table, &process->core_item, blocks, false));
#else
- if (!pool_alloc(&core_table, &process->core_item, blocks)) {
+ if (!pool_alloc(&core_table, &process->core_item, blocks, false)) {
#ifdef INODE_SWAP
//printf("deref inode %d\n", process->swap_inode->c_num);
i_deref(process->swap_inode);
}
#endif
#if defined(PREALLOCATE_SWAP) && defined(MOVEABLE_SWAP)
- rassert(pool_alloc(&swap_table, &process->swap_item, blocks));
+ rassert(pool_alloc(&swap_table, &process->swap_item, blocks, false));
#endif
#ifdef INDIRECT_CORE
#if defined(PREALLOCATE_SWAP) && !defined(MOVEABLE_SWAP)
pool_free(&swap_table, &process->swap_item);
- if (!pool_alloc(&swap_table, &process->swap_item, blocks)) {
- rassert(pool_alloc(&swap_table, &process->swap_item, old_blocks));
+ if (!pool_alloc(&swap_table, &process->swap_item, blocks, false)) {
+ rassert(pool_alloc(&swap_table, &process->swap_item, old_blocks, false));
return false;
}
#endif
// reallocate core and possible swap
#ifdef MOVEABLE_CORE
- rassert(pool_realloc(&core_table, &process->core_item, blocks));
+ rassert(pool_realloc(&core_table, &process->core_item, blocks, 0, false));
#else
- if (!pool_realloc(&core_table, &process->core_item, blocks)) {
+ if (!pool_realloc(&core_table, &process->core_item, blocks, 0, false)) {
#if defined(PREALLOCATE_SWAP) && !defined(MOVEABLE_SWAP)
pool_free(&swap_table, &process->swap_item);
- rassert(pool_alloc(&swap_table, &process->swap_item, old_blocks));
+ rassert(pool_alloc(&swap_table, &process->swap_item, old_blocks, false));
#endif
return false;
}
#endif
#if defined(PREALLOCATE_SWAP) && defined(MOVEABLE_SWAP)
pool_free(&swap_table, &process->swap_item);
- rassert(pool_alloc(&swap_table, &process->swap_item, blocks));
+ rassert(pool_alloc(&swap_table, &process->swap_item, blocks, false));
#endif
#ifdef INDIRECT_CORE
// add to core pool
process_core_blocks = 0;
#ifndef PREALLOCATE_CORE
- rassert(pool_alloc(&core_table, &process->core_item, blocks));
+ rassert(pool_alloc(&core_table, &process->core_item, blocks, false));
#endif
goto loop_entry_full;
}
udata.u_offset = (long)process_swap_blocks << BLOCK_SHIFT;
f_trunc(process->swap_inode);
#else
- rassert(pool_realloc(&swap_table, &process->swap_item, process_swap_blocks));
+ rassert(
+ pool_realloc(
+ &swap_table,
+ &process->swap_item,
+ process_swap_blocks,
+ 0,
+ false
+ )
+ );
#endif
loop_entry_partial:
// increase core allocation
#ifndef PREALLOCATE_CORE
rassert(
- pool_realloc_base(&core_table,
+ pool_realloc(
+ &core_table,
&process->core_item,
- process_core_blocks + blocks
+ process_core_blocks + blocks,
+ blocks,
+ true
)
);
#endif
hash = (hash & 0xffffLL) + (hash >> 16);
hash = (hash & 0xffLL) + (hash >> 8);
hash = (hash & 0xffLL) + (hash >> 8);
- assert(core_block_mem[addr] == 0xaa);
+ rassert(core_block_mem[addr] == 0xaa);
core_block_mem[addr] = (uint8_t)hash;
}
}
hash = (hash & 0xffLL) + (hash >> 8);
hash = (hash & 0xffLL) + (hash >> 8);
rassert(core_block_mem[addr] == (uint8_t)hash);
-#ifndef NDEBUG
core_block_mem[addr] = 0xaa;
-#endif
}
}
hash = (hash & 0xffLL) + (hash >> 8);
hash = (hash & 0xffLL) + (hash >> 8);
rassert(swap_block_mem[addr] == (uint8_t)hash);
-#ifndef NDEBUG
swap_block_mem[addr] = 0xaa;
-#endif
}
}
#endif
//printf("core_copy %ld(%d) %ld(%d) %ld(%d)\n", src_base, (int)(src_base >> BLOCK_SHIFT), dest_base, (int)(dest_base >> BLOCK_SHIFT), size, (int)((size + (BLOCK_SIZE - 1)) >> BLOCK_SHIFT));
for (i = 0L; i < size; ++i) {
core_block_mem[dest_base + i] = core_block_mem[src_base + i];
-#ifndef NDEBUG
core_block_mem[src_base + i] = 0xaa;
-#endif
}
}
size = -size;
for (i = -1L; i >= size; --i) {
core_block_mem[dest_limit + i] = core_block_mem[src_limit + i];
-#ifndef NDEBUG
core_block_mem[src_limit + i] = 0xaa;
-#endif
}
}
#endif
long i, count;
//printf("swap_read %d %ld %ld %ld\n", inode->c_num, offset, core_base, size);
-#ifndef NDEBUG
for (i = 0; i < size; ++i)
- assert(core_block_mem[core_base + i] == 0xaa);
-#endif
+ rassert(core_block_mem[core_base + i] == 0xaa);
for (i = 0; (count = size - i) > 0; i += count) {
if (count > TRANSFER_SIZE)
rassert(writei(inode) == (int)count && udata.u_error == 0);
}
-#ifndef NDEBUG
memset(core_block_mem + core_base, 0xaa, size);
-#endif
}
#else
#ifndef INDIRECT_SWAP
//printf("swap_copy %ld(%d) %ld(%d) %ld(%d)\n", src_base, (int)(src_base >> BLOCK_SHIFT), dest_base, (int)(dest_base >> BLOCK_SHIFT), size, (int)((size + (BLOCK_SIZE - 1)) >> BLOCK_SHIFT));
for (i = 0; i < size; ++i) {
swap_block_mem[dest_base + i] = swap_block_mem[src_base + i];
-#ifndef NDEBUG
- swap_block_mem[src_base + i] = 0x55;
-#endif
+ swap_block_mem[src_base + i] = 0xaa;
}
}
size = -size;
for (i = -1; i >= size; --i) {
swap_block_mem[dest_limit + i] = swap_block_mem[src_limit + i];
-#ifndef NDEBUG
- swap_block_mem[src_limit + i] = 0x55;
-#endif
+ swap_block_mem[src_limit + i] = 0xaa;
}
}
#endif
long i;
//printf("core_to_swap_copy %ld(%d) %ld(%d) %ld(%d)\n", src_base, (int)(src_base >> BLOCK_SHIFT), dest_base, (int)(dest_base >> BLOCK_SHIFT), size, (int)((size + (BLOCK_SIZE - 1)) >> BLOCK_SHIFT));
-#ifndef NDEBUG
for (i = 0; i < size; ++i)
- assert(swap_block_mem[dest_base + i] == 0xaa);
-#endif
+ rassert(swap_block_mem[dest_base + i] == 0xaa);
memcpy(swap_block_mem + dest_base, core_block_mem + src_base, size);
-#ifndef NDEBUG
memset(core_block_mem + src_base, 0xaa, size);
-#endif
}
void swap_to_core_copy(long src_base, long dest_base, long size) {
long i;
//printf("swap_to_core_copy %ld(%d) %ld(%d) %ld(%d)\n", src_base, (int)(src_base >> BLOCK_SHIFT), dest_base, (int)(dest_base >> BLOCK_SHIFT), size, (int)((size + (BLOCK_SIZE - 1)) >> BLOCK_SHIFT));
-#ifndef NDEBUG
for (i = 0; i < size; ++i)
- assert(core_block_mem[dest_base + i] == 0xaa);
-#endif
+ rassert(core_block_mem[dest_base + i] == 0xaa);
memcpy(core_block_mem + dest_base, swap_block_mem + src_base, size);
-#ifndef NDEBUG
memset(swap_block_mem + src_base, 0xaa, size);
-#endif
}
#endif
#endif
#if defined(INDIRECT_CORE) || defined(INDIRECT_SWAP)
int i, j, k;
-#elif !defined(NDEBUG)
- int i, j;
#else
- int i;
+ int i, j;
#endif
char buf[256];
int process;
#endif
core_block_mem = malloc((long)n_core_blocks << BLOCK_SHIFT);
rassert(core_block_mem);
-#ifndef NDEBUG
memset(core_block_mem, 0xaa, (long)n_core_blocks << BLOCK_SHIFT);
-#endif
#ifdef INODE_SWAP
fd_open(fs_path);
#endif
swap_block_mem = malloc((long)n_swap_blocks << BLOCK_SHIFT);
rassert(swap_block_mem);
-#ifndef NDEBUG
memset(swap_block_mem, 0xaa, (long)n_swap_blocks << BLOCK_SHIFT);
-#endif
#endif
process_init(n_processes, spare);
}
#ifdef INDIRECT_CORE
+#ifndef NDEBUG
for (i = 0; i < core_table_blocks; ++i)
rassert(core_table_mem[i] == 0x55555555);
+#endif
{
k = n_core_blocks >> 3;
for (j = 0; j < k; ++j)
rassert(core_block_bitmap[j] == (uint8_t)~(-1 << k));
}
#endif
-#ifndef NDEBUG
j = n_core_blocks << BLOCK_SHIFT;
for (i = 0; i < j; ++i)
- assert(core_block_mem[i] == 0xaa);
-#endif
+ rassert(core_block_mem[i] == 0xaa);
#ifdef INODE_SWAP
printf("free inodes %d blocks %d\n", fs_tab[0].s_tinode, fs_tab[0].s_tfree);
#else
#ifdef INDIRECT_SWAP
+#ifndef NDEBUG
for (i = 0; i < swap_table_blocks; ++i)
rassert(swap_table_mem[i] == 0x55555555);
+#endif
{
k = n_swap_blocks >> 3;
for (j = 0; j < k; ++j)
rassert(swap_block_bitmap[j] == (uint8_t)~(-1 << k));
}
#endif
-#ifndef NDEBUG
j = n_swap_blocks << BLOCK_SHIFT;
for (i = 0; i < j; ++i)
rassert(swap_block_mem[i] == 0xaa);
-#endif
#endif
return 0;