#include <stdlib.h>
#include "pool.h"
+// can only define TRY_BLOCKER if MOVEABLE_POOL also defined
+#define MOVEABLE_POOL 1
#define TRY_BLOCKER 1
-#define TRY_REALLOC 1
#if 1
void check_invariants(struct pool_head *head) {
check_invariants(head);
}
-#ifdef CLASSIC_POOL
bool pool_alloc(
struct pool_head *head,
struct pool_item *item,
// item must not be allocated yet
assert(item->prev == NULL && item->next == NULL);
- // check size, disregarding fragmentation
- avail = head->avail - size;
- if (avail < 0)
- return false;
-
- // first-fit algorithm
- q = &head->item;
- do {
- p = q;
- q = p->next;
- gap = q->base - p->limit;
- if (size <= gap)
- goto found;
- if (q == &head->item)
- break;
- avail -= gap;
- } while (avail >= 0);
- return false;
-
-found:
- item->base = p->limit;
- item->limit = p->limit + size;
-
- // insert into list at found position
- item->prev = p;
- p->next = item;
- item->next = q;
- q->prev = item;
-
- // keep track of total allocation
- head->avail -= size;
- check_invariants(head);
- return true;
-}
-
-bool pool_realloc(
- struct pool_head *head,
- struct pool_item *item,
- int size
-) {
- struct pool_item *p, *q;
- int size_change, avail, gap;
-
- // item must be already allocated
- assert(item->prev && item->next);
-
- // try to change current block size without copying
- size_change = size + item->base - item->limit;
- if (item->base + size <= item->next->base)
- goto resize;
-
- // check size, disregarding fragmentation
- avail = head->avail - size_change;
- if (avail < 0)
- return false;
-
- // first-fit algorithm, until our own list entry
- q = &head->item;
- do {
- p = q;
- q = p->next;
- if (q == item)
- goto ourself;
- gap = q->base - p->limit;
- if (size <= gap)
- goto found;
- assert(q != &head->item);
- avail -= gap;
- } while (avail >= 0);
- return false;
-
-ourself:
- // first-fit algorithm, including and after our own list entry,
- // the first iteration (ours) is different because q != p->next,
- // this causes us to ignore our own entry in the gap calculation
- q = q->next;
- goto loop_entry;
- do {
- p = q;
- q = p->next;
- loop_entry:
- gap = q->base - p->limit;
- if (size <= gap)
- goto found;
- if (q == &head->item)
- break;
- avail -= gap;
- } while (avail >= 0);
- return false;
-
-found:
- // if realloc is used, then user must provide move routine
- // (when called from here, it is always non-overlapping or downwards)
- assert(head->move);
- head->move(item, p->limit);
- //item->limit += p->limit - item->base;
- item->base = p->limit;
-
- // remove from list
- item->prev->next = item->next;
- item->next->prev = item->prev;
-
- // insert into list at found position
- item->prev = p;
- p->next = item;
- item->next = q;
- q->prev = item;
-
-resize:
- item->limit = item->base + size;
-
- // keep track of total allocation
- head->avail -= size_change;
- check_invariants(head);
- return true;
-}
-
-bool pool_realloc_base(
- struct pool_head *head,
- struct pool_item *item,
- int size
-) {
- struct pool_item *p, *q;
- int size_change, avail, gap;
-
- // item must be already allocated
- assert(item->prev && item->next);
-
- // try to change current block size without copying
- size_change = size + item->base - item->limit;
- if (item->limit - size >= item->prev->limit)
- goto resize;
-
- // check size, disregarding fragmentation
- avail = head->avail - size_change;
- if (avail < 0)
- return false;
-
- // last-fit algorithm, until our own list entry
- p = &head->item;
- do {
- q = p;
- p = q->prev;
- if (p == item)
- goto ourself;
- gap = q->base - p->limit;
- if (size <= gap)
- goto found;
- assert(p != &head->item);
- avail -= gap;
- } while (avail >= 0);
- return false;
-
-ourself:
- // first-fit algorithm, including and after our own list entry,
- // the first iteration (ours) is different because p != q->prev,
- // this causes us to ignore our own entry in the gap calculation
- p = p->prev;
- goto loop_entry;
- do {
- q = p;
- p = q->prev;
- loop_entry:
- gap = q->base - p->limit;
- if (size <= gap)
- goto found;
- if (p == &head->item)
- break;
- avail -= gap;
- } while (avail >= 0);
- return false;
-
-found:
- // if realloc is used, then user must provide move_up routine
- // (when called from here, it is always non-overlapping or upwards)
- assert(head->move_up);
- head->move_up(item, q->base);
- //item->base += q->base - item->limit;
- item->limit = q->base;
-
- // remove from list
- item->prev->next = item->next;
- item->next->prev = item->prev;
-
- // insert into list at found position
- item->prev = p;
- p->next = item;
- item->next = q;
- q->prev = item;
-
-resize:
- item->base = item->limit - size;
-
- // keep track of total allocation
- head->avail -= size_change;
- check_invariants(head);
- return true;
-}
-#endif
-
-#ifdef MOVEABLE_POOL
-bool pool_alloc_moveable(
- struct pool_head *head,
- struct pool_item *item,
- int size
-) {
- struct pool_item *p, *q;
- int avail, gap;
-
- // item must not be allocated yet
- assert(item->prev == NULL && item->next == NULL);
-
// check size, disregarding fragmentation
avail = head->avail - size;
if (avail < 0)
if (size <= gap)
goto found;
- // if gap is too large, move block after gap down into gap
+ // see if gap can be left alone
avail -= gap;
if (avail < 0) {
+#ifdef MOVEABLE_POOL
+ // move block after gap down into gap
+ if (head->move == NULL || head->move_up == NULL)
+ return false;
avail += gap;
- // if moveable alloc is used, then user must provide move routine
- assert(head->move);
head->move(q, p->limit);
q->limit += p->limit - q->base;
q->base = p->limit;
+#else
+ return false;
+#endif
}
assert(q != &head->item);
return true;
}
-bool pool_realloc_moveable(
+bool pool_realloc(
struct pool_head *head,
struct pool_item *item,
int size
struct pool_item *blocker;
int blocker_size;
#endif
-#ifdef TRY_REALLOC
+#ifdef MOVEABLE_POOL
int avail_save;
#endif
// item must be already allocated
assert(item->prev && item->next);
+ // realloc requires at least the move-down routine
+ // (even though some requests succeed without move)
+ assert(head->move);
+
// try to change current block size without copying
size_change = size + item->base - item->limit;
#ifndef TRY_BLOCKER
#ifdef TRY_BLOCKER
// see if blocker fits in gap
- if (blocker_size <= gap) {
- // if moveable realloc is used, then user must provide move routine
- assert(head->move);
+ if (blocker_size <= gap && head->move_up) {
+ // move blocker down into gap
head->move(blocker, p->limit);
blocker->limit += p->limit - blocker->base;
blocker->base = p->limit;
}
#endif
- // if gap is too large, move block after gap down into gap
+ // see if gap can be left alone
avail -= gap;
if (avail < 0) {
+ // move block after gap down into gap
+ if (head->move_up == NULL)
+ return false;
avail += gap;
- // if moveable realloc is used, then user must provide move routine
- assert(head->move);
head->move(q, p->limit);
q->limit += p->limit - q->base;
q->base = p->limit;
assert(q != &head->item);
}
-#ifdef TRY_REALLOC
+#ifdef MOVEABLE_POOL
// optional step: try an ordinary realloc in rest of list
// this is because if the pool consists of a tightly packed region
// followed by a mainly free region, it's best to move us to there
avail_save = avail;
+#endif
// first-fit algorithm, including and after our own list entry,
// the first iteration (ours) is different because q != p->next,
break;
avail -= gap;
} while (avail >= 0);
+#ifdef MOVEABLE_POOL
+ if (head->move_up == NULL)
+ return false;
avail = avail_save;
p = item->prev;
-#endif
- // calculate gap that would be left if we didn't move ourself,
- // if too large, move block after gap (ourself) down into gap
+ // calculate gap that would be left if we didn't move ourself
gap = item->base - p->limit;
+
+ // see if gap can be left alone
avail -= gap;
if (avail < 0) {
+ // move block after gap (ourself) down into gap
avail += gap;
- // if moveable realloc is used, then user must provide move routine
- assert(head->move);
head->move(item, p->limit);
item->limit += p->limit - item->base;
item->base = p->limit;
-#ifndef TRY_REALLOC
// if didn't do ordinary realloc above, see if we now fit
- if (item->base + size <= item->next->base)
- goto resize;
-#endif
+ //if (item->base + size <= item->next->base)
+ // goto resize;
}
// at this point we will not move our own block anymore (because
assert(p != &head->item);
}
+#else
+ return false;
+#endif
found:
- // if moveable realloc is used, then user must provide move routine
- // (when called from here, it is always non-overlapping or downwards)
+ // move ourself into found spot, always non-overlapping or downwards
assert(head->move);
head->move(item, p->limit);
//item->limit += p->limit - item->base;
q->prev = item;
resize:
+ // resize in place (or do the commented code above if moved ourself)
item->limit = item->base + size;
// keep track of total allocation
return true;
}
-bool pool_realloc_base_moveable(
+bool pool_realloc_base(
struct pool_head *head,
struct pool_item *item,
int size
struct pool_item *blocker;
int blocker_size;
#endif
-#ifdef TRY_REALLOC
+#ifdef MOVEABLE_POOL
int avail_save;
#endif
// item must be already allocated
assert(item->prev && item->next);
+ // realloc_base requires at least the move-up routine
+ // (even though some requests succeed without move)
+ assert(head->move_up);
+
// try to change current block size without copying
size_change = size + item->base - item->limit;
#ifndef TRY_BLOCKER
#ifdef TRY_BLOCKER
// see if blocker fits in gap
- if (blocker_size <= gap) {
- // if moveable realloc is used, then user must provide move_up routine
- assert(head->move_up);
+ if (blocker_size <= gap && head->move) {
+ // move blocker up into gap
head->move_up(blocker, q->base);
blocker->base += q->base - blocker->limit;
blocker->limit = q->base;
}
#endif
- // if gap is too large, move block before gap up into gap
+ // see if gap can be left alone
avail -= gap;
if (avail < 0) {
+ // move block before gap up into gap
+ if (head->move == NULL)
+ return false;
+
avail += gap;
- // if moveable realloc is used, then user must provide move_up routine
- assert(head->move_up);
head->move_up(p, q->base);
p->base += q->base - p->limit;
p->limit = q->base;
assert(p != &head->item);
}
-#ifdef TRY_REALLOC
+#ifdef MOVEABLE_POOL
// optional step: try an ordinary realloc in rest of list
// this is because if the pool consists of a tightly packed region
// preceded by a mainly free region, it's best to move us to there
avail_save = avail;
+#endif
// last-fit algorithm, including and before our own list entry,
// the first iteration (ours) is different because p != q->prev,
break;
avail -= gap;
} while (avail >= 0);
-
+#ifdef MOVEABLE_POOL
+ if (head->move == NULL)
+ return false;
+
avail = avail_save;
q = item->next;
-#endif
- // calculate gap that would be left if we didn't move ourself,
- // if too large, move block before gap (ourself) up into gap
+ // calculate gap that would be left if we didn't move ourself
gap = q->base - item->limit;
+
+ // see if gap can be left alone
avail -= gap;
if (avail < 0) {
+ // move block before gap (ourself) up into gap
avail += gap;
- // if moveable realloc is used, then user must provide move_up routine
- assert(head->move_up);
head->move_up(item, q->base);
item->base += q->base - item->limit;
item->limit = q->base;
-#ifndef TRY_REALLOC
// if didn't do ordinary realloc above, see if we now fit
- if (item->limit - size >= item->prev->limit)
- goto resize;
-#endif
+ //if (item->limit - size >= item->prev->limit)
+ // goto resize;
}
// at this point we will not move our own block anymore (because
assert(q != &head->item);
}
+#else
+ return false;
+#endif
found:
- // if moveable realloc is used, then user must provide move_up routine
- // (when called from here, it is always non-overlapping or upwards)
+	// move ourself into found spot, always non-overlapping or upwards
assert(head->move_up);
head->move_up(item, q->base);
//item->base += q->base - item->limit;
q->prev = item;
resize:
+ // resize in place (or do the commented code above if moved ourself)
item->base = item->limit - size;
// keep track of total allocation
check_invariants(head);
return true;
}
-#endif
void pool_free(struct pool_head *head, struct pool_item *item) {
// item must be already allocated
#endif
#if !defined(INODE_SWAP) && !defined(PREALLOCATE_SWAP)
rassert(
- swap_table_realloc(&victim->swap_item, victim_swap_blocks + blocks)
+ pool_realloc(&swap_table, &victim->swap_item, victim_swap_blocks + blocks)
);
#endif
goto loop_entry;
// add to swap pool
victim_swap_blocks = 0;
#if !defined(INODE_SWAP) && !defined(PREALLOCATE_SWAP)
- rassert(swap_table_alloc(&victim->swap_item, blocks));
+ rassert(pool_alloc(&swap_table, &victim->swap_item, blocks));
#endif
// remove from LRU list
#ifndef PREALLOCATE_CORE
// no, reduce core allocation
rassert(
- core_table_realloc_base(&victim->core_item, victim_core_blocks)
+ pool_realloc_base(&core_table, &victim->core_item, victim_core_blocks)
);
#endif
#ifndef PREALLOCATE_CORE
// remove from core pool
- core_table_free(&victim->core_item);
+ pool_free(&core_table, &victim->core_item);
#endif
victim = NULL;
if (
process_avail < blocks
#if defined(PREALLOCATE_SWAP) && !defined(MOVEABLE_SWAP)
- || !swap_table_alloc(&process->swap_item, blocks)
+ || !pool_alloc(&swap_table, &process->swap_item, blocks)
#endif
)
return false;
// allocate core and possible swap
#ifdef MOVEABLE_CORE
- rassert(core_table_alloc(&process->core_item, blocks));
+ rassert(pool_alloc(&core_table, &process->core_item, blocks));
#else
- if (!core_table_alloc(&process->core_item, blocks)) {
+ if (!pool_alloc(&core_table, &process->core_item, blocks)) {
#ifdef INODE_SWAP
//printf("deref inode %d\n", process->swap_inode->c_num);
i_deref(process->swap_inode);
#elif defined(PREALLOCATE_SWAP) && !defined(MOVEABLE_SWAP)
- swap_table_free(&process->swap_item);
+ pool_free(&swap_table, &process->swap_item);
#endif
return false;
}
#endif
#if defined(PREALLOCATE_SWAP) && defined(MOVEABLE_SWAP)
- rassert(swap_table_alloc(&process->swap_item, blocks));
+ rassert(pool_alloc(&swap_table, &process->swap_item, blocks));
#endif
#ifdef INDIRECT_CORE
#endif
#if defined(PREALLOCATE_SWAP) && !defined(MOVEABLE_SWAP)
- swap_table_free(&process->swap_item);
- if (!swap_table_alloc(&process->swap_item, blocks)) {
- rassert(swap_table_alloc(&process->swap_item, old_blocks));
+ pool_free(&swap_table, &process->swap_item);
+ if (!pool_alloc(&swap_table, &process->swap_item, blocks)) {
+ rassert(pool_alloc(&swap_table, &process->swap_item, old_blocks));
return false;
}
#endif
// reallocate core and possible swap
#ifdef MOVEABLE_CORE
- rassert(core_table_realloc(&process->core_item, blocks));
+ rassert(pool_realloc(&core_table, &process->core_item, blocks));
#else
- if (!core_table_realloc(&process->core_item, blocks)) {
+ if (!pool_realloc(&core_table, &process->core_item, blocks)) {
#if defined(PREALLOCATE_SWAP) && !defined(MOVEABLE_SWAP)
- swap_table_free(&process->swap_item);
- rassert(swap_table_alloc(&process->swap_item, old_blocks));
+ pool_free(&swap_table, &process->swap_item);
+ rassert(pool_alloc(&swap_table, &process->swap_item, old_blocks));
#endif
return false;
}
#endif
#if defined(PREALLOCATE_SWAP) && defined(MOVEABLE_SWAP)
- swap_table_free(&process->swap_item);
- rassert(swap_table_alloc(&process->swap_item, blocks));
+ pool_free(&swap_table, &process->swap_item);
+ rassert(pool_alloc(&swap_table, &process->swap_item, blocks));
#endif
#ifdef INDIRECT_CORE
// add to core pool
process_core_blocks = 0;
#ifndef PREALLOCATE_CORE
- rassert(core_table_alloc(&process->core_item, blocks));
+ rassert(pool_alloc(&core_table, &process->core_item, blocks));
#endif
goto loop_entry_full;
}
udata.u_offset = (long)process_swap_blocks << BLOCK_SHIFT;
f_trunc(process->swap_inode);
#else
- rassert(swap_table_realloc(&process->swap_item, process_swap_blocks));
+ rassert(pool_realloc(&swap_table, &process->swap_item, process_swap_blocks));
#endif
loop_entry_partial:
// increase core allocation
#ifndef PREALLOCATE_CORE
rassert(
- core_table_realloc_base(
+ pool_realloc_base(&core_table,
&process->core_item,
process_core_blocks + blocks
)
f_trunc(process->swap_inode);
#elif !defined(PREALLOCATE_SWAP)
// remove from swap pool
- swap_table_free(&process->swap_item);
+ pool_free(&swap_table, &process->swap_item);
#endif
}
);
#endif
#ifndef PREALLOCATE_CORE
- core_table_free(&process->core_item);
+ pool_free(&core_table, &process->core_item);
#endif
}
else if (process == victim) {
swap_block_free(swap_table_mem + swap_base, victim_swap_blocks);
#endif
#if !defined(INODE_SWAP) && !defined(PREALLOCATE_SWAP)
- swap_table_free(&victim->swap_item);
+ pool_free(&swap_table, &victim->swap_item);
#endif
#ifdef INDIRECT_CORE
#ifdef PREALLOCATE_CORE
core_block_free(core_table_mem + core_base, victim_core_blocks);
#endif
#ifndef PREALLOCATE_CORE
- core_table_free(&victim->core_item);
+ pool_free(&core_table, &victim->core_item);
#endif
victim = NULL;
}
);
#endif
#if !defined(INODE_SWAP) && !defined(PREALLOCATE_SWAP)
- swap_table_free(&process->swap_item);
+ pool_free(&swap_table, &process->swap_item);
#endif
}
// free the per-process table entries
#ifdef PREALLOCATE_CORE
- core_table_free(&process->core_item);
+ pool_free(&core_table, &process->core_item);
#endif
#ifdef PREALLOCATE_SWAP
- swap_table_free(&process->swap_item);
+ pool_free(&swap_table, &process->swap_item);
#elif defined(INODE_SWAP)
//printf("deref inode %d\n", process->swap_inode->c_num);
i_deref(process->swap_inode);