// increase swap allocation
victim_swap_size += size;
#ifndef PREALLOCATE_SWAP
- rassert(
- pool_realloc_moveable(&swap_head, &victim_swap_item, victim_swap_size)
- );
+ rassert(swap_realloc(&victim_swap_item, victim_swap_size));
#endif
goto loop_entry;
}
// add to swap pool, using dedicated swap item
victim_swap_size = size;
#ifndef PREALLOCATE_SWAP
- rassert(
- pool_alloc_moveable(&swap_head, &victim_swap_item, victim_swap_size)
- );
+ rassert(swap_alloc(&victim_swap_item, victim_swap_size));
#endif
loop_entry:
if (victim_core_size) {
#ifndef PREALLOCATE_CORE
// no, reduce core allocation, using dedicated core item
- rassert(
- pool_realloc_moveable(&core_head, &victim_core_item, victim_core_size)
- );
+ rassert(core_realloc(&victim_core_item, victim_core_size));
#endif
// as an optimization, skip the calculation of swap_out -= size
printf("victimized %d\n", (int)(victim - processes));
#ifndef PREALLOCATE_CORE
// remove from core pool, using dedicated core item
- pool_free(&core_head, &victim_core_item);
+ core_free(&victim_core_item);
#endif
#ifndef PREALLOCATE_SWAP
// must not be already allocated
assert(process->size == -1);
- // check size, disregarding fragmentation
- if (process_avail < size)
+ // check size
+ if (
+ process_avail < size
+#if defined(PREALLOCATE_SWAP) && !defined(MOVEABLE_SWAP)
+ || !swap_alloc(&process->swap_item, size)
+#endif
+ )
return false;
+ printf("%d %d %d\n", process_avail, core_head.avail, swap_head.avail);
// free up as much core as we need to
swap_out = size - core_head.avail;
#endif
do_swap_out(swap_out);
- // allocate core, can't fail
+ // allocate core and possibly swap
+#ifdef MOVEABLE_CORE
+ rassert(
+ core_alloc(
#ifdef PREALLOCATE_CORE
- rassert(pool_alloc_moveable(&core_head, &process->core_item, size));
+ &process->core_item,
#else
- rassert(pool_alloc_moveable(&core_head, &process->pool_item, size));
+ &process->pool_item,
#endif
-#ifdef PREALLOCATE_SWAP
- rassert(pool_alloc_moveable(&swap_head, &process->swap_item, size));
+ size
+ )
+ );
+#else
+ if (
+ !core_alloc(
+#ifdef PREALLOCATE_CORE
+ &process->core_item,
+#else
+ &process->pool_item,
+#endif
+ size
+ )
+ ) {
+#if defined(PREALLOCATE_SWAP) && !defined(MOVEABLE_SWAP)
+ swap_free(&process->swap_item);
+#endif
+ return false;
+ }
+#endif
+#if defined(PREALLOCATE_SWAP) && defined(MOVEABLE_SWAP)
+ rassert(swap_alloc(&process->swap_item, size));
#endif
// insert at head of LRU list
// must be fully in core
assert(process->lru_item.prev != NULL);
- // check size, disregarding fragmentation
+ // check size
int size_change = size - process->size;
if (process_avail < size_change)
return false;
+#if defined(PREALLOCATE_SWAP) && !defined(MOVEABLE_SWAP)
+ swap_free(&process->swap_item);
+ if (!swap_alloc(&process->swap_item, size)) {
+ rassert(swap_alloc(&process->swap_item, process->size));
+ return false;
+ }
+#endif
// free up as much core as we need to
swap_out = size_change - core_head.avail;
#endif
do_swap_out(swap_out);
- // reallocate core, can't fail
+ // reallocate core and possibly swap
+#ifdef MOVEABLE_CORE
+ rassert(
+ core_realloc(
#ifdef PREALLOCATE_CORE
- rassert(pool_realloc_moveable(&core_head, &process->core_item, size));
+ &process->core_item,
#else
- rassert(pool_realloc_moveable(&core_head, &process->pool_item, size));
+ &process->pool_item,
#endif
-#ifdef PREALLOCATE_SWAP
- rassert(pool_realloc_moveable(&swap_head, &process->swap_item, size));
+ size
+ )
+ );
+#else
+ if (
+ !core_realloc(
+#ifdef PREALLOCATE_CORE
+ &process->core_item,
+#else
+ &process->pool_item,
+#endif
+ size
+ )
+ ) {
+#if defined(PREALLOCATE_SWAP) && !defined(MOVEABLE_SWAP)
+ swap_free(&process->swap_item);
+ rassert(swap_alloc(&process->swap_item, process->size));
+#endif
+ return false;
+ }
+#endif
+#if defined(PREALLOCATE_SWAP) && defined(MOVEABLE_SWAP)
+ swap_free(&process->swap_item);
+ rassert(swap_alloc(&process->swap_item, size));
#endif
// track total allocation
// add to core pool, using dedicated core item
process_core_size = size;
#ifndef PREALLOCATE_CORE
- rassert(
- pool_alloc_moveable(&core_head, &process_core_item, process_core_size)
- );
+ rassert(core_alloc(&process_core_item, process_core_size));
#endif
goto loop_entry_full;
}
do {
#ifndef PREALLOCATE_SWAP
// reduce swap allocation
- rassert(
- pool_realloc_moveable(&swap_head, &process_swap_item, process_swap_size)
- );
+ rassert(swap_realloc(&process_swap_item, process_swap_size));
loop_entry_partial:
#endif
// increase core allocation
process_core_size += size;
#ifndef PREALLOCATE_CORE
- rassert(
- pool_realloc_moveable(&core_head, &process_core_item, process_core_size)
- );
+ rassert(core_realloc(&process_core_item, process_core_size));
#endif
loop_entry_full:
#ifndef PREALLOCATE_SWAP
// remove from swap pool, using dedicated swap item
- pool_free(&swap_head, &process_swap_item);
+ swap_free(&process_swap_item);
#endif
}
assert(process->size != -1);
#ifdef PREALLOCATE_CORE
- pool_free(&core_head, &process->core_item);
+ core_free(&process->core_item);
#endif
#ifdef PREALLOCATE_SWAP
- pool_free(&swap_head, &process->swap_item);
+ swap_free(&process->swap_item);
#endif
// see whether fully in core, victim, or fully in swap
process->lru_item.next->prev = process->lru_item.prev;
#ifndef PREALLOCATE_CORE
- pool_free(&core_head, &process->pool_item);
+ core_free(&process->pool_item);
#endif
}
else if (process == victim) {
#ifdef PREALLOCATE_CORE
victim_core_size = 0;
#else
- pool_free(&core_head, &victim_core_item);
+ core_free(&victim_core_item);
#endif
#ifdef PREALLOCATE_SWAP
victim_swap_size = 0;
#else
- pool_free(&swap_head, &victim_swap_item);
+ swap_free(&victim_swap_item);
#endif
}
#ifndef PREALLOCATE_SWAP
- else {
+ else
// fully in swap, remove from swap pool
- pool_free(&swap_head, &process->pool_item);
- }
+ swap_free(&process->pool_item);
#endif
// track total allocation