(para_base >> BLOCK_PARAS_SHIFT);
}
-bool process_alloc(struct process *process, int para_base, int para_limit) {
- int blocks;
-
- // must not be already allocated
- assert((process->flags & PROCESS_FLAGS_ACTIVE) == 0);
-
- // check blocks
- blocks = calc_blocks(para_base, para_limit);
- if (
- process_avail < blocks ||
- !pool_alloc(&swap_table, &process->swap_item, blocks, 0)
- )
- return false;
-
-#ifndef INDIRECT_CORE
- // free up as much core as we need to
- rassert(do_swap_out(blocks - core_table.avail));
-
- // allocate core and possible swap
- rassert(
- pool_alloc(
- &core_table,
- &process->core_item,
- blocks,
- POOL_ALLOC_MODE_MOVEABLE
- )
- );
-#else /* INDIRECT_CORE */
- // allocate core and possible swap
- if (!pool_alloc(&core_table, &process->core_item, blocks, 0)) {
- pool_free(&swap_table, &process->swap_item);
- return false;
- }
-
- // free up as much core as we need to
- rassert(do_swap_out(blocks - core_block_pool.avail));
-
- // populate physical blocks as needed
- rassert(
- block_pool_alloc(
- &core_block_pool,
- core_table_mem + process->core_item.base,
- blocks
- )
- );
-#endif /* INDIRECT_CORE */
-
- // insert at head of LRU list
- process->lru_item.prev = &lru_head;
- process->lru_item.next = lru_head.next;
- process->lru_item.prev->next = &process->lru_item;
- process->lru_item.next->prev = &process->lru_item;
-
- // track total allocation
-#ifndef INDIRECT_CORE
- process->flags = PROCESS_FLAGS_ACTIVE | PROCESS_FLAGS_CORE_ITEM;
-#else /* INDIRECT_CORE */
- process->flags = PROCESS_FLAGS_ACTIVE;
-#endif /* INDIRECT_CORE */
- process->para_base = para_base;
- process->para_limit = para_limit;
-#ifdef INDIRECT_CORE
- process->in_core_block =
- (para_limit + (BLOCK_PARAS - 1)) >> BLOCK_PARAS_SHIFT;
-#endif
- process_avail -= blocks;
- return true;
-}
-
-bool process_realloc(
+bool process_alloc(
struct process *process,
int para_base,
int para_limit,
- bool dir
+ int mode
) {
+ int block_base, block_limit, blocks, blocks_change;
struct process_calc calc;
- int old_blocks, block_base, block_limit, blocks, blocks_change;
-
- // must be already allocated
- assert(process->flags & PROCESS_FLAGS_ACTIVE);
-
- // must be fully in core
-#ifndef INDIRECT_CORE
- assert(process->flags & PROCESS_FLAGS_CORE_ITEM);
-#endif /* ! INDIRECT_CORE */
- process_calc(process, &calc);
- assert(calc.in_core_block == calc.block_limit);
-
- // for now, not allowed to resize both ends at once
- // (in case we free physical blocks then fail to resize)
- assert(
- para_base == process->para_base ||
- para_limit == process->para_limit
- );
+ int old_blocks;
// check blocks
- old_blocks = calc.block_limit - calc.block_base;
block_base = para_base >> BLOCK_PARAS_SHIFT;
block_limit = (para_limit + (BLOCK_PARAS - 1)) >> BLOCK_PARAS_SHIFT;
blocks = block_limit - block_base;
- blocks_change = blocks - old_blocks;
+
+ // check mode
+ blocks_change = blocks;
+ if ((mode & PROCESS_ALLOC_MODE_REALLOC) == 0) {
+ // alloc, must not be already allocated
+ assert((process->flags & PROCESS_FLAGS_ACTIVE) == 0);
+
+ // set up dummy calculations
+ calc.block_base = block_base;
+ calc.block_limit = block_base;
+ calc.core_origin = 0;
+ }
+ else {
+ // realloc, must be already allocated
+ assert(process->flags & PROCESS_FLAGS_ACTIVE);
+
+ // for now, not allowed to resize both ends at once
+ // (in case we free physical blocks then fail to resize)
+ assert(
+ para_base == process->para_base ||
+ para_limit == process->para_limit
+ );
+
+ // must be fully in core
+ process_calc(process, &calc);
+ assert(calc.in_core_block == calc.block_limit);
+#ifndef INDIRECT_CORE
+ assert(process->flags & PROCESS_FLAGS_CORE_ITEM);
+#endif /* ! INDIRECT_CORE */
+
+ old_blocks = calc.block_limit - calc.block_base;
+ blocks_change -= old_blocks;
+ }
if (process_avail < blocks_change)
return false;
&swap_table,
&process->swap_item,
blocks,
- dir | POOL_ALLOC_MODE_REALLOC,
+ mode,
0 //calc.block_base - block_base
)
)
&core_table,
&process->core_item,
blocks,
- dir | (POOL_ALLOC_MODE_MOVEABLE | POOL_ALLOC_MODE_REALLOC),
+ mode | POOL_ALLOC_MODE_MOVEABLE,
calc.block_base - block_base
)
);
#else /* INDIRECT_CORE */
// discard physical blocks as needed
+ // note: base pointer is garbage in the alloc case
block_pool_free(
&core_block_pool,
core_table_mem + calc.core_origin + calc.block_base,
&core_table,
&process->core_item,
blocks,
- dir | POOL_ALLOC_MODE_REALLOC,
+ mode,
calc.block_base - block_base
)
) {
// note: if we get to here we are resizing larger,
// so we can't have freed any physical blocks above
- rassert(
- pool_alloc(
- &swap_table,
- &process->swap_item,
- old_blocks,
- dir | POOL_ALLOC_MODE_REALLOC,
- 0 //calc.block_base - block_base
- )
- );
+ if ((mode & PROCESS_ALLOC_MODE_REALLOC) == 0)
+ pool_free(&swap_table, &process->swap_item);
+ else
+ rassert(
+ pool_alloc(
+ &swap_table,
+ &process->swap_item,
+ old_blocks,
+ mode,
+ 0 //calc.block_base - block_base
+ )
+ );
return false;
}
calc.core_origin = process->core_item.base - block_base;
);
#endif /* INDIRECT_CORE */
+ // for alloc, perform remaining initialization
+	if ((mode & PROCESS_ALLOC_MODE_REALLOC) == 0) {
+ // insert at head of LRU list
+ process->lru_item.prev = &lru_head;
+ process->lru_item.next = lru_head.next;
+ process->lru_item.prev->next = &process->lru_item;
+ process->lru_item.next->prev = &process->lru_item;
+
+#ifndef INDIRECT_CORE
+ process->flags = PROCESS_FLAGS_ACTIVE | PROCESS_FLAGS_CORE_ITEM;
+#else /* INDIRECT_CORE */
+ process->flags = PROCESS_FLAGS_ACTIVE;
+#endif /* INDIRECT_CORE */
+ }
+
// track total allocation
process->para_base = para_base;
process->para_limit = para_limit;