Unify process_alloc() and process_realloc() with PROCESS_ALLOC_MODE_REALLOC bit
[moveable_pool.git] / process.c
1 #include <assert.h>
2 #include <stddef.h>
3 #include <stdio.h> // temporary
4 #include <stdlib.h>
5 #include "core.h"
6 #include "process.h"
7 #include "rassert.h"
8
9 #include "swap.h"
10
struct process *processes;  // process table, allocated in process_init()
int n_processes;            // number of entries in processes[]

// total blocks still available for process allocations:
// combined core+swap capacity minus the configured spare (see process_init)
int process_avail;

// LRU list of allocated processes; lru_head is the sentinel.  victim walks
// backwards from the tail in do_swap_out() to pick swap-out candidates.
struct lru_item lru_head, *victim;
17
// Initialize the process table and the global allocation budget.
// n: number of slots in processes[]; spare: blocks withheld from the
// combined core+swap capacity (headroom so swap-out can always progress).
void process_init(int n, int spare) {
  processes = calloc(n, sizeof(struct process));
  rassert(processes);
  n_processes = n;

  // process_avail starts at total backing capacity (core + swap) - spare;
  // which pools provide that capacity depends on the indirection config.
#ifndef INDIRECT_CORE
#ifndef INDIRECT_SWAP
  process_avail = core_table.avail + swap_table.avail - spare;
#else /* INDIRECT_SWAP */
  process_avail = core_table.avail + swap_block_pool.avail - spare;
#endif /* INDIRECT_SWAP */
#else /* INDIRECT_CORE */
#ifndef INDIRECT_SWAP
  process_avail = core_block_pool.avail + swap_table.avail - spare;
#else /* INDIRECT_SWAP */
  process_avail = core_block_pool.avail + swap_block_pool.avail - spare;
#endif /* INDIRECT_SWAP */
  // with indirect core, the core address-space table is a separate limit
  if (process_avail > core_table.avail)
    process_avail = core_table.avail;
#endif /* INDIRECT_CORE */
  // the swap address-space table always bounds the total
  if (process_avail > swap_table.avail)
    process_avail = swap_table.avail;
 printf("process_avail %d\n", process_avail);

  // empty circular LRU list; victim starts at the sentinel
  lru_head.prev = &lru_head;
  lru_head.next = &lru_head;
  victim = &lru_head;
}
46
// Transfer `blocks` blocks between core and swap starting at logical block
// calc->in_core_block: dir == true writes core -> swap (swap-out), false
// reads swap -> core (swap-in), matching the callers in do_swap_out() and
// process_run().
// note: `process` is unused in every configuration of this function;
// kept for call-site symmetry.
static void do_swap_read_write(
  struct process *process,
  struct process_calc *calc,
  int blocks,
  bool dir
) {
#ifndef INDIRECT_CORE
#ifndef INDIRECT_SWAP
  // both sides are contiguous: one bulk transfer
  swap_read_write(
    calc->core_origin + calc->in_core_block,
    calc->swap_origin + calc->in_core_block,
    blocks,
    dir
  );
#else /* INDIRECT_SWAP */
  // swap is indirected through swap_table_mem: one transfer per block
  int core_block, swap_block, i;

  core_block = calc->core_origin + calc->in_core_block;
  swap_block = calc->swap_origin + calc->in_core_block;
 printf("blocks");
  for (i = 0; i < blocks; ++i)
 {
  printf(" %s%d", dir ? "->" : "<-", swap_table_mem[swap_block + i]);
    swap_read_write(
      core_block + i,
      swap_table_mem[swap_block + i],
      1,
      dir
    );
 }
 printf("\n");
#endif /* INDIRECT_SWAP */
#else /* INDIRECT_CORE */
  // core is indirected through core_table_mem (and possibly swap through
  // swap_table_mem): one transfer per block
  int core_block, swap_block, i;

  core_block = calc->core_origin + calc->in_core_block;
  swap_block = calc->swap_origin + calc->in_core_block;
 printf("blocks");
  for (i = 0; i < blocks; ++i)
 {
#ifndef INDIRECT_SWAP
  printf(" %d%s", core_table_mem[core_block + i], dir ? "->" : "<-");
#else /* INDIRECT_SWAP */
  printf(" %d%s%d", core_table_mem[core_block + i], dir ? "->" : "<-", swap_table_mem[swap_block + i]);
#endif
    swap_read_write(
      core_table_mem[core_block + i],
#ifndef INDIRECT_SWAP
      swap_block + i,
#else /* INDIRECT_SWAP */
      swap_table_mem[swap_block + i],
#endif
      1,
      dir
    );
 }
 printf("\n");
#endif /* INDIRECT_CORE */
}
106
// Swap out blocks from least-recently-used processes until at least
// swap_out blocks of core have been released.  Returns true when the full
// request was satisfied; returns false only when it had to stop short for
// lack of swap space (possible only with INDIRECT_SWAP).
// note: swap_out argument can be negative, indicates a no-op
// note: not allowed to move any core, or caller must refresh
static bool do_swap_out(int swap_out) {
  struct process *process;
  struct process_calc calc;
  int blocks;

  for (; swap_out > 0; swap_out -= blocks) {
#ifdef INDIRECT_SWAP
    // no physical swap blocks left at all: cannot make progress
    if (swap_block_pool.avail == 0)
      return false;
#endif /* INDIRECT_SWAP */

    // victim->prev is the least-recently-used process not yet victimized
    assert(victim->prev != &lru_head);
    process = (struct process *)(
      (char *)victim->prev - offsetof(struct process, lru_item)
    );
    process_calc(process, &calc);

    // calculate amount to swap out
    // (in-core portion is [block_base, in_core_block))
    blocks = calc.in_core_block - calc.block_base;
#ifdef INDIRECT_SWAP
    if (blocks > swap_block_pool.avail)
      blocks = swap_block_pool.avail;
#endif /* INDIRECT_SWAP */
    if (blocks > swap_out)
      blocks = swap_out;
 printf("victim %d, swap out %d of %d\n", (int)(process - processes), blocks, calc.in_core_block - calc.block_base);

    // adjust swap pointer
    calc.in_core_block -= blocks;
#ifdef INDIRECT_CORE
    process->in_core_block = calc.in_core_block;
#endif

    // transfer data to swap
#ifdef INDIRECT_SWAP
    // allocate the physical swap blocks before writing into them
    rassert(
      block_pool_alloc(
        &swap_block_pool,
        swap_table_mem + calc.swap_origin + calc.in_core_block,
        blocks
      )
    );
#endif /* INDIRECT_SWAP */
 printf("write core [%d,%d) to swap [%d,%d)\n", calc.core_origin + calc.in_core_block, calc.core_origin + calc.in_core_block + blocks, calc.swap_origin + calc.in_core_block, calc.swap_origin + calc.in_core_block + blocks);
    do_swap_read_write(process, &calc, blocks, true);
#ifdef INDIRECT_CORE
    // physical core blocks behind the swapped-out range are now free
    block_pool_free(
      &core_block_pool,
      core_table_mem + calc.core_origin + calc.in_core_block,
      blocks
    );
#endif /* INDIRECT_CORE */

    // see if victim fully swapped out
    if (calc.in_core_block > calc.block_base) {
#ifndef INDIRECT_CORE
      // no, reduce core allocation
      rassert(
        pool_alloc(
          &core_table,
          &process->core_item,
          calc.in_core_block - calc.block_base,
          POOL_ALLOC_MODE_MOVEABLE | POOL_ALLOC_MODE_REALLOC,
          0
        )
      );
#endif /* ! INDIRECT_CORE */

      // in this case there can't be any further victim to swap
      // report whether aborted due to swap space or user request
      return swap_out == blocks;
    }
 printf("victimized %d\n", (int)(process - processes));

#ifndef INDIRECT_CORE
    // remove from core pool
    pool_free(&core_table, &process->core_item);
    process->flags &= ~PROCESS_FLAGS_CORE_ITEM;
#endif /* ! INDIRECT_CORE */
    // advance victim past the fully swapped-out process (it stays on the
    // LRU list but will not be picked again)
    victim = &process->lru_item;
  }
  return true;
}
192
193 static int calc_blocks(int para_base, int para_limit) {
194   return
195     ((para_limit + (BLOCK_PARAS - 1)) >> BLOCK_PARAS_SHIFT) -
196       (para_base >> BLOCK_PARAS_SHIFT);
197 }
198
199 bool process_alloc(
200   struct process *process,
201   int para_base,
202   int para_limit,
203   int mode
204 ) {
205   int block_base, block_limit, blocks, blocks_change;
206   struct process_calc calc;
207   int old_blocks;
208
209   // check blocks
210   block_base = para_base >> BLOCK_PARAS_SHIFT;
211   block_limit = (para_limit + (BLOCK_PARAS - 1)) >> BLOCK_PARAS_SHIFT;
212   blocks = block_limit - block_base;
213
214   // check mode
215   blocks_change = blocks;
216   if ((mode & PROCESS_ALLOC_MODE_REALLOC) == 0) {
217     // alloc, must not be already allocated
218     assert((process->flags & PROCESS_FLAGS_ACTIVE) == 0);
219
220     // set up dummy calculations
221     calc.block_base = block_base;
222     calc.block_limit = block_base;
223     calc.core_origin = 0;
224   }
225   else {
226     // realloc, must be already allocated
227     assert(process->flags & PROCESS_FLAGS_ACTIVE);
228
229     // for now, not allowed to resize both ends at once
230     // (in case we free physical blocks then fail to resize)
231     assert(
232       para_base == process->para_base ||
233         para_limit == process->para_limit
234     );
235
236     // must be fully in core
237     process_calc(process, &calc);
238     assert(calc.in_core_block == calc.block_limit);
239 #ifndef INDIRECT_CORE
240     assert(process->flags & PROCESS_FLAGS_CORE_ITEM);
241 #endif /* ! INDIRECT_CORE */
242
243     old_blocks = calc.block_limit - calc.block_base;
244     blocks_change -= old_blocks;
245   }
246   if (process_avail < blocks_change)
247     return false;
248
249   // reallocate swap
250   if (
251     !pool_alloc(
252       &swap_table,
253       &process->swap_item,
254       blocks,
255       mode,
256       0 //calc.block_base - block_base
257     )
258   )
259     return false;
260
261 #ifndef INDIRECT_CORE
262   // free up as much core as we need to
263   rassert(do_swap_out(blocks_change - core_table.avail));
264
265   // reallocate core
266   rassert(
267     pool_alloc(
268       &core_table,
269       &process->core_item,
270       blocks,
271       mode | POOL_ALLOC_MODE_MOVEABLE,
272       calc.block_base - block_base
273     )
274   );
275 #else /* INDIRECT_CORE */
276   // discard physical blocks as needed
277   // note: base pointer is garbage in the alloc case
278   block_pool_free(
279     &core_block_pool,
280     core_table_mem + calc.core_origin + calc.block_base,
281     block_base - calc.block_base
282   );
283   block_pool_free(
284     &core_block_pool,
285     core_table_mem + calc.core_origin + block_limit,
286     calc.block_limit - block_limit
287   );
288
289   // reallocate core
290   if (
291     !pool_alloc(
292       &core_table,
293       &process->core_item,
294       blocks,
295       mode,
296       calc.block_base - block_base
297     )
298   ) {
299     // note: if we get to here we are resizing larger,
300     // so we can't have freed any physical blocks above
301     if ((mode & PROCESS_ALLOC_MODE_REALLOC) == 0)
302       pool_free(&swap_table, &process->swap_item);
303     else
304       rassert(
305         pool_alloc(
306           &swap_table,
307           &process->swap_item,
308           old_blocks,
309           mode,
310           0 //calc.block_base - block_base
311         )
312       );
313     return false;
314   }
315   calc.core_origin = process->core_item.base - block_base;
316
317   // free up as much core as we need to
318   rassert(do_swap_out(blocks_change - core_block_pool.avail));
319
320   // populate physical blocks as needed
321   rassert(
322     block_pool_alloc(
323       &core_block_pool,
324       core_table_mem + calc.core_origin + block_base,
325       calc.block_base - block_base
326     ) &&
327     block_pool_alloc(
328       &core_block_pool,
329       core_table_mem + calc.core_origin + calc.block_limit,
330       block_limit - calc.block_limit
331     )
332   );
333 #endif /* INDIRECT_CORE */
334
335   // for alloc, perform remaining initialization
336   if ((process->flags & PROCESS_ALLOC_MODE_REALLOC) == 0) {
337     // insert at head of LRU list
338     process->lru_item.prev = &lru_head;
339     process->lru_item.next = lru_head.next;
340     process->lru_item.prev->next = &process->lru_item;
341     process->lru_item.next->prev = &process->lru_item;
342
343 #ifndef INDIRECT_CORE
344     process->flags = PROCESS_FLAGS_ACTIVE | PROCESS_FLAGS_CORE_ITEM;
345 #else /* INDIRECT_CORE */
346     process->flags = PROCESS_FLAGS_ACTIVE;
347 #endif /* INDIRECT_CORE */
348   } 
349
350   // track total allocation
351   process->para_base = para_base;
352   process->para_limit = para_limit;
353 #ifdef INDIRECT_CORE
354   process->in_core_block = block_limit;
355 #endif
356   process_avail -= blocks_change;
357   return true;
358 }
359
// Make `process` fully resident in core (swapping out LRU victims as
// needed) and move it to the head (most-recently-used end) of the LRU list.
void process_run(struct process *process) {
  struct process_calc calc;
  int swap_in, blocks;

  // must be already allocated
  assert(process->flags & PROCESS_FLAGS_ACTIVE);

  // remove from LRU list
  // (if it was the victim cursor, move the cursor off it first)
  if (victim == &process->lru_item)
    victim = process->lru_item.next;
  process->lru_item.prev->next = process->lru_item.next;
  process->lru_item.next->prev = process->lru_item.prev;

  // loop to swap out then swap in
  // swap_in counts blocks still to bring in; without INDIRECT_CORE we also
  // loop once when fully swapped out, to re-acquire a core_item
  process_calc(process, &calc);
  for (
    swap_in = calc.block_limit - calc.in_core_block;
#ifndef INDIRECT_CORE
    (process->flags & PROCESS_FLAGS_CORE_ITEM) == 0 || swap_in > 0;
#else
    swap_in > 0;
#endif
    swap_in -= blocks
  ) {
    // free up as much core as we can
#ifndef INDIRECT_CORE
    do_swap_out(swap_in - core_table.avail);
    blocks = core_table.avail;
#else /* INDIRECT_CORE */
    do_swap_out(swap_in - core_block_pool.avail);
    blocks = core_block_pool.avail;
#endif /* INDIRECT_CORE */
    if (blocks > swap_in)
      blocks = swap_in;

#ifndef INDIRECT_CORE
    // increase core allocation
    // (plain alloc when we have no core_item yet, realloc otherwise)
    rassert(
      pool_alloc(
        &core_table,
        &process->core_item,
        calc.in_core_block - calc.block_base + blocks,
        process->flags & PROCESS_FLAGS_CORE_ITEM ?
          POOL_ALLOC_MODE_MOVEABLE | POOL_ALLOC_MODE_REALLOC :
          POOL_ALLOC_MODE_MOVEABLE,
        0
      )
    );
    process->flags |= PROCESS_FLAGS_CORE_ITEM;
    // core may have moved; refresh the origin
    calc.core_origin = process->core_item.base - calc.block_base;
#endif /* ! INDIRECT_CORE */

    // transfer data to core
#ifdef INDIRECT_CORE
    // acquire physical core blocks before reading into them
    rassert(
      block_pool_alloc(
        &core_block_pool,
        core_table_mem + calc.core_origin + calc.in_core_block,
        blocks
      )
    );
#endif /* INDIRECT_CORE */
 printf("read swap [%d,%d) to core [%d,%d)\n", calc.swap_origin + calc.in_core_block, calc.swap_origin + calc.in_core_block + blocks, calc.core_origin + calc.in_core_block, calc.core_origin + calc.in_core_block + blocks);
    do_swap_read_write(process, &calc, blocks, false);
#ifdef INDIRECT_SWAP
    // physical swap blocks behind the swapped-in range are now free
    block_pool_free(
      &swap_block_pool,
      swap_table_mem + calc.swap_origin + calc.in_core_block,
      blocks
    );
#endif /* INDIRECT_SWAP */

    // adjust swap pointer
    calc.in_core_block += blocks;
#ifdef INDIRECT_CORE
    process->in_core_block = calc.in_core_block;
#endif /* INDIRECT_CORE */
  }

  // insert at head of LRU list
  process->lru_item.prev = &lru_head;
  process->lru_item.next = lru_head.next;
  process->lru_item.prev->next = &process->lru_item;
  process->lru_item.next->prev = &process->lru_item;
}
445
// Release all core and swap backing for `process`, unlink it from the LRU
// list, and return its blocks to the global budget.
void process_free(struct process *process) {
#ifdef INDIRECT_SWAP
  int block, swap_in;
#endif /* INDIRECT_SWAP */

  // must be already allocated
  assert(process->flags & PROCESS_FLAGS_ACTIVE);

  // remove from LRU list
  // (if it was the victim cursor, move the cursor off it first)
  if (victim == &process->lru_item)
    victim = process->lru_item.next;
  process->lru_item.prev->next = process->lru_item.next;
  process->lru_item.next->prev = process->lru_item.prev;

#ifdef INDIRECT_SWAP
  // calculate blocks before freeing anything
  // block = blocks resident in core; swap_in = blocks held in swap
#ifndef INDIRECT_CORE
  block =
    process->flags & PROCESS_FLAGS_CORE_ITEM ?
      process->core_item.limit - process->core_item.base :
      0;
#else /* INDIRECT_CORE */
  block =
    process->in_core_block - (process->para_base >> BLOCK_PARAS_SHIFT);
#endif /* INDIRECT_CORE */
  swap_in =
    process->swap_item.limit - process->swap_item.base - block;
#endif /* INDIRECT_SWAP */

  // remove from core pool
#ifndef INDIRECT_CORE
  if (process->flags & PROCESS_FLAGS_CORE_ITEM)
    pool_free(&core_table, &process->core_item);
#else /* INDIRECT_CORE */
  // free the physical blocks for the in-core portion, then the table entry
  block_pool_free(
    &core_block_pool,
    core_table_mem + process->core_item.base,
#ifdef INDIRECT_SWAP
    block
#else /* ! INDIRECT_SWAP */
    process->in_core_block - (process->para_base >> BLOCK_PARAS_SHIFT)
#endif /* ! INDIRECT_SWAP */
  );
  pool_free(&core_table, &process->core_item);
#endif /* INDIRECT_CORE */

  // remove from swap pool
#ifdef INDIRECT_SWAP
  // only the swapped-out tail [base + block, limit) holds physical blocks
  block_pool_free(
    &swap_block_pool,
    swap_table_mem + process->swap_item.base + block,
    swap_in
  );
#endif /* INDIRECT_SWAP */
  pool_free(&swap_table, &process->swap_item);

  // track total allocation
  process_avail += calc_blocks(process->para_base, process->para_limit);
  process->flags = 0;
}
506
// Fill *calc with the derived geometry of `process`:
//   block_base/block_limit: logical block range covering the para range
//   core_origin/swap_origin: offsets such that (origin + logical block)
//     indexes the core/swap tables
//   in_core_block: first logical block NOT resident in core, so
//     [block_base, in_core_block) is in core and the rest is in swap
void process_calc(struct process *process, struct process_calc *calc) {
  calc->block_base =
    process->para_base >> BLOCK_PARAS_SHIFT;
  calc->block_limit =
    (process->para_limit + (BLOCK_PARAS - 1)) >> BLOCK_PARAS_SHIFT;
#ifdef INDIRECT_CORE
  // with indirect core the core table entry always spans the full range
  assert(
    process->core_item.limit - process->core_item.base ==
      calc->block_limit - calc->block_base
  );
#endif /* INDIRECT_CORE */
  // note: following can fail during realloc because swap reallocated first:
  //assert(
  //  process->swap_item.limit - process->swap_item.base ==
  //    calc->block_limit - calc->block_base
  //);

#ifndef INDIRECT_CORE
  // core_origin is garbage when there is no core_item (fully swapped out)
  calc->core_origin = (
    process->flags & PROCESS_FLAGS_CORE_ITEM ? process->core_item.base : 0
  ) - calc->block_base;
#else /* INDIRECT_CORE */
  calc->core_origin = process->core_item.base - calc->block_base;
  assert(calc->core_origin == process->core_item.limit - calc->block_limit);
#endif
  // note: following can fail during realloc because swap reallocated first:
  calc->swap_origin = process->swap_item.base - calc->block_base;
  //assert(calc->swap_origin == process->swap_item.limit - calc->block_limit);

#ifndef INDIRECT_CORE
  // in-core extent is implied by the core_item size
  calc->in_core_block = (
    process->flags & PROCESS_FLAGS_CORE_ITEM ? process->core_item.limit : 0
  ) - calc->core_origin;
 //printf("para [%d,%d) in_core_blocks %d block [%d,%d) core_origin %d swap_origin %d in_core_block %d\n", process->para_base, process->para_limit, process->flags & PROCESS_FLAGS_CORE_ITEM ? process->core_item.limit - process->core_item.base : 0, calc->block_base, calc->block_limit, calc->core_origin, calc->swap_origin, calc->in_core_block);
#else
  // with indirect core the in-core extent is tracked explicitly
  calc->in_core_block = process->in_core_block;
 //printf("para [%d,%d) in_core_blocks %d block [%d,%d) core_origin %d swap_origin %d in_core_block %d\n", process->para_base, process->para_limit, process->in_core_blocks, calc->block_base, calc->block_limit, calc->core_origin, calc->swap_origin, calc->in_core_block);
#endif
}