2 * (c) copyright 1987 by the Vrije Universiteit, Amsterdam, The Netherlands.
3 * See the copyright notice in the ACK home directory, in the file "Copyright".
6 static char rcsid[] = "$Id: memory.c,v 3.20 1994/06/24 10:34:53 ceriel Exp $";
10 * Memory manager. Memory is divided into NMEMS pieces. There is a struct
11 * for each piece telling where it is, how many bytes are used, and how many
12 * are left. If a request for core doesn't fit in the left bytes, an sbrk()
13 * is done and pieces after the one that requested the growth are moved up.
15 * Unfortunately, we cannot use sbrk to request more memory, because its
16 * result cannot be trusted. More specifically, it does not work properly
17 * on 2.9 BSD, and probably does not work properly on 2.8 BSD and V7 either.
18 * The problem is that "sbrk" adds the increment to the current "break"
19 * WITHOUT testing the carry bit. So, if your break is at 40000, and
20 * you "sbrk(30000)", it will succeed, but your break will be at 4464
33 static free_saved_moduls();
35 struct memory mems[NMEMS];
37 bool incore = TRUE; /* TRUE while everything can be kept in core. */
38 ind_t core_position = (ind_t)0; /* Index of current module. */
40 #define GRANULE 64 /* power of 2 */
50 incr = (incr + (GRANULE - 1)) & ~(GRANULE - 1);
53 if ((refused && refused < incr) ||
54 (sizeof(char *) < sizeof(long) &&
55 (inc != incr || BASE + inc < BASE)) ||
56 brk(BASE + incr) == -1) {
57 if (!refused || refused > incr)
66 * Initialize some pieces of core. We hope that this will be our last
67 * real allocation, meaning we've made the right choices.
72 register ind_t total_size;
73 register struct memory *mem;
77 #define ALIGN 8 /* minimum alignment for pieces */
78 #define AT_LEAST (ind_t)2*ALIGN /* See comment about string areas. */
80 total_size = (ind_t)0; /* Will accumulate the sizes. */
81 BASE = base = sbrk(0); /* First free. */
82 if ((int)base % ALIGN) {
83 base = sbrk(ALIGN - (int)base % ALIGN);
84 BASE = base = sbrk(0);
87 * String areas are special-cased. The first byte is unused as a way to
88 * distinguish a name without string from a name which has the first
89 * string in the string area.
91 for (mem = mems; mem < &mems[NMEMS]; mem++) {
93 mem->mem_full = (ind_t)0;
94 if (mem == &mems[ALLOLCHR] || mem == &mems[ALLOGCHR]) {
95 if (mem->mem_left == 0) {
96 mem->mem_left = ALIGN;
100 base += mem->mem_left;
101 total_size += mem->mem_left;
106 base += mem->mem_left; /* Each piece will start after prev. */
107 total_size += mem->mem_left;
111 if (sbreak(total_size) == -1) {
112 incore = FALSE; /* In core strategy failed. */
113 if (sbreak(AT_LEAST) == -1)
114 fatal("no core at all");
117 for (mem = mems; mem < &mems[NMEMS]; mem++) {
118 mem->mem_base = base;
119 if (mem == &mems[ALLOLCHR] || mem == &mems[ALLOGCHR]) {
121 mem->mem_left = ALIGN - 1;
125 mem->mem_full = (ind_t)0;
133 * Allocate an extra block of `incr' bytes and move all pieces with index
134 * higher than `piece' up with the size of the block.
135 * Move up as much as possible, if "incr" fails.
142 register struct memory *mem;
144 extern int statistics;
147 debug("move_up(%d, %d)\n", piece, (int)incr, 0, 0);
148 while (incr > 0 && sbreak(incr) == -1)
156 if (statistics) fprintf(stderr,"moving up %lx\n", (long) incr);
158 for (mem = &mems[NMEMS - 1]; mem > &mems[piece]; mem--)
161 mems[piece].mem_left += incr;
165 extern int passnumber;
168 * This routine is called if `piece' needs `incr' bytes and the system won't
169 * give them. We first steal the free bytes of all lower pieces and move them
170 * and `piece' down. If that doesn't give us enough bytes, we steal the free
171 * bytes of all higher pieces and move them up. We return whether we have
172 * enough bytes, the first or the second time.
175 compact(piece, incr, flag)
182 register ind_t gain, size;
183 register struct memory *mem;
184 int min = piece, max = piece;
185 #define SHIFT_COUNT 2 /* let pieces only contribute if their free
186 memory is more than 1/2**SHIFT_COUNT * 100 %
187 of its occupied memory
190 debug("compact(%d, %d, %d)\n", piece, (int)incr, flag, 0);
191 for (mem = &mems[0]; mem < &mems[NMEMS - 1]; mem++) {
192 assert(mem->mem_base + mem->mem_full + mem->mem_left == (mem+1)->mem_base);
196 if (flag == NORMAL) {
197 /* try and gain a bit more than needed */
198 gain = (mem->mem_full + incr) >> SHIFT_COUNT;
199 if (incr < gain) incr = gain;
203 * First, check that moving will result in enough space
205 if (flag != FREEZE) {
206 gain = mem->mem_left;
207 for (mem = &mems[piece-1]; mem >= &mems[0]; mem--) {
209 * Don't give it all away!
210 * If this does not give us enough, bad luck
215 size = mem->mem_full >> SHIFT_COUNT;
216 if (size == 0) size = mem->mem_left >> 1;
218 if (mem->mem_left >= size)
219 gain += (mem->mem_left - size) & ~(ALIGN - 1);
221 min = mem - &mems[0];
226 for (mem = &mems[piece+1]; mem <= &mems[NMEMS - 1]; mem++) {
228 * Don't give it all away!
229 * If this does not give us enough, bad luck
234 size = mem->mem_full >> SHIFT_COUNT;
235 if (size == 0) size = mem->mem_left >> 1;
237 if (mem->mem_left >= size)
238 gain += (mem->mem_left - size) & ~(ALIGN - 1);
240 max = mem - &mems[0];
246 if (max == piece) max = 0;
248 if (gain < incr) return 0;
256 for (mem = &mems[min]; mem != &mems[piece]; mem++) {
257 /* Here memory is inserted before a piece. */
258 assert(passnumber == FIRST || gain == (ind_t)0);
259 if (gain) copy_down(mem, gain);
260 if (flag == FREEZE || gain < incr) {
261 if (flag != NORMAL) size = 0;
263 size = mem->mem_full >> SHIFT_COUNT;
264 if (size == 0) size = mem->mem_left >> 1;
266 if (mem->mem_left >= size) {
267 size = (mem->mem_left - size) & ~(ALIGN - 1);
269 mem->mem_left -= size;
276 if (gain) copy_down(mem, gain);
277 gain += mem->mem_left;
281 register ind_t up = (ind_t)0;
283 for (mem = &mems[max]; mem > &mems[piece]; mem--) {
284 /* Here memory is appended after a piece. */
285 if (flag == FREEZE || gain + up < incr) {
286 if (flag != NORMAL) size = 0;
288 size = mem->mem_full >> SHIFT_COUNT;
289 if (size == 0) size = mem->mem_left >> 1;
291 if (mem->mem_left >= size) {
292 size = (mem->mem_left - size) & ~(ALIGN - 1);
294 mem->mem_left -= size;
297 if (up) copy_up(mem, up);
301 mems[piece].mem_left += gain;
302 assert(flag == FREEZE || gain >= incr);
303 for (mem = &mems[0]; mem < &mems[NMEMS - 1]; mem++) {
304 assert(mem->mem_base + mem->mem_full + mem->mem_left == (mem+1)->mem_base);
310 * The bytes of `mem' must be moved `dist' down in the address space.
311 * We copy the bytes from low to high, because the tail of the new area may
312 * overlap with the old area, but we do not want to overwrite them before they
317 register struct memory *mem;
324 size = mem->mem_full;
333 * The bytes of `mem' must be moved `dist' up in the address space.
334 * We copy the bytes from high to low, because the tail of the new area may
335 * overlap with the old area, but we do not want to overwrite them before they
340 register struct memory *mem;
347 size = mem->mem_full;
348 old = mem->mem_base + size;
355 static int alloctype = NORMAL;
358 * Add `size' bytes to the bytes already allocated for `piece'. If it has no
359 * free bytes left, ask them from memory or, if that fails, from the free
360 * bytes of other pieces. The offset of the new area is returned. No matter
361 * how many times the area is moved, because of another allocate, this offset
369 register ind_t incr = 0;
370 ind_t left = mems[piece].mem_left;
371 register ind_t full = mems[piece].mem_full;
373 assert(passnumber == FIRST || (!incore && piece == ALLOMODL));
376 if (size != (ind_t)size)
381 size = int_align(size);
385 incr = ((size - left + (INCRSIZE - 1)) / INCRSIZE) * INCRSIZE;
388 (incr < left + full && (incr -= move_up(piece, left + full)) <= 0) ||
389 move_up(piece, incr) == incr ||
390 compact(piece, size, alloctype)) {
391 mems[piece].mem_full += size;
392 mems[piece].mem_left -= size;
401 * Same as alloc() but for a piece which really needs it. If the first
402 * attempt fails, release the space occupied by other pieces and try again.
405 hard_alloc(piece, size)
412 if (size != (ind_t)size)
414 if ((ret = alloc(piece, size)) != BADOFF) {
419 * Deallocate what we don't need.
421 for (i = 0; i < NMEMS; i++) {
429 break; /* Do not try to deallocate this. */
437 if ((ret = alloc(piece, size)) != BADOFF) {
442 ret = alloc(piece, size);
448 * We don't need the previous modules, so we put the current module
449 * at the start of the piece allocated for module contents, thereby
450 * overwriting the saved modules, and release its space.
456 register char *old, *new;
457 register struct memory *mem = &mems[ALLOMODL];
459 size = mem->mem_full - core_position;
461 old = new + core_position;
464 mem->mem_full -= core_position;
465 mem->mem_left += core_position;
466 core_position = (ind_t)0;
470 * The piece of memory with index `piece' is no longer needed.
471 * We take care that it can be used by compact() later, if needed.
477 * Some pieces need their memory throughout the program.
479 assert(piece != ALLOGLOB);
480 assert(piece != ALLOGCHR);
481 assert(piece != ALLOSYMB);
482 assert(piece != ALLOARCH);
483 mems[piece].mem_left += mems[piece].mem_full;
484 mems[piece].mem_full = (ind_t)0;
488 core_alloc(piece, size)
494 if ((off = alloc(piece, size)) == BADOFF)
496 return address(piece, off);
503 char *q = address(piece, mems[piece].mem_full);
506 switch(sizeof(unsigned) == sizeof(char *)) {
508 mems[piece].mem_full -= (unsigned) (q - p);
509 mems[piece].mem_left += (unsigned) (q - p);
512 mems[piece].mem_full -= (ind_t) q - (ind_t) p;
513 mems[piece].mem_left += (ind_t) q - (ind_t) p;
519 * Reset index into piece of memory for modules and
520 * take care that the allocated pieces will not be moved.
526 core_position = (ind_t)0;
531 for (i = 0; i < NMEMS; i++) {
537 break; /* Do not try to deallocate this. */
543 compact(NMEMS - 1, (ind_t)0, FREEZE);
546 /* ------------------------------------------------------------------------- */
549 * To transform the various pieces of the output in core to the file format,
550 * we must order the bytes in the unsigned shorts and longs as ACK prescribes.
554 unsigned short nsect;
556 register struct memory *mem;
557 extern unsigned short NLocals, NGlobals;
558 extern long NLChars, NGChars;
560 extern struct outhead outhead;
561 extern struct outsect outsect[];
562 extern char *outputname;
565 nsect = outhead.oh_nsect;
566 offchar = OFF_CHAR(outhead);
569 * We allocated two areas: one for local and one for global names.
570 * Also, we used another kind of on_foff than on file.
571 * At the end of the global area we have put the section names.
573 if (!(flagword & SFLAG)) {
574 do_crs((struct outname *)mems[ALLOLOCL].mem_base, NLocals);
575 namecpy((struct outname *)mems[ALLOLOCL].mem_base,
579 namecpy((struct outname *)mems[ALLOGLOB].mem_base,
585 * These pieces must always be written.
588 wr_sect(outsect, nsect);
589 for (mem = &mems[ALLOEMIT]; mem < &mems[ALLORELO]; mem++)
590 wrt_emit(mem->mem_base, sectionno++, mem->mem_full);
592 * The rest depends on the flags.
594 if (flagword & (RFLAG|CFLAG))
595 wr_relo((struct outrelo *) mems[ALLORELO].mem_base,
597 if (!(flagword & SFLAG)) {
598 wr_name((struct outname *) mems[ALLOLOCL].mem_base,
600 wr_name((struct outname *) mems[ALLOGLOB].mem_base,
602 wr_string(mems[ALLOLCHR].mem_base + 1, (long)NLChars);
603 wr_string(mems[ALLOGCHR].mem_base + 1, (long)NGChars);
605 wr_dbug(mems[ALLODBUG].mem_base, mems[ALLODBUG].mem_full);
610 namecpy(name, nname, offchar)
611 register struct outname *name;
612 register unsigned nname;
613 register long offchar;
617 name->on_foff += offchar - 1;