/* $Id: mal.c,v 1.22 1994/06/24 11:17:57 ceriel Exp $ */
/*
 * (c) copyright 1987 by the Vrije Universiteit, Amsterdam, The Netherlands.
 * See the copyright notice in the ACK home directory, in the file "Copyright".
 */
/* Malloc space is traversed by N doubly-linked lists of chunks, each
   containing a couple of house-keeping data addressed as a
   'mallink' and a piece of useful space, called the block.
   The N lists are accessed through their starting pointers in
   free_list[].  Free_list[n] points to a list of chunks between
   2**(n+LOG_MIN_SIZE) and 2**(n+LOG_MIN_SIZE+1)-1, which means
   that the smallest chunk is 2**LOG_MIN_SIZE (== MIN_SIZE).
*/
/* NOTE(review): this is a fragmentary, line-numbered listing; the embedded
   original line numbers jump (23, 26, 31, ...), so source lines are missing
   between the lines shown.  Code is left byte-identical. */
23 #define SBRK sys_break
26 #define ILL_BREAK (char *)(-1) /* funny failure value */
/* largest chunk size served from the per-size store[] cache below */
31 #define MAX_SZ_IN_STORE (MAX_STORE*ALIGNMENT)
/* forward declarations; 'private'/'privatedata' are presumably macros for
   'static' defined in a header not visible here -- TODO confirm */
32 private do_free(), sell_out();
/* store[i] heads a singly-linked (via log_next) cache of free chunks of
   size (i+1)*ALIGNMENT, used as a fast path by malloc/free */
33 privatedata mallink *store[MAX_STORE];
/* malloc(n) -- return a block of at least n usable bytes, or (char *)0.
   NOTE(review): fragmentary listing -- the function header and many interior
   statements are missing (embedded line numbers jump: 38, 39, 41, 46, ...).
   Comments below describe only what is visible; code is byte-identical. */
38 register unsigned int n;
39 {check_mallinks("malloc entry");{
41 register int min_class;
/* round the request up to the aligned minimum chunk size */
46 if (n < MIN_SIZE) n = align(MIN_SIZE); else n = align(n);
/* fast path: small requests are served from the per-size store[] cache */
48 if (n <= MAX_SZ_IN_STORE) {
49 /* look in the store first */
50 register mallink **stp = &store[(n >> LOG_ALIGNMENT) - 1];
/* (lines missing) pop the head chunk 'ml' off the store list */
53 *stp = log_next_of(ml);
55 check_mallinks("malloc fast exit");
56 assert(! in_store(ml));
57 return block_of_mallink(ml);
62 check_work_empty("malloc, entry");
64 /* Acquire a chunk of at least size n if at all possible;
68 /* Inline substitution of "smallest".
70 register unsigned int n1 = n;
72 assert(n1 < (1L << LOG_MAX_SIZE));
/* (loop body missing) presumably derives min_class, the smallest free-list
   class guaranteed to hold n bytes -- TODO confirm against full source */
78 } while (n1 >= MIN_SIZE);
81 if (min_class >= MAX_FLIST)
82 return (char *) 0; /* we don't deal in blocks that big */
83 ml = first_present(min_class);
/* no suitable free chunk: grow the arena via SBRK, in GRABSIZE multiples */
87 #define GRABSIZE 4096 /* Power of 2 */
88 register unsigned int req =
89 ((MIN_SIZE<<min_class)+ mallink_size() + GRABSIZE - 1) &
93 /* first align SBRK() */
96 SBRK((int) (align((size_type) p) - (size_type) p));
99 /* SBRK takes an int; sorry ... */
/* big request refused by the system: retry with the exact size needed */
106 if (p == ILL_BREAK) {
107 req = n + mallink_size();
108 if ((int) req >= 0) p = SBRK((int)req);
110 if (p == ILL_BREAK) {
111 /* Now this is bad. The system will not give us
112 more memory. We can only liquidate our store
117 ml = first_present(min_class);
118 if (ml == MAL_NULL) {
120 /* In this emergency we try to locate a suitable
121 chunk in the free_list just below the safe
122 one; some of these chunks may fit the job.
124 ml = search_free_list(min_class - 1, n);
125 if (!ml) /* really out of space */
127 started_working_on(ml);
128 unlink_free_chunk(ml);
129 check_mallinks("suitable_chunk, forced");
132 else started_working_on(ml);
/* SBRK succeeded: turn the newly acquired, aligned memory into a chunk */
136 assert((size_type)p == align((size_type)p));
137 ml = create_chunk(p, req);
139 check_mallinks("suitable_chunk, extended");
141 else started_working_on(ml);
143 /* we have a chunk */
146 check_mallinks("suitable_chunk, removed");
/* split off the surplus if it is big enough to form a viable chunk */
148 if (n + MIN_SIZE <= size_of(ml)) {
151 stopped_working_on(ml);
152 check_mallinks("malloc exit");
153 check_work_empty("malloc exit");
155 assert(! in_store(ml));
157 return block_of_mallink(ml);
/* free(addr) -- release a block obtained from malloc/realloc.
   NOTE(review): fragmentary listing; the function header and several interior
   lines are missing.  Code is left byte-identical. */
162 {check_mallinks("free entry");{
163 register mallink *ml;
/* (lines missing) the check string suggests free(0) exits here as a no-op */
166 check_mallinks("free(0) very fast exit");
169 ml = mallink_of_block(addr);
/* silently ignore a double free or a free of a chunk already in the store */
173 if (free_of(ml) || in_store(ml))
174 return; /* user frees free block */
/* small chunks are pushed onto the per-size store[] cache instead of the
   free lists, mirroring malloc's fast path */
175 if (size_of(ml) <= MAX_SZ_IN_STORE) {
176 /* return to store */
177 mallink **stp = &store[(size_of(ml) >> LOG_ALIGNMENT) - 1];
179 set_log_next(ml, *stp);
183 check_mallinks("free fast exit");
187 check_mallinks("free exit");
/* Coalescing part of releasing a chunk -- presumably the body of do_free()
   declared earlier; its header line is not visible in this fragment.
   Merges the chunk with free physical neighbours on both sides. */
193 register mallink *ml;
198 if (free_of(ml)) return;
200 started_working_on(ml);
/* merge with the physically following chunk if that one is free */
203 if (! last_mallink(ml)) {
204 register mallink *next = phys_next_of(ml);
206 if (free_of(next)) coalesce_forw(ml, next);
/* merge with the physically preceding chunk if that one is free */
209 if (! first_mallink(ml)) {
210 register mallink *prev = phys_prev_of(ml);
213 coalesce_backw(ml, prev);
218 stopped_working_on(ml);
219 check_work_empty("free");
221 /* Compile-time checks on param.h */
/* NOTE(review): these are case labels inside a switch (the switch header is
   not visible in this fragment).  The trick: a violated condition makes two
   case labels collapse to the same constant, which is a compile error --
   a pre-C11 substitute for _Static_assert. */
223 case MIN_SIZE < OFF_SET * sizeof(mallink): break;
225 /* If this statement does not compile due to duplicate case
226 entry, the minimum size block cannot hold the links for
227 the free blocks. Either raise LOG_MIN_SIZE or switch
232 case sizeof(char *) != sizeof(size_type): break;
234 /* If this statement does not compile due to duplicate
235 case entry, size_type is not defined correctly.
236 Redefine and compile again.
/* realloc(addr, n) -- resize the block at addr to at least n bytes,
   growing in place when the physically next chunk is free, otherwise
   malloc + copy.  NOTE(review): fragmentary listing -- the function header
   and many interior lines are missing; code is left byte-identical. */
244 register unsigned int n;
245 {check_mallinks("realloc entry");{
246 register mallink *ml, *ph_next;
247 register size_type size;
250 /* Behave like most Unix realloc's when handed a
259 ml = mallink_of_block(addr);
260 if (n < MIN_SIZE) n = align(MIN_SIZE); else n = align(n);
/* if the chunk sits in the store[] cache, unlink it from that list first */
263 register mallink *stp = store[(size_of(ml) >> LOG_ALIGNMENT) - 1];
267 stp = log_next_of(stp);
269 stp = log_next_of(stp);
/* NOTE(review): this index uses LOG_MIN_SIZE while the lookup above uses
   LOG_ALIGNMENT -- looks inconsistent; verify against the full source */
270 if (! stp1) store[(size_of(ml) >> LOG_MIN_SIZE) - 1] = stp;
271 else set_log_next(stp1, stp);
/* tolerate realloc of a free chunk: claim it back */
277 unlink_free_chunk(ml);
278 set_free(ml, 0); /* user reallocs free block */
280 started_working_on(ml);
282 if ( /* we can simplify the problem by adding the next chunk: */
285 (ph_next = phys_next_of(ml), free_of(ph_next)) &&
286 n <= size + mallink_size() + size_of(ph_next)
288 /* add in the physically next chunk */
289 unlink_free_chunk(ph_next);
290 combine_chunks(ml, ph_next);
292 check_mallinks("realloc, combining");
/* still too small: fall back to malloc + byte-wise copy of the old data */
294 if (n > size) { /* this didn't help */
296 register char *l1, *l2 = addr;
298 stopped_working_on(ml);
299 if (!(new = l1 = malloc(n))) return (char *) 0; /* no way */
300 while (size--) *l1++ = *l2++;
302 check_work_empty("mv_realloc");
304 assert(! in_store(mallink_of_block(new)));
308 /* it helped, but maybe too well */
/* give back the surplus if it can form a chunk of its own */
310 if (n + MIN_SIZE <= size_of(ml)) {
313 stopped_working_on(ml);
314 check_mallinks("realloc exit");
315 check_work_empty("realloc");
317 assert(! in_store(ml));
322 /* Auxiliary routines */
/* Presumably sell_out(): walks every store[] list and returns the cached
   chunks to the free lists (used by malloc's out-of-memory emergency path).
   The function header is not visible in this fragment. */
327 /* Frees all block in store.
329 register mallink **stp;
331 for (stp = &store[0]; stp < &store[MAX_STORE]; stp++) {
332 register mallink *ml = *stp;
/* (lines missing) pop each chunk off its store list before freeing it */
335 *stp = log_next_of(ml);
/* Assertion-failure reporter: prints the failing line number digit by digit
   on fd 2 (stderr) using raw write(), avoiding stdio (which might itself
   call malloc).  The message string is exactly 31 bytes, matching the
   length argument.  Only handles line numbers up to 999. */
354 write(2, ": malloc assert failed in line ", 31);
355 ch = (ln / 100) + '0'; write(2, &ch, 1); ln %= 100;
356 ch = (ln / 10) + '0'; write(2, &ch, 1); ln %= 10;
357 ch = (ln / 1) + '0'; write(2, &ch, 1);