1 /* $Id: ra_allocl.c,v 1.6 1994/06/24 10:27:27 ceriel Exp $ */
3 * (c) copyright 1987 by the Vrije Universiteit, Amsterdam, The Netherlands.
4 * See the copyright notice in the ACK home directory, in the file "Copyright".
6 /* R E G I S T E R A L L O C A T I O N
8 * R A _ A L L O C L I S T . C
15 #include "../share/types.h"
16 #include "../share/debug.h"
17 #include "../share/def.h"
18 #include "../share/global.h"
19 #include "../share/lset.h"
20 #include "../share/cset.h"
21 #include "../share/aux.h"
22 #include "../share/alloc.h"
23 #include "../share/map.h"
27 #include "ra_allocl.h"
28 #include "ra_interv.h"
/* count_usage(p, item, nrloops, sloopcnt, dloopcnt)
 *
 * For every loop of procedure p, count how often 'item' is used inside
 * that loop.  sloopcnt[] gets a static count and dloopcnt[] a dynamic
 * estimate: a use inside a firm block at loop-nesting depth 'lev' is
 * weighted by loop_scale(lev), so deeply nested uses score higher.
 * NOTE(review): this listing is incomplete -- the lines that actually
 * increment the counter arrays are missing, so the exact bookkeeping
 * is inferred from the surviving comment; confirm against full source.
 */
30 STATIC count_usage(p,item,nrloops,sloopcnt,dloopcnt)
33 short nrloops, sloopcnt[], dloopcnt[];
35 /* Determine how many times the item is used in every loop.
36 * We maintain a 'static' count and a 'dynamic' count. The dynamic
37 * count estimates the number of times the item is used during
38 * execution, i.e. it gives a higher mark to items used inside
/* Presumably clears both counter slots for loops 0..nrloops; the loop
 * body is on a missing line -- TODO confirm. */
49 for (i = 0; i <= nrloops; i++) {
/* Walk all recorded uses (time_p records) of the item. */
53 for (ui = Lfirst(item->it_usage); ui != (Lindex) 0;
54 ui = Lnext(ui,item->it_usage)) {
55 u = (time_p) Lelem(ui);
56 loops = u->t_bblock->b_loops;
/* lev = nesting depth of the block containing this use. */
57 lev = Lnrelems(loops);
58 /* set of loops in which this usage of item occurs */
59 for (li = Lfirst(loops); li != (Lindex) 0; li=Lnext(li,loops)) {
60 l = (loop_p) Lelem(li);
/* Dynamic weight: scale by nesting depth for firm blocks (blocks
 * executed on every loop iteration), count once otherwise. */
63 (IS_FIRM(u->t_bblock) ? loop_scale(lev) : 1);
/* cons_alloc - build a new allocation record (alloc_p) describing one
 * candidate decision "keep 'item' in a register during 'timespan'".
 * The record stores the static/dynamic use counts, the set of points
 * where the item must be initialized ('inits'), a back pointer to the
 * whole-procedure allocation of the same item ('wholeproc'), and two
 * flags telling whether the timespan is a loop or the whole procedure.
 * Every record gets a unique id from the global counter alloc_id.
 * NOTE(review): the allocation of 'x' itself and the assignments of
 * al_item/al_inits are on lines missing from this listing.
 */
70 STATIC alloc_p cons_alloc(item,timespan,stat_usecount,
71 dyn_usecount,inits,wholeproc,isloop,iswholeproc)
74 short stat_usecount,dyn_usecount;
77 bool isloop,iswholeproc;
/* Unique identity; alloc_id is a global running counter. */
82 x->al_id = ++alloc_id;
84 x->al_timespan = timespan;
85 x->al_susecount = stat_usecount;
86 x->al_dusecount = dyn_usecount;
88 x->al_wholeproc = wholeproc;
89 x->al_isloop = isloop;
90 x->al_iswholeproc = iswholeproc;
/* insert_alloc - prepend 'alloc' to the singly linked list *list_p.
 * NOTE(review): the statement storing alloc back into *list_p is on a
 * line missing from this listing; only the link step is visible.
 */
95 STATIC insert_alloc(alloc,list_p)
96 alloc_p alloc, *list_p;
98 alloc->al_next = *list_p;
/* MUST_INIT(i,b): item i must be given an initial value when entering
 * basic block b with i held in a register -- either i is not a local
 * variable, or the begin point of b falls inside i's live range
 * (contains()).
 * MUST_UPDATE(i,b): the memory copy of local i must be brought up to
 * date at the start of b -- i is a local whose live range covers b's
 * begin point.
 * Macro arguments are fully parenthesized so the macros expand safely
 * for any actual-argument expression (e.g. a cast or conditional).
 */
#define MUST_INIT(i,b)   ((i)->it_type != LOCALVAR || contains((b)->B_BEGIN,(i)->it_lives))
#define MUST_UPDATE(i,b) ((i)->it_type == LOCALVAR && contains((b)->B_BEGIN,(i)->it_lives))
/* loop_inits - build the set of entry points of loop lp where 'item'
 * must be given an initial register value.
 * NOTE(review): only the header-block case is visible in this listing;
 * the body of the if and the return of 's' are on missing lines.
 */
107 STATIC lset loop_inits(lp,item,header)
112 /* Build the set of entry points to loop lp where item
113 * must be initialized
116 lset s = Lempty_set();
/* If the loop has a header block and the item needs initializing
 * there, record that point (details on missing lines). */
117 if (header != (bblock_p) 0 && MUST_INIT(item,header)) {
125 #define IN_LOOP(b) (Lnrelems(b->b_loops) > 0)
/* init_point - pick the basic block in which registers holding 'item'
 * should be initialized: the latest common dominator of all uses that
 * is not itself inside a loop (see the original comment below).
 * NOTE(review): listing incomplete -- the assignment of 'b' from the
 * use record, the loop body that climbs the dominator tree, and the
 * return statement are on missing lines.
 */
127 STATIC bblock_p init_point(item)
130 /* Find the most appropriate point to initialize any register
131 * containing the item. We want to do the initialization as
132 * late as possible, to allow other items to be put in the
133 * same register, before this initialization. Yet, as we want
134 * to do the initialization only once, it must be done in a
135 * basic block that is a dominator of all points where the
136 * item is used (ultimately in the first block of the procedure).
137 * This basic block should not be part of loop.
/* Fold the block of every use into the running common dominator. */
144 for (ti = Lfirst(item->it_usage); ti != (Lindex) 0;
145 ti = Lnext(ti,item->it_usage)) {
146 t = (time_p) Lelem(ti);
148 dom = (dom == (bblock_p) 0 ? b : common_dom(dom,b));
/* Climb out of any loop containing the chosen dominator. */
150 while (IN_LOOP(dom)) {
151 /* Find a dominator of dom (possibly
152 * dom itself) that is outside any loop.
/* add_blocks - recursively extend the interval list 'span' with the
 * instruction range (B_BEGIN..B_END) of block b and of all its
 * transitive predecessors.  The cset *s records blocks already
 * visited, which avoids duplicate work and terminates the recursion
 * on cycles in the flow graph.
 * NOTE(review): the line adding b->b_id to *s is missing from this
 * listing -- without it the Cis_elem guard could not terminate;
 * confirm against the full source.
 */
160 STATIC add_blocks(b,s,span)
167 if (!Cis_elem(b->b_id,*s)) {
169 add_interval(b->B_BEGIN,b->B_END,span);
170 for (pi = Lfirst(b->b_pred); pi != (Lindex) 0;
171 pi = Lnext(pi,b->b_pred)) {
172 add_blocks((bblock_p) Lelem(pi),s,span);
/* whole_lifetime - compute, via the output parameters, the
 * initialization point (*ini_out, from init_point) and the timespan
 * (*span_out) covering all uses of 'item', for the case where the
 * item is kept in a register during its entire lifetime.
 * The span is built by add_blocks from the block of every use plus all
 * its predecessors; the initialization block itself is appended if the
 * walk did not already reach it.
 * NOTE(review): listing incomplete -- the assignment of 'b' from the
 * use record and the final stores into the output parameters are on
 * missing lines.
 */
179 STATIC whole_lifetime(item,ini_out,span_out)
184 /* Find the initialization point and the time_span of the item, if
185 * we put the item in a register during all its uses.
188 bblock_p b, ini = init_point(item);
189 cset s = Cempty_set(blength);
192 interv_p span = (interv_p) 0;
194 for (ti = Lfirst(item->it_usage); ti != (Lindex) 0;
195 ti = Lnext(ti,item->it_usage)) {
196 t = (time_p) Lelem(ti);
198 add_blocks(b,&s,&span);
/* Make sure the initialization block is part of the timespan too. */
200 if (!Cis_elem(ini->b_id,s)) {
201 add_interval(ini->B_BEGIN,ini->B_END,&span);
/* proc_inits - build the set of points where 'item' must be
 * initialized when it is kept in a register during the whole
 * procedure (timespan = whole procedure, init block = 'ini').
 * NOTE(review): most of the body is missing from this listing; only
 * the guard below is visible.
 */
211 STATIC lset proc_inits(p,item,ini)
216 lset s = Lempty_set();
/* it_off >= 0 presumably distinguishes parameters (non-negative
 * offsets) from true locals, so parameters also need an initial
 * load -- TODO confirm against the full source. */
218 if (item->it_type != LOCALVAR || item->i_t.it_off >= 0) {
219 /* only local variables need not be initialized */
/* updates_needed - determine whether 'item' is live after loop lp is
 * exited: scan every successor of every loop block that lies outside
 * the loop and test MUST_UPDATE there.  If so, the memory copy of the
 * item would have to be updated on loop exit.
 * NOTE(review): listing incomplete -- the TRUE/FALSE returns are on
 * missing lines.
 */
226 STATIC bool updates_needed(lp,item)
230 /* See if the value of item is live after the loop has
231 * been exited, i.e. must the item be updated after the loop?
237 for (bi = Lfirst(lp->LP_BLOCKS); bi != (Lindex) 0;
238 bi = Lnext(bi,lp->LP_BLOCKS)) {
239 b = (bblock_p) Lelem(bi);
240 for (si = Lfirst(b->b_succ); si != (Lindex) 0;
241 si = Lnext(si,b->b_succ)) {
242 s = (bblock_p) Lelem(si);
/* Successor outside the loop where the item is live at entry
 * => an update would be required. */
243 if (!Lis_elem(s,lp->LP_BLOCKS) && MUST_UPDATE(item,s)) {
/* countuses - return the number of recorded uses in the 'usage' list
 * that occur in basic block b.
 * NOTE(review): the declaration/initialization of 'cnt' and the return
 * statement are on lines missing from this listing.
 */
253 STATIC short countuses(usage,b)
261 for (ti = Lfirst(usage); ti != (Lindex) 0; ti = Lnext(ti,usage)) {
262 t = (time_p) Lelem(ti);
263 if (t->t_bblock == b) cnt++;
/* allocs_of_item - generate all candidate allocations for one item and
 * insert them into *alloc_list_p: one allocation spanning the whole
 * procedure, plus one per loop in which the item is used (when no
 * loop-exit updates would be needed and initialization is possible).
 * NOTE(review): this listing is incomplete; several declarations and
 * closing lines of the body are missing.  Two lines below end in a
 * stray "<)" -- this looks like an HTML-mangled "&lt" i.e. the address
 * of the local 'lt' (the interval list); confirm against full source.
 */
270 STATIC allocs_of_item(p,item,loops,sloopcnt,dloopcnt,alloc_list_p)
274 short *sloopcnt,*dloopcnt; /* dynamic arrays */
275 alloc_p *alloc_list_p;
280 short susecount,dusecount;
284 /* The whole procedure may be used as timespan.
285 The dynamic usecount of a procedure is taken to be the same
286 as its static usecount; this number is not very important, as
287 time-optimization chooses loops first.
289 whole_lifetime(item,&ini,<);
290 wholeproc = cons_alloc(item,lt,Lnrelems(item->it_usage),
291 Lnrelems(item->it_usage), proc_inits(p,item,ini),
292 (alloc_p) 0,FALSE,TRUE);
293 insert_alloc(wholeproc, alloc_list_p);
/* Now one candidate allocation per loop that uses the item. */
294 for (li = Lfirst(loops); li != (Lindex) 0; li = Lnext(li,loops)) {
295 lp = (loop_p) Lelem(li);
/* Skip loops where an exit update would be needed, or where there
 * is no header block although the item must be initialized at the
 * loop entry. */
296 if (sloopcnt[lp->lp_id] != 0 && !updates_needed(lp,item)
297 && !((header = lp->LP_HEADER) == (bblock_p) 0 &&
298 MUST_INIT(item,lp->lp_entry))) {
299 /* Item is used within loop, so consider loop
300 * as a timespan during which item may be put in
/* NOTE(review): this re-test appears redundant -- the enclosing
 * condition above already guarantees it is false here; kept as-is. */
302 if ((header = lp->LP_HEADER) == (bblock_p) 0 &&
303 MUST_INIT(item,lp->lp_entry)) continue;
305 lt = loop_lifetime(lp);
306 susecount = sloopcnt[lp->lp_id];
307 dusecount = dloopcnt[lp->lp_id];
308 if (MUST_INIT(item,lp->lp_entry)) {
309 /* include header block in timespan */
310 add_interval(header->B_BEGIN,header->B_END,<);
311 susecount += countuses(item->it_usage,header);
/* No initialization needed in the header: forget it, so
 * loop_inits will not record an init point there. */
313 header = (bblock_p) 0;
315 insert_alloc(cons_alloc(item,lt,susecount,dusecount,
316 loop_inits(lp,item,header),wholeproc,
319 } else if (sloopcnt[lp->lp_id] != 0) {
320 /* I confess: this is a hack. I didn't expect the
321 * Spanish inquisition.
/* Loop cannot get its own allocation; at least make the
 * whole-procedure allocation reflect this loop's dynamic count. */
323 if (wholeproc->al_dusecount < dloopcnt[lp->lp_id])
324 wholeproc->al_dusecount = dloopcnt[lp->lp_id];
/* build_alloc_list - external entry point: for every item of procedure
 * p on 'itemlist', count its per-loop usage and generate its candidate
 * allocations; return the resulting allocation list.
 * The two per-loop counter tables are sized by nrloops and released
 * again via oldtable before returning.
 * NOTE(review): listing incomplete -- the last argument of the
 * allocs_of_item call (&alloc_list) and the return statement are on
 * missing lines.
 */
331 alloc_p build_alloc_list(p,nrloops,itemlist)
336 short *sloopcnt,*dloopcnt; /* dynamic arrays */
337 register item_p item;
338 alloc_p alloc_list = (alloc_p) 0;
340 sloopcnt = (short *) newtable(nrloops);
341 dloopcnt = (short *) newtable(nrloops);
342 for (item = itemlist; item != (item_p) 0; item = item->it_next) {
343 count_usage(p,item,nrloops,sloopcnt,dloopcnt);
344 allocs_of_item(p,item,p->p_loops,sloopcnt,dloopcnt,
347 oldtable(sloopcnt,nrloops);
348 oldtable(dloopcnt,nrloops);
/* build_rivals_graph - mark every pair of allocations whose items can
 * be live at the same moment as rivals of each other (they would then
 * compete for a register); see the original comment below.
 * For each allocation al_busy holds the portion of its timespan during
 * which the item is actually live (for locals: timespan intersected
 * with the live range; otherwise the full timespan).
 * NOTE(review): this listing is incomplete and the function is
 * truncated at the end of the visible chunk (the al_busy assignment's
 * left-hand side and the tail of the pairwise loop are missing).
 */
354 build_rivals_graph(alloclist)
357 /* See which allocations in the list are rivals of each other,
358 * i.e. there is some point of time, falling in both
359 * timespans, at which the items of both allocations are live.
360 * Allocations with the same item (but different timespans) are
361 * not considered to be rivals.
362 * We use an auxiliary data structure "busy" for each allocation,
363 * indicating when the item is live during the timespan of the
367 register alloc_p alloc,x;
/* First pass: give every allocation an empty rivals set, sized by the
 * current number of allocation ids. */
369 for (alloc = alloclist; alloc != (alloc_p) 0; alloc = alloc->al_next) {
370 alloc->al_rivals = Cempty_set(alloc_id);
/* Second pass: compute al_busy and compare each allocation against all
 * earlier ones (pairs are visited exactly once: x runs up to alloc). */
372 for (alloc = alloclist; alloc != (alloc_p) 0; alloc = alloc->al_next) {
374 (alloc->al_item->it_type == LOCALVAR ?
375 intersect(alloc->al_timespan,alloc->al_item->it_lives) :
376 copy_timespan(alloc->al_timespan));
377 for (x = alloclist; x != alloc; x = x->al_next) {
/* Different items with overlapping busy periods => rivals. */
378 if (x->al_item != alloc->al_item &&
379 not_disjoint(alloc->al_busy,x->al_busy)) {
380 Cadd(x->al_id,&alloc->al_rivals);
381 Cadd(alloc->al_id,&x->al_rivals);
/* Extra bookkeeping for rivals wanting the same register type. */
382 if (alloc->al_regtype == x->al_regtype) {
383 alloc->al_cntrivals++;