extern void tb_procedure(void);
extern void tb_regvar(struct procedure* proc, arith offset, int size, int type, int priority);
+extern void pass_convert_inputs_to_phis(void);
extern void pass_convert_locals_to_ssa(void);
+extern void pass_convert_nonlocal_phis(void);
extern void pass_convert_stack_ops(void);
+extern void pass_determine_vreg_usage(void);
extern void pass_eliminate_trivial_blocks(void);
extern void pass_find_phi_congruence_groups(void);
extern void pass_group_irs(void);
extern void pass_remove_dead_blocks(void);
extern void pass_remove_dead_phis(void);
extern void pass_split_critical_edges(void);
+extern void pass_split_live_ranges(void);
extern void pass_wire_up_return_values(void);
extern void platform_calculate_offsets(void);
--- /dev/null
+#include "mcg.h"
+
+/* Currently, IRs can refer to other IRs which are either in the current bb or
+ * else defined in a parent bb (although, we hope, in a strictly dominating
+ * bb). This makes life awkward in future passes, as it's not obvious where
+ * each IR is defined. So, we convert these non-local references into phis,
+ * so ensuring that all IR references are to either the current bb, or via
+ * a phi to a parent bb. */
+
+/* Tests if this node *contains* a non-local reference. */
+/* ir_walk callback: reports whether this node directly references an
+ * operand whose defining basic block differs from the node's own.
+ * The user pointer (the bb being scanned) is deliberately unused; the
+ * check is made against ir->bb. */
+static bool find_non_local_direct_reference_cb(struct ir* ir, void* user)
+{
+    struct ir* l = ir->left;
+    struct ir* r = ir->right;
+
+    return (l && (l->bb != ir->bb)) || (r && (r->bb != ir->bb));
+}
+
+/* Returns the non-local reference itself. */
+static struct ir* find_non_local_direct_reference(struct basicblock* bb)
+{
+ int i;
+
+ for (i=0; i<bb->irs.count; i++)
+ {
+ struct ir* ir = ir_walk(bb->irs.item[i],
+ find_non_local_direct_reference_cb, bb);
+ if (ir)
+ {
+ if (ir->left && (ir->left->bb != bb))
+ return ir->left;
+ if (ir->right && (ir->right->bb != bb))
+ return ir->right;
+ }
+ }
+
+ return NULL;
+}
+
+/* True if ir's defining block is an immediate predecessor of bb. */
+static bool is_defined_in_prev_block(struct basicblock* bb, struct ir* ir)
+{
+    int i = 0;
+
+    while (i < bb->prevs.count)
+    {
+        if (bb->prevs.item[i] == ir->bb)
+            return true;
+        i++;
+    }
+
+    return false;
+}
+
+/* Creates a phi at the head of bb which imports src (an IR defined in some
+ * other block), then rewrites every use of src inside bb to refer to the
+ * new phi instead.  Returns the phi.
+ *
+ * (Review fix: the original ended with `if (is_defined_in_prev_block(bb,
+ * src)) return dest; return dest;` — both paths returned the same value,
+ * so the dead conditional and the unused loop counter were removed.) */
+static struct ir* insert_phis_and_rewrite(struct basicblock* bb, struct ir* src)
+{
+    struct ir* dest;
+
+    dest = new_ir0(IR_PHI, src->size);
+    dest->root = dest;
+    dest->bb = bb;
+
+    tracef('I', "I: in %s, inserting phi $%d for $%d (from %s)\n",
+        bb->name, dest->id, src->id, src->bb->name);
+
+    /* The phi initially imports src from its defining block; if that block
+     * is not a direct predecessor, a later pass must reroute the import. */
+    pmap_add(&dest->u.phivalue, src->bb, src);
+    array_insert(&bb->irs, dest, 0);
+
+    ir_rewrite(bb, src, dest);
+
+    return dest;
+}
+
+/* Pass entry point: for every basic block (in dominance preorder),
+ * repeatedly find an instruction operand defined in some other block and
+ * route it through a freshly inserted phi, until no non-local direct
+ * references remain.  The inner loop restarts the search after every
+ * rewrite because inserting a phi mutates bb->irs. */
+void pass_convert_inputs_to_phis(void)
+{
+    int i;
+
+    /* Insert new phi nodes for any use of a non-local IR in an ordinary
+     * instruction. */
+
+    for (i=0; i<dominance.preorder.count; i++)
+    {
+        struct basicblock* bb = dominance.preorder.item[i];
+
+        for (;;)
+        {
+            struct ir* ir = find_non_local_direct_reference(bb);
+            if (!ir)
+                break;
+
+            tracef('I', "I: considering $%d in %s, referring to %s\n",
+                ir->id, bb->name, ir->bb->name);
+            insert_phis_and_rewrite(bb, ir);
+        }
+    }
+}
+
+/* vim: set sw=4 ts=4 expandtab : */
--- /dev/null
+#include "mcg.h"
+
+/* Phis can import IRs defined in any IR which strictly dominates the current
+ * one. This makes life awkward for future passes, as it's not obvious what's
+ * defined where, so we convert these non-local references into phis,
+ * so ensuring that all IR references are to either the current bb, or via
+ * a phi to a parent bb. */
+
+static ARRAYOF(struct basicblock) confirmed;
+static ARRAYOF(struct basicblock) pending;
+static PMAPOF(struct ir, struct ir) added;
+
+static struct basicblock* current_src;
+static struct basicblock* current_dest;
+static struct ir* current_ir;
+
+/* True if ir's defining block is an immediate predecessor of bb.
+ * NOTE(review): nothing else in this translation unit appears to call this
+ * helper — confirm whether it is needed or can be removed. */
+static bool is_defined_in_prev_block(struct basicblock* bb, struct ir* ir)
+{
+    int i;
+
+    for (i=0; i<bb->prevs.count; i++)
+        if (bb->prevs.item[i] == ir->bb)
+            return true;
+
+    return false;
+}
+
+/* Walks backwards from bb through predecessor edges, adding every block
+ * reachable that way (excluding current_src and current_dest themselves)
+ * to the 'pending' set.
+ * NOTE(review): despite the name, this follows bb->prevs (parents), not
+ * children; behavior kept as-is.  It also presumes array_appendu returns
+ * nonzero when the item was already present, so each block is only
+ * recursed into once (terminating on CFG cycles) — confirm. */
+static void recursively_add_children_to_pending(struct basicblock* bb)
+{
+    int i;
+
+    for (i=0; i<bb->prevs.count; i++)
+    {
+        struct basicblock* candidate = bb->prevs.item[i];
+
+        if ((candidate != current_src)
+            && (candidate != current_dest)
+            && !array_appendu(&pending, candidate))
+            recursively_add_children_to_pending(candidate);
+    }
+}
+
+/* Follows successor edges from bb; any successor currently in 'pending'
+ * (i.e. upstream of current_dest) is promoted to 'confirmed' — it lies on
+ * a path from current_src towards current_dest — and its own successors
+ * are then considered in turn.  Removal from 'pending' bounds the
+ * recursion. */
+static void recursively_move_children_to_confirmed(struct basicblock* bb)
+{
+    int i;
+
+    for (i=0; i<bb->nexts.count; i++)
+    {
+        struct basicblock* candidate = bb->nexts.item[i];
+
+        if (array_contains(&pending, candidate))
+        {
+            tracef('I', "I: encompassing %s\n", candidate->name);
+            array_remove(&pending, candidate);
+            array_appendu(&confirmed, candidate);
+            recursively_move_children_to_confirmed(candidate);
+        }
+    }
+}
+
+/* Allocates a fresh phi of the given size and places it at the head of bb.
+ * When src is non-NULL, the phi is seeded with a single import of src from
+ * src's defining block.  Returns the new phi. */
+static struct ir* new_phi(struct basicblock* bb, int size, struct ir* src)
+{
+    struct ir* phi;
+
+    phi = new_ir0(IR_PHI, size);
+    phi->root = phi;
+    phi->bb = bb;
+
+    if (src != NULL)
+        pmap_add(&phi->u.phivalue, src->bb, src);
+
+    array_insert(&bb->irs, phi, 0);
+    return phi;
+}
+
+/* Inserts a phi importing src at the head of bb, then redirects all uses
+ * of src within bb (and only bb) to the new phi; returns that phi. */
+static struct ir* insert_phi_to_prev(struct basicblock* bb, int size, struct ir* src)
+{
+    struct ir* imported = new_phi(bb, size, src);
+
+    tracef('I', "I: adding phi $%d for $%d in %s\n",
+        imported->id, src->id, bb->name);
+
+    ir_rewrite_single_block(bb, src, imported);
+    return imported;
+}
+
+/* ir_walk callback for already_importing().  user points to a bool 'found'
+ * flag, which is set when ir is a phi that already imports a value which
+ * the 'added' pmap records as having been created on behalf of current_ir.
+ * NOTE(review): true is returned both on a hit and for every non-phi node;
+ * presumably a true return tells ir_walk to stop descending into this
+ * subtree — confirm against ir_walk's contract. */
+static bool replace_irs_cb(struct ir* ir, void* user)
+{
+    int i;
+    bool* found = user;
+
+    if (*found)
+        return true;
+    if (ir->opcode != IR_PHI)
+        return true;
+
+    for (i=0; i<ir->u.phivalue.count; i++)
+    {
+        if (pmap_contains_bi(&added, current_ir, ir->u.phivalue.item[i].right))
+        {
+            *found = true;
+            return true;
+        }
+    }
+
+    return false;
+}
+
+/* True if any phi reachable from bb's IR trees already imports a value
+ * that was inserted on behalf of current_ir (as tracked by 'added'). */
+static bool already_importing(struct basicblock* bb)
+{
+    int i = 0;
+
+    while (i < bb->irs.count)
+    {
+        bool found = false;
+
+        ir_walk(bb->irs.item[i], replace_irs_cb, &found);
+        if (found)
+            return true;
+
+        i++;
+    }
+
+    return false;
+}
+
+/* Reroutes phi's import of current_ir — which is defined in current_src,
+ * a block that is NOT a direct predecessor of current_dest — so that the
+ * value arrives via direct predecessors instead.  The blocks lying between
+ * current_src and current_dest are computed into 'confirmed'; each such
+ * direct predecessor gets a forwarding phi (unless it already imports the
+ * value), and the original phi is repointed accordingly. */
+static void import_ir(struct ir* phi)
+{
+    int i;
+
+    confirmed.count = 0;
+    pending.count = 0;
+
+    /* pending := blocks upstream of current_dest; confirmed := those of
+     * them reachable forwards from current_src, plus current_dest. */
+    recursively_add_children_to_pending(current_dest);
+    array_appendu(&confirmed, current_dest);
+    recursively_move_children_to_confirmed(current_src);
+
+    /* Remove the original source from the phi. */
+
+    ir_print('I', phi);
+    pmap_remove_either(&phi->u.phivalue, current_ir);
+
+    /* For every prev which is in our confirmed list, make sure
+     * that prev exports an IR, and add that IR to the phi. */
+
+    for (i=0; i<current_dest->prevs.count; i++)
+    {
+        struct basicblock* bb = current_dest->prevs.item[i];
+
+        if (bb == current_src)
+            pmap_add(&phi->u.phivalue, bb, current_ir);
+        else if (array_contains(&confirmed, bb) && !already_importing(bb))
+        {
+            /* 'added' remembers the forwarding phi so later scans don't
+             * insert a duplicate importer for the same value. */
+            struct ir* newphi = insert_phi_to_prev(bb, current_ir->size, current_ir);
+            pmap_add(&phi->u.phivalue, bb, newphi);
+            pmap_add(&added, current_ir, newphi);
+            array_remove(&confirmed, bb);
+        }
+    }
+    ir_print('I', phi);
+}
+
+/* Pass entry point: repeatedly scans all phis; whenever one imports an IR
+ * from a block which isn't a direct predecessor, import_ir() threads the
+ * value through intermediate phis and the scan restarts from scratch
+ * (the mutation invalidates the iteration state).
+ *
+ * (Review fix: the array_contains() argument below was mojibake —
+ * '¤t_dest', i.e. '&current_dest' with '&curren' eaten by an HTML-
+ * entity decoding pass — restored to '&current_dest'.) */
+void pass_convert_nonlocal_phis(void)
+{
+    int i, j, k;
+
+    added.count = 0;
+
+    /* If a phi refers to an IR defined in a node which isn't a direct parent,
+     * insert phis upstream for it. */
+
+restart:
+    for (i=0; i<dominance.preorder.count; i++)
+    {
+        current_dest = dominance.preorder.item[i];
+
+        for (j=0; j<current_dest->irs.count; j++)
+        {
+            struct ir* phi = current_dest->irs.item[j];
+            if (phi->opcode == IR_PHI)
+            {
+                for (k=0; k<phi->u.phivalue.count; k++)
+                {
+                    current_ir = phi->u.phivalue.item[k].right;
+                    current_src = current_ir->bb;
+
+                    if (!array_contains(&current_dest->prevs, current_src))
+                    {
+                        tracef('I', "I: import of non-local IR $%d into %s from %s\n",
+                            current_ir->id, current_dest->name, current_src->name);
+                        import_ir(phi);
+                        goto restart;
+                    }
+                }
+            }
+        }
+    }
+}
+
+/* vim: set sw=4 ts=4 expandtab : */
--- /dev/null
+#include "mcg.h"
+
+static struct basicblock* current_bb;
+
+/* Replaces all uses of vreg src with dest in bb, starting at hop index
+ * pos: in each hop's ins/throughs/outs sets, in VREG insels, in register
+ * constraints (both the constrained vreg and equals_to ties), and finally
+ * in successor-block phis whose source IR produced src. */
+static void rewrite_vregs(struct basicblock* bb,
+    int pos, struct vreg* src, struct vreg* dest)
+{
+    int i, j;
+
+    while (pos < bb->hops.count)
+    {
+        struct hop* hop = bb->hops.item[pos];
+
+        array_replace(&hop->ins, src, dest);
+        array_replace(&hop->throughs, src, dest);
+        array_replace(&hop->outs, src, dest);
+
+        /* Patch vreg references embedded in the instruction selectors. */
+        for (i=0; i<hop->insels.count; i++)
+        {
+            struct insel* insel = hop->insels.item[i];
+            if ((insel->type == INSEL_VREG) && (insel->u.vreg == src))
+                insel->u.vreg = dest;
+        }
+
+        /* Constraints name a vreg on the left and may tie it to another
+         * vreg via equals_to; both sides need rewriting. */
+        for (i=0; i<hop->constraints.count; i++)
+        {
+            struct constraint* c = hop->constraints.item[i].right;
+
+            if (hop->constraints.item[i].left == src)
+                hop->constraints.item[i].left = dest;
+
+            if (c->equals_to == src)
+                c->equals_to = dest;
+        }
+
+        pos++;
+    }
+
+    /* Successor phis consuming src from this block must be repointed too. */
+    for (i=0; i<bb->nexts.count; i++)
+    {
+        struct basicblock* nextbb = bb->nexts.item[i];
+
+        for (j=0; j<nextbb->phis.count; j++)
+        {
+            struct phi* phi = nextbb->phis.item[j].right;
+            if (phi->ir->result == src)
+                phi->ir->result = dest;
+        }
+    }
+}
+
+/* Rewrites src to dest in startbb from hop index startindex onwards, then
+ * in every block that follows startbb in dominance preorder (from hop 0).
+ * NOTE(review): "every later preorder block" over-approximates the set of
+ * blocks actually dominated by startbb; presumably harmless because src
+ * cannot be live in the unrelated ones — confirm. */
+static void rewrite_blocks(struct basicblock* startbb, int startindex,
+    struct vreg* src, struct vreg* dest)
+{
+    int i;
+
+    /* Locate startbb's position in the preorder. */
+    for (i=0; i<dominance.preorder.count; i++)
+    {
+        struct basicblock* bb = dominance.preorder.item[i];
+        if (bb == startbb)
+            break;
+    }
+    assert(i < dominance.preorder.count);
+
+    while (i < dominance.preorder.count)
+    {
+        struct basicblock* bb = dominance.preorder.item[i];
+        rewrite_vregs(bb, startindex, src, dest);
+        startindex = 0;   /* only the first block starts part-way through */
+        i++;
+    }
+}
+
+/* Inserts a copy hop after current_bb->hops[index], copying src into a
+ * brand-new vreg of the same type, then rewrites all later uses of src
+ * (from hop index+2 in this block onwards, and through the rest of the
+ * dominance preorder) to refer to the new vreg.  Returns the number of
+ * hops inserted (always 1), so callers can adjust their loop index.
+ *
+ * (Review fix: the array_insert() argument was mojibake — '¤t_bb',
+ * i.e. '&current_bb' with '&curren' eaten by an HTML-entity decoding
+ * pass — restored to '&current_bb'.) */
+static int insert_move_after(int index, struct vreg* src)
+{
+    struct vreg* dest;
+    struct hop* copy;
+
+    dest = new_vreg();
+    dest->type = src->type;
+    copy = new_copy_hop(current_bb, src, dest);
+
+    array_insert(&current_bb->hops, copy, index+1);
+
+    rewrite_blocks(current_bb, index+2, src, dest);
+    return 1;
+}
+
+/* Pass entry point: splits live ranges around every non-move hop.  Each
+ * input vreg is copied into a fresh vreg immediately before the hop, and
+ * each output vreg immediately after it, with all subsequent uses
+ * rewritten — leaving non-move hops touching only short local ranges. */
+void pass_split_live_ranges(void)
+{
+    int i, j, k;
+
+    for (i=0; i<dominance.preorder.count; i++)
+    {
+        current_bb = dominance.preorder.item[i];
+
+        for (j=0; j<current_bb->hops.count; j++)
+        {
+            struct hop* hop = current_bb->hops.item[j];
+
+            if (!hop->is_move)
+            {
+                /* insert_move_after(j-1) places the copy at index j,
+                 * pushing this hop along; bumping j by the insertion count
+                 * keeps it tracking the same hop. */
+                for (k=0; k<hop->ins.count; k++)
+                {
+                    struct vreg* vreg = hop->ins.item[k];
+                    j += insert_move_after(j-1, vreg);
+                }
+
+                /* Copies of the outputs go immediately after the hop. */
+                for (k=0; k<hop->outs.count; k++)
+                {
+                    struct vreg* vreg = hop->outs.item[k];
+                    insert_move_after(j, vreg);
+                }
+            }
+        }
+    }
+}
+
+/* vim: set sw=4 ts=4 expandtab : */
\ No newline at end of file
--- /dev/null
+#include "mcg.h"
+
+static ARRAYOF(struct vreg) vregs;
+
+/* hop_walk callback: records, for every vreg, which hops use it
+ * (usedhops) and the single hop that defines it (defined), collecting
+ * each defined vreg into the file-local 'vregs' list.  The assert
+ * enforces the SSA-like invariant that each vreg has at most one
+ * defining hop. */
+static void assign_uses_cb(struct hop* hop, void* user)
+{
+    int i;
+
+    for (i=0; i<hop->ins.count; i++)
+        array_appendu(&hop->ins.item[i]->usedhops, hop);
+
+    for (i=0; i<hop->outs.count; i++)
+    {
+        struct vreg* vreg = hop->outs.item[i];
+        assert(vreg->defined == NULL);
+        vreg->defined = hop;
+        array_appendu(&vregs, vreg);
+    }
+}
+
+/* A vreg is spillable only if every hop touching it — its defining hop
+ * (if any) and all of its users — is a move, so spill/reload code can be
+ * substituted for those moves freely. */
+static bool is_spillable_vreg(struct vreg* vreg)
+{
+    int i;
+
+    if (vreg->defined && !vreg->defined->is_move)
+        return false;
+
+    for (i=0; i<vreg->usedhops.count; i++)
+    {
+        struct hop* user = vreg->usedhops.item[i];
+
+        if (!user->is_move)
+            return false;
+    }
+
+    return true;
+}
+
+/* Pass entry point: computes def/use information for every vreg — which
+ * hops define and use it, which blocks' phis consume it — and marks each
+ * vreg spillable when it is touched only by moves. */
+void pass_determine_vreg_usage(void)
+{
+    int i, j;
+
+    vregs.count = 0;
+    hop_walk(assign_uses_cb, NULL);
+
+    /* Phi inputs are uses too: record the consuming block against the
+     * source vreg, and make sure the phi's destination vreg is listed. */
+    for (i=0; i<dominance.preorder.count; i++)
+    {
+        struct basicblock* bb = dominance.preorder.item[i];
+        for (j=0; j<bb->phis.count; j++)
+        {
+            struct vreg* dest = bb->phis.item[j].left;
+            struct phi* phi = bb->phis.item[j].right;
+            struct vreg* src = phi->ir->result;
+            array_appendu(&src->usedphis, bb);
+            array_appendu(&vregs, dest);
+        }
+    }
+
+    for (i=0; i<vregs.count; i++)
+    {
+        struct vreg* vreg = vregs.item[i];
+        vreg->is_spillable = is_spillable_vreg(vreg);
+    }
+}
+
+/* vim: set sw=4 ts=4 expandtab : */
+
+