#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "insn-config.h"
#include "recog.h"
#include "reload.h"
#include "function.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "df.h"
#include "output.h"
#include "ggc.h"
#include "ra.h"
struct curr_use;
static unsigned HOST_WIDE_INT rtx_to_undefined (rtx);
static bitmap find_sub_conflicts (struct web_part *, unsigned int);
static bitmap get_sub_conflicts (struct web_part *, unsigned int);
static unsigned int undef_to_size_word (rtx, unsigned HOST_WIDE_INT *);
static bitmap undef_to_bitmap (struct web_part *,
unsigned HOST_WIDE_INT *);
static struct web_part * find_web_part_1 (struct web_part *);
static struct web_part * union_web_part_roots
(struct web_part *, struct web_part *);
static int defuse_overlap_p_1 (rtx, struct curr_use *);
static int live_out_1 (struct df *, struct curr_use *, rtx);
static int live_out (struct df *, struct curr_use *, rtx);
static rtx live_in_edge ( struct df *, struct curr_use *, edge);
static void live_in (struct df *, struct curr_use *, rtx);
static int copy_insn_p (rtx, rtx *, rtx *);
static void remember_move (rtx);
static void handle_asm_insn (struct df *, rtx);
static void prune_hardregs_for_mode (HARD_REG_SET *, enum machine_mode);
static void init_one_web_common (struct web *, rtx);
static void init_one_web (struct web *, rtx);
static void reinit_one_web (struct web *, rtx);
static struct web * add_subweb (struct web *, rtx);
static struct web * add_subweb_2 (struct web *, unsigned int);
static void init_web_parts (struct df *);
static void copy_conflict_list (struct web *);
static void add_conflict_edge (struct web *, struct web *);
static void build_inverse_webs (struct web *);
static void copy_web (struct web *, struct web_link **);
static void compare_and_free_webs (struct web_link **);
static void init_webs_defs_uses (void);
static unsigned int parts_to_webs_1 (struct df *, struct web_link **,
struct df_link *);
static void parts_to_webs (struct df *);
static void reset_conflicts (void);
/* Disabled consistency checker; the prototype was missing its
   terminating semicolon, which would break the build if this
   block were ever re-enabled.  */
#if 0
static void check_conflict_numbers (void);
#endif
static void conflicts_between_webs (struct df *);
static void remember_web_was_spilled (struct web *);
static void detect_spill_temps (void);
static int contains_pseudo (rtx);
static int want_to_remat (rtx x);
static void detect_remat_webs (void);
static void determine_web_costs (void);
static void detect_webs_set_in_cond_jump (void);
static void make_webs (struct df *);
static void moves_to_webs (struct df *);
static void connect_rmw_web_parts (struct df *);
static void update_regnos_mentioned (void);
static void livethrough_conflicts_bb (basic_block);
static void init_bb_info (void);
static void free_bb_info (void);
static void build_web_parts_and_conflicts (struct df *);
/* Bitmap of use IDs which are live over an abnormal edge.  */
static sbitmap live_over_abnormal;
/* Counter identifying the current backward-tracing pass (bumped once
   per traced use; see build_web_parts_and_conflicts / live_in).  */
static unsigned int visited_pass;
/* Insns already recorded as moves, indexed by INSN_UID.  */
static sbitmap move_handled;
/* Per-insn memo of the web part and undefinedness last seen while
   tracing liveness through that insn, indexed by INSN_UID.  */
struct visit_trace
{
  struct web_part *wp;
  unsigned HOST_WIDE_INT undefined;
};
static struct visit_trace *visit_trace;
/* Per-basic-block state hung off bb->aux during this phase.  */
struct ra_bb_info
{
  unsigned int pass;		/* Last visited_pass that entered this block.  */
  unsigned HOST_WIDE_INT undefined; /* Undefined bytes at that visit.  */
  bitmap regnos_mentioned;	/* Regnos referenced by insns in this block.  */
  bitmap live_throughout;	/* Use IDs live through the whole block.  */
  void *old_aux;		/* Saved previous bb->aux value.  */
};
/* Pack byte offset B and byte length L into one tag (16 bits each),
   and the accessors to extract them again.  */
#define BL_TO_WORD(b, l) ((((b) & 0xFFFF) << 16) | ((l) & 0xFFFF))
#define BYTE_BEGIN(i) (((unsigned int)(i) >> 16) & 0xFFFF)
#define BYTE_LENGTH(i) ((unsigned int)(i) & 0xFFFF)
/* Return the (begin byte, byte length) span of X — a REG or SUBREG —
   packed into one BL_TO_WORD tag.  */
unsigned int
rtx_to_bits (rtx x)
{
  unsigned int begin = 0;
  unsigned int length = GET_MODE_SIZE (GET_MODE (x));
  if (GET_CODE (x) == SUBREG)
    begin = SUBREG_BYTE (x);
  return BL_TO_WORD (begin, length);
}
/* Return a mask with one bit per byte covered by X set: bits
   [begin, begin + len) where begin is the subreg byte offset (zero for
   a plain REG) and len the byte size of X's mode.  */
static unsigned HOST_WIDE_INT
rtx_to_undefined (rtx x)
{
  unsigned int length = GET_MODE_SIZE (GET_MODE (x));
  unsigned int begin = (GET_CODE (x) == SUBREG) ? SUBREG_BYTE (x) : 0;
  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT) 0;
  mask = ~(mask << length);
  return mask << begin;
}
/* Memo of copy_insn_p results, indexed by INSN_UID.
   seen: 0 = not yet computed, 1 = insn is a copy, 2 = it is not.  */
struct copy_p_cache
{
  int seen;
  rtx source;
  rtx target;
};
static struct copy_p_cache * copy_cache;
/* Per-insn visit counters filled while tracing (dumped via
   dump_number_seen in build_web_parts_and_conflicts).  */
int *number_seen;
/* Return nonzero if INSN is a simple register copy (REG or SUBREG of
   REG on both sides, possibly under STRICT_LOW_PART), storing source
   and target through SOURCE/TARGET when non-NULL.  Results are
   memoized per INSN_UID in copy_cache.  */
static int
copy_insn_p (rtx insn, rtx *source, rtx *target)
{
  rtx d, s;
  unsigned int d_regno, s_regno;
  int uid = INSN_UID (insn);

  gcc_assert (INSN_P (insn));

  /* Fast path: answer already cached for this insn.  */
  if (copy_cache[uid].seen)
    {
      if (copy_cache[uid].seen == 1)
	{
	  if (source)
	    *source = copy_cache[uid].source;
	  if (target)
	    *target = copy_cache[uid].target;
	  return 1;
	}
      return 0;
    }

  /* Tentatively record "not a copy"; overwritten below on success.  */
  copy_cache[uid].seen = 2;
  insn = single_set (insn);
  if (!insn)
    return 0;
  d = SET_DEST (insn);
  s = SET_SRC (insn);

  /* Strip STRICT_LOW_PART; both sides must then be a REG or a SUBREG
     of a REG.  */
  while (GET_CODE (d) == STRICT_LOW_PART)
    d = XEXP (d, 0);
  if (!REG_P (d)
      && (GET_CODE (d) != SUBREG || !REG_P (SUBREG_REG (d))))
    return 0;
  while (GET_CODE (s) == STRICT_LOW_PART)
    s = XEXP (s, 0);
  if (!REG_P (s)
      && (GET_CODE (s) != SUBREG || !REG_P (SUBREG_REG (s))))
    return 0;

  s_regno = (unsigned) REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s);
  d_regno = (unsigned) REGNO (GET_CODE (d) == SUBREG ? SUBREG_REG (d) : d);

  /* Hard-reg-to-hard-reg copies, and copies involving pseudos at or
     above max_normal_pseudo, are not interesting here.  */
  if ((s_regno < FIRST_PSEUDO_REGISTER
       && d_regno < FIRST_PSEUDO_REGISTER)
      || s_regno >= max_normal_pseudo
      || d_regno >= max_normal_pseudo)
    return 0;

  if (source)
    *source = s;
  if (target)
    *target = d;

  copy_cache[uid].seen = 1;
  copy_cache[uid].source = s;
  copy_cache[uid].target = d;
  return 1;
}
/* Search WP's sub-conflict list for the entry tagged SIZE_WORD.
   Return its conflict bitmap, or NULL if no such entry exists.  */
static bitmap
find_sub_conflicts (struct web_part *wp, unsigned int size_word)
{
  struct tagged_conflict *t;
  for (t = wp->sub_conflicts; t != NULL; t = t->next)
    {
      if (t->size_word == size_word)
	return t->conflicts;
    }
  return NULL;
}
/* Like find_sub_conflicts, but create (and link in) a fresh entry for
   SIZE_WORD when none exists yet.  Never returns NULL.  */
static bitmap
get_sub_conflicts (struct web_part *wp, unsigned int size_word)
{
  bitmap found = find_sub_conflicts (wp, size_word);
  if (found)
    return found;

  {
    struct tagged_conflict *node = ra_alloc (sizeof *node);
    node->conflicts = BITMAP_XMALLOC ();
    node->size_word = size_word;
    node->next = wp->sub_conflicts;
    wp->sub_conflicts = node;
    return node->conflicts;
  }
}
/* Table driving undef_to_size_word for undefined-byte patterns that
   fit in four bits.  Indexed by the 4-bit pattern; each entry gives
   the (begin, length) chunk to peel off and the bits remaining
   undefined afterwards.  */
static struct undef_table_s {
  unsigned int new_undef;
  /* Chunk as BL_TO_WORD (begin byte, byte length).  */
  unsigned int size_word;
} const undef_table [] = {
  { 0, BL_TO_WORD (0, 0)},	/* 0000 */
  { 0, BL_TO_WORD (0, 1)},	/* 0001 */
  { 0, BL_TO_WORD (1, 1)},	/* 0010 */
  { 0, BL_TO_WORD (0, 2)},	/* 0011 */
  { 0, BL_TO_WORD (2, 1)},	/* 0100 */
  { 1, BL_TO_WORD (2, 1)},	/* 0101 */
  { 2, BL_TO_WORD (2, 1)},	/* 0110 */
  { 3, BL_TO_WORD (2, 1)},	/* 0111 */
  { 0, BL_TO_WORD (3, 1)},	/* 1000 */
  { 1, BL_TO_WORD (3, 1)},	/* 1001 */
  { 2, BL_TO_WORD (3, 1)},	/* 1010 */
  { 3, BL_TO_WORD (3, 1)},	/* 1011 */
  { 0, BL_TO_WORD (2, 2)},	/* 1100 */
  { 1, BL_TO_WORD (2, 2)},	/* 1101 */
  { 2, BL_TO_WORD (2, 2)},	/* 1110 */
  { 0, BL_TO_WORD (0, 4)}};	/* 1111 */
/* Peel one contiguous chunk off *UNDEFINED (a one-bit-per-byte mask of
   REG): return its BL_TO_WORD (begin, length) tag and update
   *UNDEFINED to the bytes not covered by that chunk.  Callers loop
   until *UNDEFINED becomes zero.  */
static unsigned int
undef_to_size_word (rtx reg, unsigned HOST_WIDE_INT *undefined)
{
  /* Patterns fitting in four bits are table-driven.  */
  if (*undefined <= 15)
    {
      struct undef_table_s u;
      u = undef_table[*undefined];
      *undefined = u.new_undef;
      return u.size_word;
    }

  /* A few specific larger patterns handled explicitly.  */
  if (*undefined <= 0xffff)
    switch ((int) *undefined)
      {
      case 0x00f0 : *undefined = 0; return BL_TO_WORD (4, 4);
      case 0x00ff : *undefined = 0; return BL_TO_WORD (0, 8);
      case 0x0f00 : *undefined = 0; return BL_TO_WORD (8, 4);
      case 0x0ff0 : *undefined = 0xf0; return BL_TO_WORD (8, 4);
      case 0x0fff :
	/* For integral modes take a 4-byte chunk first; otherwise the
	   whole 12 bytes at once.  */
	if (INTEGRAL_MODE_P (GET_MODE (reg)))
	  { *undefined = 0xff; return BL_TO_WORD (8, 4); }
	else
	  { *undefined = 0; return BL_TO_WORD (0, 12); }
      case 0xf000 : *undefined = 0; return BL_TO_WORD (12, 4);
      case 0xff00 : *undefined = 0; return BL_TO_WORD (8, 8);
      case 0xfff0 : *undefined = 0xf0; return BL_TO_WORD (8, 8);
      case 0xffff : *undefined = 0; return BL_TO_WORD (0, 16);
      }

  /* General case: locate the lowest nonzero nibble, split it via the
     table, and write the remainder back into *UNDEFINED.  */
  {
    unsigned HOST_WIDE_INT u = *undefined;
    int word;
    struct undef_table_s tab;
    for (word = 0; (u & 15) == 0; word += 4)
      u >>= 4;
    u = u & 15;
    tab = undef_table[u];
    u = tab.new_undef;
    u = (*undefined & ~((unsigned HOST_WIDE_INT)15 << word)) | (u << word);
    *undefined = u;
    /* Shift the chunk's begin byte by the nibble position.  */
    return tab.size_word + BL_TO_WORD (word, 0);
  }
}
/* Peel one chunk off *UNDEFINED (see undef_to_size_word) and return
   WP's conflict bitmap for that chunk, creating it if necessary.  */
static bitmap
undef_to_bitmap (struct web_part *wp, unsigned HOST_WIDE_INT *undefined)
{
  rtx reg = DF_REF_REAL_REG (wp->ref);
  unsigned int size_word = undef_to_size_word (reg, undefined);
  return get_sub_conflicts (wp, size_word);
}
/* Union-find: return the root of P's web part tree, compressing the
   path so every node on the walk points directly at the root.  */
static struct web_part *
find_web_part_1 (struct web_part *p)
{
  struct web_part *root = p;

  /* Locate the representative.  */
  while (root->uplink)
    root = root->uplink;

  /* Path compression.  */
  while (p != root)
    {
      struct web_part *next = p->uplink;
      p->uplink = root;
      p = next;
    }
  return root;
}
/* Fast inline version of find_web_part_1: resolves chains of length
   zero or one without a function call.  */
#define find_web_part(wp) ((! (wp)->uplink) ? (wp) \
  : (! (wp)->uplink->uplink) ? (wp)->uplink : find_web_part_1 (wp))
/* Union the web part trees rooted at R1 and R2 (both must already be
   roots).  The lower-addressed root becomes the representative.
   Merges spanned_deaths, crosses_call and the per-chunk conflict
   bitmaps, and decrements num_webs.  Returns the surviving root.  */
static struct web_part *
union_web_part_roots (struct web_part *r1, struct web_part *r2)
{
  if (r1 != r2)
    {
      /* Canonicalize so R1 is the lower address — makes the choice of
	 representative deterministic.  */
      if (r1 > r2)
	{
	  struct web_part *h = r1;
	  r1 = r2;
	  r2 = h;
	}
      r2->uplink = r1;
      num_webs--;

      r1->spanned_deaths += r2->spanned_deaths;

      /* Merge the sub-conflict lists: if R1 has none, steal R2's
	 whole list ...  */
      if (!r1->sub_conflicts)
	r1->sub_conflicts = r2->sub_conflicts;
      else if (r2->sub_conflicts)
	{
	  /* ... otherwise IOR bitmaps with matching size_word tags
	     into R1's entries and free R2's ...  */
	  struct tagged_conflict *cl1, *cl2;
	  for (cl1 = r1->sub_conflicts; cl1; cl1 = cl1->next)
	    for (cl2 = r2->sub_conflicts; cl2; cl2 = cl2->next)
	      if (cl1->size_word == cl2->size_word)
		{
		  bitmap_operation (cl1->conflicts, cl1->conflicts,
				    cl2->conflicts, BITMAP_IOR);
		  BITMAP_XFREE (cl2->conflicts);
		  cl2->conflicts = NULL;
		}
	  /* ... then splice R2's unmatched entries onto R1's list.  */
	  for (cl2 = r2->sub_conflicts; cl2;)
	    {
	      struct tagged_conflict *cl_next = cl2->next;
	      if (cl2->conflicts)
		{
		  cl2->next = r1->sub_conflicts;
		  r1->sub_conflicts = cl2;
		}
	      cl2 = cl_next;
	    }
	}
      r2->sub_conflicts = NULL;
      r1->crosses_call |= r2->crosses_call;
    }
  return r1;
}
/* Union the trees containing P1 and P2, short-circuiting the
   identical-node case.  */
#define union_web_parts(p1, p2) \
  ((p1 == p2) ? find_web_part (p1) \
   : union_web_part_roots (find_web_part (p1), find_web_part (p2)))
/* Record INSN — known to be a copy insn — on the global wl_moves list,
   at most once per insn (guarded by move_handled).  Only plain
   REG-to-REG copies are remembered; SUBREG copies are skipped.  */
static void
remember_move (rtx insn)
{
  if (!TEST_BIT (move_handled, INSN_UID (insn)))
    {
      rtx s, d;
      int ret;
      /* SLINK/LINK exist only for the consistency checks below.  */
      struct df_link *slink = DF_INSN_USES (df, insn);
      struct df_link *link = DF_INSN_DEFS (df, insn);
      SET_BIT (move_handled, INSN_UID (insn));
      ret = copy_insn_p (insn, &s, &d);
      gcc_assert (ret);
      /* One def and one use must exist; any additional defs must be
	 hard registers.  */
      gcc_assert (link && link->ref);
      gcc_assert (slink && slink->ref);
      gcc_assert (!link->next
		  || DF_REF_REGNO (link->next->ref)
		  < FIRST_PSEUDO_REGISTER);
      if (REG_P (s) && REG_P (d))
	{
	  struct move *m = ra_calloc (sizeof (struct move));
	  struct move_list *ml;
	  m->insn = insn;
	  ml = ra_alloc (sizeof (struct move_list));
	  ml->move = m;
	  ml->next = wl_moves;
	  wl_moves = ml;
	}
    }
}
/* State of the use currently being traced backwards by live_in /
   live_out / live_out_1.  */
struct curr_use {
  struct web_part *wp;		/* Web part of the use.  */
  unsigned HOST_WIDE_INT undefined; /* One bit per byte of the use not
				       yet defined on the walked path.  */
  unsigned int regno;		/* Register number of the use.  */
  rtx x;			/* The use itself (REG or SUBREG).  */
  unsigned int live_over_abnormal; /* Set when the walk crossed an
				      abnormal edge.  */
};
/* Classify how the definition DEF overlaps the traced use USE.
   Returns 0 when they refer to different registers, 1 when DEF covers
   the use completely, -1 when they touch the same register but DEF
   defines nothing the use still needed, and 2/3/4 for the partial
   cases (use->undefined is narrowed in cases 2 and 4).  */
static int
defuse_overlap_p_1 (rtx def, struct curr_use *use)
{
  /* MODE encodes which side is a SUBREG: bit 0 = def, bit 1 = use.  */
  int mode = 0;
  if (def == use->x)
    return 1;
  if (!def)
    return 0;
  if (GET_CODE (def) == SUBREG)
    {
      if (REGNO (SUBREG_REG (def)) != use->regno)
	return 0;
      mode |= 1;
    }
  else if (REGNO (def) != use->regno)
    return 0;
  if (GET_CODE (use->x) == SUBREG)
    mode |= 2;
  switch (mode)
    {
    case 0:
      /* Plain reg defines plain reg of the same regno: full def.  */
      return 1;
    case 1:
      /* SUBREG def of a full-reg use: partial definition.  */
      {
	unsigned HOST_WIDE_INT old_u = use->undefined;
	use->undefined &= ~ rtx_to_undefined (def);
	/* 2 if some still-undefined bytes became defined, else -1.  */
	return (old_u != use->undefined) ? 2 : -1;
      }
    case 2:
      /* Full-reg def of a SUBREG use: covers it completely.  */
      return 3;
    case 3:
      /* SUBREG def of a SUBREG use.  Same byte span: full def.  */
      if (GET_MODE_SIZE (GET_MODE (def)) == GET_MODE_SIZE (GET_MODE (use->x)))
	if (SUBREG_BYTE (def) == SUBREG_BYTE (use->x))
	  return 1;
      {
	unsigned HOST_WIDE_INT old_u;
	int b1, e1, b2, e2;
	unsigned int bl1, bl2;
	bl1 = rtx_to_bits (def);
	bl2 = rtx_to_bits (use->x);
	b1 = BYTE_BEGIN (bl1);
	b2 = BYTE_BEGIN (bl2);
	e1 = b1 + BYTE_LENGTH (bl1) - 1;
	e2 = b2 + BYTE_LENGTH (bl2) - 1;
	/* Disjoint byte ranges define nothing of the use.  */
	if (b1 > e2 || b2 > e1)
	  return -1;
	old_u = use->undefined;
	use->undefined &= ~ rtx_to_undefined (def);
	return (old_u != use->undefined) ? 4 : -1;
      }
    default:
      gcc_unreachable ();
    }
}
/* Wrapper for defuse_overlap_p_1 resolving the trivial cases
   (identical rtx, different regno) inline.  Note DEF is evaluated
   more than once.  */
#define defuse_overlap_p(def, use) \
  ((def) == (use)->x ? 1 : \
   (REGNO (GET_CODE (def) == SUBREG \
	   ? SUBREG_REG (def) : def) != use->regno \
    ? 0 : defuse_overlap_p_1 (def, use)))
/* Process one insn while tracing USE backwards: memoize the visit in
   visit_trace, union USE's web part with the parts of overlapping
   defs in INSN, and record conflicts with non-overlapping defs.
   Return nonzero if the use is still (partly) undefined above INSN,
   i.e. the walk must continue.  */
static int
live_out_1 (struct df *df ATTRIBUTE_UNUSED, struct curr_use *use, rtx insn)
{
  int defined = 0;
  int uid = INSN_UID (insn);
  struct web_part *wp = use->wp;

  /* Memoize this visit so live_out can short-circuit later walks.  */
  visit_trace[uid].wp = wp;
  visit_trace[uid].undefined = use->undefined;

  if (INSN_P (insn))
    {
      unsigned int source_regno = ~0;
      unsigned int regno = use->regno;
      unsigned HOST_WIDE_INT orig_undef = use->undefined;
      unsigned HOST_WIDE_INT final_undef = use->undefined;
      rtx s = NULL;
      unsigned int n, num_defs = insn_df[uid].num_defs;
      struct ref **defs = insn_df[uid].defs;

      wp = find_web_part (wp);
      if (CALL_P (insn))
	wp->crosses_call = 1;
      else if (copy_insn_p (insn, &s, NULL))
	source_regno = REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s);

      /* Check every def of INSN against the traced use.  */
      for (n = 0; n < num_defs; n++)
	{
	  struct ref *ref = defs[n];
	  int lap;

	  use->undefined = orig_undef;
	  if ((lap = defuse_overlap_p (DF_REF_REG (ref), use)) != 0)
	    {
	      if (lap == -1)
		{
		  /* Same register, but nothing still needed by the
		     use gets defined: the def conflicts with the
		     remaining undefined chunks.  */
		  unsigned HOST_WIDE_INT undef;
		  undef = use->undefined;
		  while (undef)
		    bitmap_set_bit (undef_to_bitmap (wp, &undef),
				    DF_REF_ID (ref));
		  continue;
		}
	      if ((lap & 1) != 0)
		/* Odd return values mean a complete definition.  */
		defined = 1;
	      else
		{
		  /* Partial overlap: accumulate what remains undefined
		     over all defs of this insn.  */
		  final_undef &= use->undefined;
		  if (final_undef == 0)
		    defined = 1;
		}
	      wp = union_web_parts (wp, &web_parts[DF_REF_ID (ref)]);
	    }
	  else
	    {
	      /* A def of a different register conflicts with the
		 still-undefined chunks — except for bytes covered by
		 the source of a copy insn whose source regno equals
		 the traced regno.  */
	      unsigned HOST_WIDE_INT undef = use->undefined;
	      if (regno == source_regno)
		{
		  undef &= ~ rtx_to_undefined (s);
		}
	      if (undef)
		{
		  do
		    bitmap_set_bit (undef_to_bitmap (wp, &undef),
				    DF_REF_ID (ref));
		  while (undef);
		}
	    }
	}

      if (defined)
	use->undefined = 0;
      else
	{
	  /* The use lives through this insn: count spanned deaths.  */
	  gcc_assert (uid < death_insns_max_uid);
	  if (TEST_BIT (insns_with_deaths, uid))
	    wp->spanned_deaths++;
	  use->undefined = final_undef;
	}
    }
  return !defined;
}
/* Memoizing wrapper around live_out_1: if this insn was already
   walked for the same regno with at least as much undefinedness, just
   union the web parts and stop; otherwise do the full processing.
   Returns nonzero when the walk must continue above INSN.  */
static inline int
live_out (struct df *df, struct curr_use *use, rtx insn)
{
  unsigned int uid = INSN_UID (insn);
  struct web_part *cached = visit_trace[uid].wp;

  if (cached
      && DF_REF_REGNO (cached->ref) == use->regno
      && (use->undefined & ~visit_trace[uid].undefined) == 0)
    {
      union_web_parts (cached, use->wp);
      return 0;
    }
  return live_out_1 (df, use, insn);
}
/* Continue tracing USE backwards across edge E.  Return the insn in
   E->src from which to continue, or NULL_RTX when the walk ends on
   this path.  */
static rtx
live_in_edge (struct df *df, struct curr_use *use, edge e)
{
  struct ra_bb_info *info_pred;
  rtx next_insn;

  /* Call-used hard registers are not considered live across an EH
     edge.  */
  if ((e->flags & EDGE_EH) && use->regno < FIRST_PSEUDO_REGISTER
      && call_used_regs[use->regno])
    return NULL_RTX;
  if (e->flags & EDGE_ABNORMAL)
    use->live_over_abnormal = 1;
  bitmap_set_bit (live_at_end[e->src->index], DF_REF_ID (use->wp->ref));
  info_pred = (struct ra_bb_info *) e->src->aux;
  next_insn = BB_END (e->src);

  if (live_out (df, use, next_insn))
    {
      /* If the predecessor block never mentions this regno and the use
	 is still completely undefined, it's live throughout that
	 block: record the fact and jump straight to the block head.  */
      if (!bitmap_bit_p (info_pred->regnos_mentioned, use->regno)
	  && (rtx_to_undefined (use->x) & ~use->undefined) == 0)
	{
	  bitmap_set_bit (info_pred->live_throughout,
			  DF_REF_ID (use->wp->ref));
	  next_insn = BB_HEAD (e->src);
	}
      return next_insn;
    }
  else
    return NULL_RTX;
}
/* Trace USE backwards from INSN through the insn stream and across
   predecessor edges, building web parts and conflicts on the way.
   Iterates within a block; recursion only happens at block
   boundaries, and the last predecessor edge is handled iteratively
   (manual tail-call elimination).  */
static void
live_in (struct df *df, struct curr_use *use, rtx insn)
{
  unsigned int loc_vpass = visited_pass;

  while (1)
    {
      unsigned int i;
      int uid = INSN_UID (insn);
      basic_block bb = BLOCK_FOR_INSN (insn);
      number_seen[uid]++;

      /* Step back to the previous real insn.  */
      for (insn = PREV_INSN (insn); insn && !INSN_P (insn);
	   insn = PREV_INSN (insn))
	;
      if (!insn)
	return;
      if (bb != BLOCK_FOR_INSN (insn))
	{
	  unsigned HOST_WIDE_INT undef = use->undefined;
	  struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;

	  if (EDGE_COUNT (bb->preds) == 0)
	    return;
	  /* If this block was already entered in this pass with no
	     fewer undefined bytes, all paths from here are done.  */
	  if (info->pass == loc_vpass && (undef & ~info->undefined) == 0)
	    return;
	  info->pass = loc_vpass;
	  info->undefined = undef;
	  /* Recurse for all predecessor edges but the last ...  */
	  for (i = 0; i < EDGE_COUNT (bb->preds) - 1; i++)
	    {
	      insn = live_in_edge (df, use, EDGE_PRED (bb, i));
	      if (insn)
		live_in (df, use, insn);
	      use->undefined = undef;
	    }
	  /* ... and continue iteratively along the last edge.  The
	     previous code passed the stale loop variable here, which
	     walked the second-to-last edge twice, skipped the last
	     edge entirely, and dereferenced a NULL edge whenever the
	     block had exactly one predecessor.  */
	  insn = live_in_edge (df, use, EDGE_PRED (bb, i));
	  if (!insn)
	    return;
	}
      else if (!live_out (df, use, insn))
	return;
    }
}
/* Scan all insns: remember copy insns as moves, and for insns not seen
   in an earlier pass record which regnos each block mentions in its
   ra_bb_info->regnos_mentioned bitmap.  */
static void
update_regnos_mentioned (void)
{
  int last_uid = last_max_uid;
  rtx insn;
  basic_block bb;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
	/* Insns from an earlier pass already have their regnos
	   recorded; only the move list needs refreshing.  */
	if (INSN_UID (insn) < last_uid)
	  {
	    if (copy_insn_p (insn, NULL, NULL))
	      remember_move (insn);
	  }
	else if ((bb = BLOCK_FOR_INSN (insn)) != NULL)
	  {
	    rtx source;
	    struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
	    bitmap mentioned = info->regnos_mentioned;
	    struct df_link *link;
	    if (copy_insn_p (insn, &source, NULL))
	      {
		remember_move (insn);
		/* The copy source counts as mentioned too (it is read
		   here even though it's not a def).  */
		bitmap_set_bit (mentioned,
				REGNO (GET_CODE (source) == SUBREG
				       ? SUBREG_REG (source) : source));
	      }
	    for (link = DF_INSN_DEFS (df, insn); link; link = link->next)
	      if (link->ref)
		bitmap_set_bit (mentioned, DF_REF_REGNO (link->ref));
	  }
      }
}
/* For all uses live throughout BB (collected by live_in_edge), add
   conflicts with every def in BB, and propagate the block's death
   count and call flag to their web parts.  */
static void
livethrough_conflicts_bb (basic_block bb)
{
  struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
  rtx insn;
  bitmap all_defs;
  int first, use_id;
  unsigned int deaths = 0;
  unsigned int contains_call = 0;

  /* Nothing to do if no use is live through this block.  */
  if ((first = bitmap_first_set_bit (info->live_throughout)) < 0)
    return;

  /* Collect the IDs of all defs in BB, plus deaths and calls.  */
  all_defs = BITMAP_XMALLOC ();
  for (insn = BB_HEAD (bb); insn; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  unsigned int n;
	  struct ra_insn_info info;
	  info = insn_df[INSN_UID (insn)];
	  for (n = 0; n < info.num_defs; n++)
	    bitmap_set_bit (all_defs, DF_REF_ID (info.defs[n]));
	  if (TEST_BIT (insns_with_deaths, INSN_UID (insn)))
	    deaths++;
	  if (CALL_P (insn))
	    contains_call = 1;
	}
      if (insn == BB_END (bb))
	break;
    }

  /* With any def, death or call present, update every live-through
     web part.  */
  if (deaths > 0
      || contains_call
      || bitmap_first_set_bit (all_defs) >= 0)
    {
      bitmap_iterator bi;
      EXECUTE_IF_SET_IN_BITMAP (info->live_throughout, first, use_id, bi)
	{
	  struct web_part *wp = &web_parts[df->def_id + use_id];
	  unsigned int bl = rtx_to_bits (DF_REF_REG (wp->ref));
	  bitmap conflicts;
	  wp = find_web_part (wp);
	  wp->spanned_deaths += deaths;
	  wp->crosses_call |= contains_call;
	  conflicts = get_sub_conflicts (wp, bl);
	  bitmap_operation (conflicts, conflicts, all_defs, BITMAP_IOR);
	}
    }
  BITMAP_XFREE (all_defs);
}
/* Hang a fresh, zeroed ra_bb_info off every basic block's aux field,
   saving the previous aux value for later restoration.  */
static void
init_bb_info (void)
{
  basic_block bb;

  FOR_ALL_BB (bb)
    {
      struct ra_bb_info *info;

      info = xcalloc (1, sizeof *info);
      info->regnos_mentioned = BITMAP_XMALLOC ();
      info->live_throughout = BITMAP_XMALLOC ();
      info->old_aux = bb->aux;
      bb->aux = (void *) info;
    }
}
/* Undo init_bb_info: free each block's ra_bb_info and its bitmaps,
   restoring the saved aux pointer.  */
static void
free_bb_info (void)
{
  basic_block bb;

  FOR_ALL_BB (bb)
    {
      struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;

      BITMAP_XFREE (info->regnos_mentioned);
      BITMAP_XFREE (info->live_throughout);
      bb->aux = info->old_aux;
      free (info);
    }
}
/* Driver for web part construction: trace every use of every
   non-fixed register backwards to its defs, building web parts and
   conflicts, then handle the uses live throughout whole blocks.  */
static void
build_web_parts_and_conflicts (struct df *df)
{
  struct df_link *link;
  struct curr_use use;
  basic_block bb;

  number_seen = xcalloc (get_max_uid (), sizeof (int));
  visit_trace = xcalloc (get_max_uid (), sizeof (visit_trace[0]));
  update_regnos_mentioned ();

  /* Trace all uses backwards.  */
  visited_pass = 0;
  for (use.regno = 0; use.regno < (unsigned int)max_regno; use.regno++)
    if (use.regno >= FIRST_PSEUDO_REGISTER || !fixed_regs[use.regno])
      for (link = df->regs[use.regno].uses; link; link = link->next)
	if (link->ref)
	  {
	    struct ref *ref = link->ref;
	    rtx insn = DF_REF_INSN (ref);
	    /* Pseudo uses carried over from an earlier pass are only
	       re-traced when marked in last_check_uses.  */
	    if (use.regno >= FIRST_PSEUDO_REGISTER
		&& DF_REF_ID (ref) < last_use_id
		&& !TEST_BIT (last_check_uses, DF_REF_ID (ref)))
	      continue;
	    use.wp = &web_parts[df->def_id + DF_REF_ID (ref)];
	    use.x = DF_REF_REG (ref);
	    use.live_over_abnormal = 0;
	    use.undefined = rtx_to_undefined (use.x);
	    visited_pass++;
	    live_in (df, &use, insn);
	    if (use.live_over_abnormal)
	      SET_BIT (live_over_abnormal, DF_REF_ID (ref));
	  }

  dump_number_seen ();
  /* Process the uses live through whole blocks, then reset the
     per-block state for the next pass.  */
  FOR_ALL_BB (bb)
    {
      struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
      livethrough_conflicts_bb (bb);
      bitmap_zero (info->live_throughout);
      info->pass = 0;
    }
  free (visit_trace);
  free (number_seen);
}
/* Connect the web part of each use with the parts of the defs in the
   same insn that write the same register, so read-mod-write accesses
   end up in a single web.  */
static void
connect_rmw_web_parts (struct df *df)
{
  unsigned int i;

  for (i = 0; i < df->use_id; i++)
    {
      struct web_part *wp1 = &web_parts[df->def_id + i];
      rtx reg;
      struct df_link *link;
      if (!wp1->ref)
	continue;
      /* A representative inside the use range means this part was
	 never connected to any def (an uninitialized web) — leave
	 those alone.  */
      if (find_web_part (wp1) >= &web_parts[df->def_id])
	continue;
      reg = DF_REF_REAL_REG (wp1->ref);
      link = DF_INSN_DEFS (df, DF_REF_INSN (wp1->ref));
      for (; link; link = link->next)
	if (reg == DF_REF_REAL_REG (link->ref))
	  {
	    struct web_part *wp2 = &web_parts[DF_REF_ID (link->ref)];
	    union_web_parts (wp1, wp2);
	  }
    }
}
/* Restrict the hard register set *S to the registers usable in
   MODE.  */
static void
prune_hardregs_for_mode (HARD_REG_SET *s, enum machine_mode mode)
{
  AND_HARD_REG_SET (*s, hardregs_for_mode[(int) mode]);
}
/* Initialize the fields of WEB common to first-time setup and
   re-initialization.  REG is the web's REG rtx.  */
static void
init_one_web_common (struct web *web, rtx reg)
{
  gcc_assert (REG_P (reg));
  web->regno = REGNO (reg);
  web->orig_x = reg;
  if (!web->dlink)
    {
      web->dlink = ra_calloc (sizeof (struct dlist));
      DLIST_WEB (web->dlink) = web;
    }
  /* NOTE(review): the subunion value computed here is immediately
     overwritten by the preferred class below — the first assignment
     is a dead store, presumably kept to allow switching the policy
     back easily; confirm before removing.  */
  web->regclass = reg_class_subunion
    [reg_preferred_class (web->regno)] [reg_alternate_class (web->regno)];
  web->regclass = reg_preferred_class (web->regno);
  if (web->regno < FIRST_PSEUDO_REGISTER)
    {
      /* Hard register webs are precolored with themselves and can use
	 exactly that one register.  */
      web->color = web->regno;
      put_web (web, PRECOLORED);
      web->num_conflicts = UINT_MAX;
      web->add_hardregs = 0;
      CLEAR_HARD_REG_SET (web->usable_regs);
      SET_HARD_REG_BIT (web->usable_regs, web->regno);
      web->num_freedom = 1;
    }
  else
    {
      HARD_REG_SET alternate;
      web->color = -1;
      put_web (web, INITIAL);
      /* add_hardregs: extra registers needed beyond the first for a
	 multi-register pseudo.  */
      web->add_hardregs =
	CLASS_MAX_NREGS (web->regclass, PSEUDO_REGNO_MODE (web->regno)) - 1;
      web->num_conflicts = 0 * web->add_hardregs;
      /* usable_regs: preferred plus alternate class, minus colors
	 never usable, minus registers invalid for this mode.  */
      COPY_HARD_REG_SET (web->usable_regs,
			 reg_class_contents[reg_preferred_class (web->regno)]);
      COPY_HARD_REG_SET (alternate,
			 reg_class_contents[reg_alternate_class (web->regno)]);
      IOR_HARD_REG_SET (web->usable_regs, alternate);
      AND_COMPL_HARD_REG_SET (web->usable_regs, never_use_colors);
      prune_hardregs_for_mode (&web->usable_regs,
			       PSEUDO_REGNO_MODE (web->regno));
#ifdef CANNOT_CHANGE_MODE_CLASS
      if (web->mode_changed)
	AND_COMPL_HARD_REG_SET (web->usable_regs, invalid_mode_change_regs);
#endif
      web->num_freedom = hard_regs_count (web->usable_regs);
      web->num_freedom -= web->add_hardregs;
      /* Every web must be colorable in principle.  */
      gcc_assert (web->num_freedom);
    }
  COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs);
}
/* Fully initialize a brand-new WEB for register REG.  */
static void
init_one_web (struct web *web, rtx reg)
{
  /* Start from a clean slate, then fill in the common fields.  */
  memset (web, 0, sizeof *web);
  init_one_web_common (web, reg);
  web->useless_conflicts = BITMAP_XMALLOC ();
}
/* Re-initialize WEB, reused from an earlier allocator pass, for
   register REG: reset all per-pass state.  */
static void
reinit_one_web (struct web *web, rtx reg)
{
  /* Remember the previous color, biased by one.  */
  web->old_color = web->color + 1;
  init_one_web_common (web, reg);
  /* Clear per-pass flags, counters and costs.  */
  web->span_deaths = 0;
  web->spill_temp = 0;
  web->orig_spill_temp = 0;
  web->use_my_regs = 0;
  web->spill_cost = 0;
  web->was_spilled = 0;
  web->is_coalesced = 0;
  web->artificial = 0;
  web->live_over_abnormal = 0;
  web->mode_changed = 0;
  web->subreg_stripped = 0;
  web->move_related = 0;
  web->in_load = 0;
  web->target_of_spilled_move = 0;
  web->num_aliased = 0;
  if (web->type == PRECOLORED)
    {
      web->num_defs = 0;
      web->num_uses = 0;
      web->orig_spill_cost = 0;
    }
  CLEAR_HARD_REG_SET (web->bias_colors);
  CLEAR_HARD_REG_SET (web->prefer_colors);
  web->reg_rtx = NULL;
  web->stack_slot = NULL;
  web->pattern = NULL;
  web->alias = NULL;
  /* Moves must have been consumed before reuse.  */
  gcc_assert (!web->moves);
  /* The useless_conflicts bitmap persists across passes.  */
  gcc_assert (web->useless_conflicts);
}
/* Create a subweb of WEB for the SUBREG rtx REG, link it into WEB's
   subweb chain, and return it.  */
static struct web *
add_subweb (struct web *web, rtx reg)
{
  struct web *sub;

  gcc_assert (GET_CODE (reg) == SUBREG);
  sub = xmalloc (sizeof (struct web));

  /* Inherit everything from the parent, then override.  */
  *sub = *web;
  sub->orig_x = reg;
  sub->add_hardregs = CLASS_MAX_NREGS (web->regclass, GET_MODE (reg)) - 1;
  sub->num_conflicts = 0 * sub->add_hardregs;
  sub->num_defs = 0;
  sub->num_uses = 0;
  sub->dlink = NULL;
  sub->parent_web = web;

  /* Push onto the parent's subweb chain.  */
  sub->subreg_next = web->subreg_next;
  web->subreg_next = sub;
  return sub;
}
/* Create an artificial subweb of WEB covering the bytes described by
   SIZE_WORD (a BL_TO_WORD tag), picking a machine mode of matching
   size.  Returns the new subweb.  */
static struct web *
add_subweb_2 (struct web *web, unsigned int size_word)
{
  /* Base the mode class on an existing (sub)web's rtx.  */
  rtx ref_rtx = (web->subreg_next ? web->subreg_next : web)->orig_x;
  unsigned int size = BYTE_LENGTH (size_word) * BITS_PER_UNIT;
  enum machine_mode mode;
  mode = mode_for_size (size, GET_MODE_CLASS (GET_MODE (ref_rtx)), 0);
  /* Fall back to an integer mode of the right size.  */
  if (mode == BLKmode)
    mode = mode_for_size (size, MODE_INT, 0);
  gcc_assert (mode != BLKmode);
  web = add_subweb (web, gen_rtx_SUBREG (mode, web->orig_x,
					 BYTE_BEGIN (size_word)));
  web->artificial = 1;
  return web;
}
/* (Re)initialize the web_parts array from the current df refs and
   recount num_webs.  Also union all parts belonging to each hard
   register into one part.  */
static void
init_web_parts (struct df *df)
{
  int regno;
  unsigned int no;
  num_webs = 0;
  /* Defs occupy web_parts[0 .. def_id-1].  */
  for (no = 0; no < df->def_id; no++)
    {
      if (df->defs[no])
	{
	  /* Refs surviving from an earlier pass must be unchanged.  */
	  gcc_assert (no >= last_def_id || web_parts[no].ref == df->defs[no]);
	  web_parts[no].ref = df->defs[no];
	  /* Uplinked parts are counted via their root.  */
	  if (!web_parts[no].uplink)
	    num_webs++;
	}
      else
	web_parts[no].ref = NULL;
    }
  /* Uses follow at web_parts[def_id ...].  */
  for (no = 0; no < df->use_id; no++)
    {
      if (df->uses[no])
	{
	  gcc_assert (no >= last_use_id
		      || web_parts[no + df->def_id].ref == df->uses[no]);
	  web_parts[no + df->def_id].ref = df->uses[no];
	  if (!web_parts[no + df->def_id].uplink)
	    num_webs++;
	}
      else
	web_parts[no + df->def_id].ref = NULL;
    }
  /* Union all defs and uses of each hard register.  */
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    {
      struct web_part *r1 = NULL;
      struct df_link *link;
      for (link = df->regs[regno].defs; link; link = link->next)
	if (link->ref)
	  {
	    struct web_part *r2 = &web_parts[DF_REF_ID (link->ref)];
	    if (!r1)
	      r1 = r2;
	    else
	      r1 = union_web_parts (r1, r2);
	  }
      for (link = df->regs[regno].uses; link; link = link->next)
	if (link->ref)
	  {
	    struct web_part *r2 = &web_parts[df->def_id
					     + DF_REF_ID (link->ref)];
	    if (!r1)
	      r1 = r2;
	    else
	      r1 = union_web_parts (r1, r2);
	  }
    }
}
/* Snapshot WEB's current conflict list (including sub-conflicts) into
   web->orig_conflict_list.  Must be called at most once per web,
   before the list is first modified.  */
static void
copy_conflict_list (struct web *web)
{
  struct conflict_link *src;

  gcc_assert (!web->orig_conflict_list);
  gcc_assert (!web->have_orig_conflicts);
  web->have_orig_conflicts = 1;

  for (src = web->conflict_list; src; src = src->next)
    {
      struct conflict_link *copy = ra_alloc (sizeof *copy);
      struct sub_conflict *sl;

      copy->t = src->t;
      copy->sub = NULL;
      copy->next = web->orig_conflict_list;
      web->orig_conflict_list = copy;

      /* Duplicate the attached sub-conflict list, if any.  */
      for (sl = src->sub; sl; sl = sl->next)
	{
	  struct sub_conflict *nsl = ra_alloc (sizeof *nsl);

	  nsl->s = sl->s;
	  nsl->t = sl->t;
	  nsl->next = copy->sub;
	  copy->sub = nsl;
	}
    }
}
/* Add a directed conflict edge FROM -> TO.  Conflicts of precolored
   webs are not tracked.  Conflicts between subwebs are recorded as
   sub_conflict entries under their parent webs' edge.  */
static void
add_conflict_edge (struct web *from, struct web *to)
{
  if (from->type != PRECOLORED)
    {
      struct web *pfrom = find_web_for_subweb (from);
      struct web *pto = find_web_for_subweb (to);
      struct sub_conflict *sl;
      struct conflict_link *cl = pfrom->conflict_list;
      int may_delete = 1;

      /* Subwebs of one web don't conflict with each other.  */
      if (pfrom == pto)
	return;
      /* Snapshot the original conflicts before the first change.  */
      if (remember_conflicts && !pfrom->have_orig_conflicts)
	copy_conflict_list (pfrom);
      if (!TEST_BIT (sup_igraph, (pfrom->id * num_webs + pto->id)))
	{
	  /* First conflict between these parent webs: new entry.  */
	  cl = ra_alloc (sizeof (*cl));
	  cl->t = pto;
	  cl->sub = NULL;
	  cl->next = pfrom->conflict_list;
	  pfrom->conflict_list = cl;
	  if (pto->type != SELECT && pto->type != COALESCED)
	    pfrom->num_conflicts += 1 + pto->add_hardregs;
	  SET_BIT (sup_igraph, (pfrom->id * num_webs + pto->id));
	  may_delete = 0;
	}
      else
	/* Locate the existing edge to PTO.  */
	while (cl->t != pto)
	  cl = cl->next;
      if (pfrom != from || pto != to)
	{
	  /* A subweb conflict: attach it to the parent edge, unless an
	     existing whole-web conflict (sub == NULL on a pre-existing
	     edge) already subsumes it.  */
	  if (!may_delete || cl->sub != NULL)
	    {
	      sl = ra_alloc (sizeof (*sl));
	      sl->s = from;
	      sl->t = to;
	      sl->next = cl->sub;
	      cl->sub = sl;
	    }
	}
      else
	/* A whole-web conflict makes any sub-conflicts redundant.  */
	cl->sub = NULL;
    }
}
/* Record a conflict between (sub)webs WEB1 and WEB2, unless it is
   useless: identical webs, already recorded, fixed hard registers,
   precolored registers unusable by the other web, or webs with
   disjoint usable-register sets.  */
void
record_conflict (struct web *web1, struct web *web2)
{
  unsigned int id1 = web1->id, id2 = web2->id;
  unsigned int index = igraph_index (id1, id2);
  /* Trivial or already-recorded conflicts.  */
  if (web1 == web2 || TEST_BIT (igraph, index))
    return;
  gcc_assert (id1 != id2);
  /* Conflicts with fixed hard registers carry no information — those
     registers are never allocated.  */
  if ((web1->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web1->regno])
      || (web2->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web2->regno]))
    return;
  /* Likewise if the precolored register is unusable for the other
     web anyway.  */
  if ((web1->type == PRECOLORED
       && ! TEST_HARD_REG_BIT (web2->usable_regs, web1->regno))
      || (web2->type == PRECOLORED
	  && ! TEST_HARD_REG_BIT (web1->usable_regs, web2->regno)))
    return;
  /* Webs with disjoint usable-register sets can never steal each
     other's color; remember those pairs separately.  */
  if (web1->type != PRECOLORED && web2->type != PRECOLORED
      && ! hard_regs_intersect_p (&web1->usable_regs, &web2->usable_regs))
    {
      struct web *p1 = find_web_for_subweb (web1);
      struct web *p2 = find_web_for_subweb (web2);
      bitmap_set_bit (p1->useless_conflicts, p2->id);
      bitmap_set_bit (p2->useless_conflicts, p1->id);
      return;
    }
  SET_BIT (igraph, index);
  add_conflict_edge (web1, web2);
  add_conflict_edge (web2, web1);
}
/* For each non-artificial subweb of WEB, ensure subwebs covering the
   remaining ("inverse") bytes of the parent also exist, creating
   artificial ones where needed.  */
static void
build_inverse_webs (struct web *web)
{
  struct web *sweb = web->subreg_next;
  unsigned HOST_WIDE_INT undef;

  undef = rtx_to_undefined (web->orig_x);
  for (; sweb; sweb = sweb->subreg_next)
    if (!sweb->artificial)
      {
	/* Bytes of the parent not covered by this subweb.  */
	unsigned HOST_WIDE_INT bits;
	bits = undef & ~ rtx_to_undefined (sweb->orig_x);
	while (bits)
	  {
	    unsigned int size_word = undef_to_size_word (web->orig_x, &bits);
	    if (!find_subweb_2 (web, size_word))
	      add_subweb_2 (web, size_word);
	  }
      }
}
/* Take a snapshot copy of WEB and push it onto the *WL list (used to
   verify webs recreated in a later pass).  */
static void
copy_web (struct web *web, struct web_link **wl)
{
  struct web *snapshot = xmalloc (sizeof (struct web));
  struct web_link *entry = ra_alloc (sizeof *entry);

  *snapshot = *web;
  entry->web = snapshot;
  entry->next = *wl;
  *wl = entry;
}
/* Verify that the snapshot webs on *LINK agree with the current webs
   of the same ID, then free the snapshots and clear the list.  */
static void
compare_and_free_webs (struct web_link **link)
{
  struct web_link *wl;
  for (wl = *link; wl; wl = wl->next)
    {
      struct web *web1 = wl->web;
      struct web *web2 = ID2WEB (web1->id);
      gcc_assert (web1->regno == web2->regno);
      gcc_assert (web1->mode_changed == web2->mode_changed);
      gcc_assert (rtx_equal_p (web1->orig_x, web2->orig_x));
      gcc_assert (web1->type == web2->type);
      if (web1->type != PRECOLORED)
	{
	  unsigned int i;
	  /* Non-precolored webs must match exactly in their refs.  */
	  gcc_assert (web1->num_uses == web2->num_uses);
	  gcc_assert (web1->num_defs == web2->num_defs);
	  gcc_assert (web1->crosses_call == web2->crosses_call);
	  gcc_assert (web1->live_over_abnormal == web2->live_over_abnormal);
	  for (i = 0; i < web1->num_defs; i++)
	    gcc_assert (web1->defs[i] == web2->defs[i]);
	  for (i = 0; i < web1->num_uses; i++)
	    gcc_assert (web1->uses[i] == web2->uses[i]);
	}
      if (web1->type == PRECOLORED)
	{
	  /* The snapshot owns its defs/uses arrays; release them.  */
	  if (web1->defs)
	    free (web1->defs);
	  if (web1->uses)
	    free (web1->uses);
	}
      free (web1);
    }
  *link = NULL;
}
/* Allocate and fill the defs/uses arrays of all initial webs from the
   temp_refs lists chained up by parts_to_webs_1.  */
static void
init_webs_defs_uses (void)
{
  struct dlist *d;
  for (d = WEBS(INITIAL); d; d = d->next)
    {
      struct web *web = DLIST_WEB (d);
      unsigned int def_i, use_i;
      struct df_link *link;
      /* Webs carried over from the previous pass keep their arrays.  */
      if (web->old_web)
	continue;
      if (web->type == PRECOLORED)
	{
	  /* Precolored webs don't track their refs.  */
	  web->num_defs = web->num_uses = 0;
	  continue;
	}
      if (web->num_defs)
	web->defs = xmalloc (web->num_defs * sizeof (web->defs[0]));
      if (web->num_uses)
	web->uses = xmalloc (web->num_uses * sizeof (web->uses[0]));
      def_i = use_i = 0;
      /* Distribute the chained refs into the two arrays.  */
      for (link = web->temp_refs; link; link = link->next)
	{
	  if (DF_REF_REG_DEF_P (link->ref))
	    web->defs[def_i++] = link->ref;
	  else
	    web->uses[use_i++] = link->ref;
	}
      web->temp_refs = NULL;
      gcc_assert (def_i == web->num_defs);
      gcc_assert (use_i == web->num_uses);
    }
}
/* Distribute the web parts over webs: create (or, in later passes,
   reuse) one web per root part, and assign each def/use ref to its
   (sub)web.  Fills def2web/use2web, chains every ref onto its web's
   temp_refs list via ALL_REFS, stores copies of reused webs on
   *COPY_WEBS, and returns the number of webs.  */
static unsigned int
parts_to_webs_1 (struct df *df, struct web_link **copy_webs,
		 struct df_link *all_refs)
{
  unsigned int i;
  unsigned int webnum;
  unsigned int def_id = df->def_id;
  unsigned int use_id = df->use_id;
  struct web_part *wp_first_use = &web_parts[def_id];

  /* Walk all web parts: defs first, then uses.  */
  webnum = 0;
  for (i = 0; i < def_id + use_id; i++)
    {
      struct web *subweb, *web = 0;
      struct web_part *wp = &web_parts[i];
      struct ref *ref = wp->ref;
      unsigned int ref_id;
      rtx reg;
      if (!ref)
	continue;
      ref_id = i;
      if (i >= def_id)
	ref_id -= def_id;
      all_refs[i].ref = ref;
      reg = DF_REF_REG (ref);
      if (! wp->uplink)
	{
	  /* A root part: it gets a web of its own.  */
	  unsigned int newid = ~(unsigned)0;
	  unsigned int old_web = 0;
	  if (ra_pass == 1)
	    {
	      /* First pass: everything is a fresh web.  */
	      web = xmalloc (sizeof (struct web));
	      newid = last_num_webs++;
	      init_one_web (web, GET_CODE (reg) == SUBREG
			    ? SUBREG_REG (reg) : reg);
	    }
	  else
	    {
	      /* Later passes: try to reuse a web from the previous
		 pass (via def2web, or the hard register's web).  */
	      web = def2web[i];
	      if (!web && DF_REF_REGNO (ref) < FIRST_PSEUDO_REGISTER)
		web = hardreg2web[DF_REF_REGNO (ref)];
	      if (web)
		{
		  /* Found one: keep a snapshot for the later
		     consistency check in compare_and_free_webs.  */
		  web = find_web_for_subweb (web);
		  remove_list (web->dlink, &WEBS(INITIAL));
		  old_web = 1;
		  copy_web (web, copy_webs);
		}
	      else
		{
		  /* Otherwise recycle a freed web, or allocate.  */
		  if (WEBS(FREE))
		    web = DLIST_WEB (pop_list (&WEBS(FREE)));
		  else
		    {
		      web = xmalloc (sizeof (struct web));
		      newid = last_num_webs++;
		    }
		}
	      if (newid == ~(unsigned)0)
		newid = web->id;
	      if (old_web)
		reinit_one_web (web, GET_CODE (reg) == SUBREG
				? SUBREG_REG (reg) : reg);
	      else
		init_one_web (web, GET_CODE (reg) == SUBREG
			      ? SUBREG_REG (reg) : reg);
	      web->old_web = (old_web && web->type != PRECOLORED) ? 1 : 0;
	    }
	  web->span_deaths = wp->spanned_deaths;
	  web->crosses_call = wp->crosses_call;
	  web->id = newid;
	  web->temp_refs = NULL;
	  webnum++;
	  /* Remember the canonical web of each hard register.  */
	  if (web->regno < FIRST_PSEUDO_REGISTER)
	    {
	      if (!hardreg2web[web->regno])
		hardreg2web[web->regno] = web;
	      else
		gcc_assert (hardreg2web[web->regno] == web);
	    }
	}

      /* If this slot already points at a (reused old) web, only the
	 flags need updating; the ref assignment is unchanged.  */
      if (def2web[i] != NULL)
	{
	  web = def2web[i];
	  web = find_web_for_subweb (web);
	  if ((DF_REF_FLAGS (ref) & DF_REF_MODE_CHANGE) != 0
	      && web->regno >= FIRST_PSEUDO_REGISTER)
	    web->mode_changed = 1;
	  if ((DF_REF_FLAGS (ref) & DF_REF_STRIPPED) != 0
	      && web->regno >= FIRST_PSEUDO_REGISTER)
	    web->subreg_stripped = 1;
	  if (i >= def_id
	      && TEST_BIT (live_over_abnormal, ref_id))
	    web->live_over_abnormal = 1;
	  gcc_assert (web->old_web);
	  gcc_assert (web->type != PRECOLORED);
	  continue;
	}
      if (wp->uplink)
	{
	  /* A non-root part: find its web through the root's ref.  */
	  struct web_part *rwp = find_web_part (wp);
	  unsigned int j = DF_REF_ID (rwp->ref);
	  if (rwp < wp_first_use)
	    web = def2web[j];
	  else
	    web = use2web[j];
	  web = find_web_for_subweb (web);
	}

      /* Chain the ref onto the web's temporary ref list.  */
      all_refs[i].next = web->temp_refs;
      web->temp_refs = &all_refs[i];
      gcc_assert (!web->old_web || web->type == PRECOLORED);

      /* SUBREG refs belong to the matching subweb, created here on
	 first sight.  */
      if (GET_CODE (reg) == SUBREG)
	{
	  subweb = find_subweb (web, reg);
	  if (!subweb)
	    {
	      subweb = add_subweb (web, reg);
	      gcc_assert (!web->old_web);
	    }
	}
      else
	subweb = web;

      /* Propagate mode-change and subreg-stripped flags.  */
      if ((DF_REF_FLAGS (ref) & DF_REF_MODE_CHANGE) != 0
	  && web->regno >= FIRST_PSEUDO_REGISTER)
	web->mode_changed = 1;
      if ((DF_REF_FLAGS (ref) & DF_REF_STRIPPED) != 0
	  && web->regno >= FIRST_PSEUDO_REGISTER)
	web->subreg_stripped = 1;

      /* Enter the ref into def2web/use2web and count it.  */
      if (i < def_id)
	{
	  /* Cross-check against the previous pass's assignment.  */
	  if (ra_pass > 1)
	    {
	      struct web *compare = def2web[i];
	      if (i < last_def_id)
		gcc_assert (!web->old_web || compare == subweb);
	      gcc_assert (web->old_web || !compare);
	      gcc_assert (!compare || compare == subweb);
	    }
	  def2web[i] = subweb;
	  web->num_defs++;
	}
      else
	{
	  if (ra_pass > 1)
	    {
	      struct web *compare = use2web[ref_id];
	      gcc_assert (ref_id >= last_use_id
			  || !web->old_web || compare == subweb);
	      gcc_assert (web->old_web || !compare);
	      gcc_assert (!compare || compare == subweb);
	    }
	  use2web[ref_id] = subweb;
	  web->num_uses++;
	  if (TEST_BIT (live_over_abnormal, ref_id))
	    web->live_over_abnormal = 1;
	}
    }
  gcc_assert (webnum == num_webs);
  return webnum;
}
/* Convert the union-find web parts into webs: build webs and subwebs,
   assign final IDs, allocate the interference graph bitmaps, and
   distribute the refs to the webs.  (The two '&copy_webs' arguments
   below had been corrupted into the HTML entity '©_webs', which does
   not compile.)  */
static void
parts_to_webs (struct df *df)
{
  unsigned int i;
  unsigned int webnum;
  struct web_link *copy_webs = NULL;
  struct dlist *d;
  struct df_link *all_refs;
  num_subwebs = 0;

  /* Build the primary webs from the web parts.  */
  all_refs = xcalloc (df->def_id + df->use_id, sizeof (all_refs[0]));
  webnum = parts_to_webs_1 (df, &copy_webs, all_refs);

  /* Ensure every hard register has a web, even without any ref.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (!hardreg2web[i])
      {
	struct web *web = xmalloc (sizeof (struct web));
	init_one_web (web, gen_rtx_REG (reg_raw_mode[i], i));
	web->id = last_num_webs++;
	hardreg2web[web->regno] = web;
      }
  num_webs = last_num_webs;

  /* Create subwebs for the conflicts recorded on the sub parts.  */
  for (i = 0; i < df->def_id + df->use_id; i++)
    {
      struct web_part *wp = &web_parts[i];
      struct tagged_conflict *cl;
      struct web *web;
      if (wp->uplink || !wp->ref)
	{
	  /* Non-root parts had their conflicts moved to the root.  */
	  gcc_assert (!wp->sub_conflicts);
	  continue;
	}
      web = def2web[i];
      web = find_web_for_subweb (web);
      for (cl = wp->sub_conflicts; cl; cl = cl->next)
	if (!find_subweb_2 (web, cl->size_word))
	  add_subweb_2 (web, cl->size_word);
    }

  /* Complete the subweb structure and number the subwebs.  */
  webnum = last_num_webs;
  for (d = WEBS(INITIAL); d; d = d->next)
    {
      struct web *web = DLIST_WEB (d);
      if (web->subreg_next)
	{
	  struct web *sweb;
	  build_inverse_webs (web);
	  for (sweb = web->subreg_next; sweb; sweb = sweb->subreg_next)
	    sweb->id = webnum++;
	}
    }

  /* Build the id -> web mapping over webs and subwebs.  */
  id2web = xcalloc (webnum, sizeof (id2web[0]));
  for (d = WEBS(INITIAL); d; d = d->next)
    {
      struct web *web = DLIST_WEB (d);
      ID2WEB (web->id) = web;
      for (web = web->subreg_next; web; web = web->subreg_next)
	ID2WEB (web->id) = web;
    }
  num_subwebs = webnum - last_num_webs;
  num_allwebs = num_webs + num_subwebs;
  num_webs += num_subwebs;

  /* Allocate the (super-)interference graph bitmaps.  */
  igraph = sbitmap_alloc (num_webs * num_webs / 2);
  sup_igraph = sbitmap_alloc (num_webs * num_webs);
  sbitmap_zero (igraph);
  sbitmap_zero (sup_igraph);

  init_webs_defs_uses ();
  /* Verify reused webs against their snapshots, freeing the latter.  */
  compare_and_free_webs (&copy_webs);
  free (all_refs);
}
/* For ra_pass > 1: rebuild each web's conflict list, keeping only
   conflicts against webs that existed in the previous pass (old webs)
   or are precolored; conflicts with newly created webs are dropped
   and will be recomputed.  Re-registers the kept edges in the igraph
   and sup_igraph bitmaps.  */
static void
reset_conflicts (void)
{
unsigned int i;
bitmap newwebs = BITMAP_XMALLOC ();
/* Collect IDs of webs that are new in this pass, plus precolored
   webs.  These IDs are masked out of useless_conflicts below.  */
for (i = 0; i < num_webs - num_subwebs; i++)
{
struct web *web = ID2WEB (i);
if (web->type == PRECOLORED || !web->old_web)
bitmap_set_bit (newwebs, web->id);
}
for (i = 0; i < num_webs - num_subwebs; i++)
{
struct web *web = ID2WEB (i);
struct conflict_link *cl;
struct conflict_link **pcl;
pcl = &(web->conflict_list);
/* Restore the conflict list saved before conflicts were changed.  */
if (web->have_orig_conflicts)
{
web->conflict_list = web->orig_conflict_list;
web->orig_conflict_list = NULL;
}
gcc_assert (!web->orig_conflict_list);
if (web->type != PRECOLORED && !web->old_web)
{
/* A brand-new, non-precolored web starts without conflicts and
   must not have remembered useless conflicts either.  */
*pcl = NULL;
gcc_assert (bitmap_first_set_bit (web->useless_conflicts) < 0);
}
else
{
/* Remove IDs of new (or precolored) webs from the remembered
   useless conflicts.  */
bitmap_operation (web->useless_conflicts, web->useless_conflicts,
newwebs, BITMAP_AND_COMPL);
/* Keep only edges whose target web survived the last pass, and
   re-register them in the conflict bitmaps.  */
for (cl = web->conflict_list; cl; cl = cl->next)
{
if (cl->t->old_web || cl->t->type == PRECOLORED)
{
*pcl = cl;
pcl = &(cl->next);
web->num_conflicts += 1 + cl->t->add_hardregs;
SET_BIT (sup_igraph, (web->id * num_webs + cl->t->id));
/* Without subweb conflicts the edge is between the full
   webs; otherwise register each subweb pair.  */
if (!cl->sub)
SET_BIT (igraph, igraph_index (web->id, cl->t->id));
else
{
struct sub_conflict *sl;
for (sl = cl->sub; sl; sl = sl->next)
SET_BIT (igraph, igraph_index (sl->s->id, sl->t->id));
}
}
}
/* Terminate the pruned list.  */
*pcl = NULL;
}
web->have_orig_conflicts = 0;
}
BITMAP_XFREE (newwebs);
}
#if 0
/* Disabled sanity check: verify that each web's cached num_conflicts
   matches a recount over its conflict list, ignoring neighbors whose
   type is SELECT or COALESCED.  Precolored webs are exempt.  */
static void
check_conflict_numbers (void)
{
unsigned int i;
for (i = 0; i < num_webs; i++)
{
struct web *web = ID2WEB (i);
int new_conf = 0 * web->add_hardregs;
struct conflict_link *cl;
for (cl = web->conflict_list; cl; cl = cl->next)
if (cl->t->type != SELECT && cl->t->type != COALESCED)
new_conf += 1 + cl->t->add_hardregs;
gcc_assert (web->type == PRECOLORED || new_conf == web->num_conflicts);
}
}
#endif
/* Convert the conflicts recorded on web parts into conflict edges
   between webs.  Also records conflicts with call-clobbered hard regs
   for webs crossing calls, and (on STACK_REGS targets) with the stack
   registers for webs live over abnormal edges.  DF is the dataflow
   info.  */
static void
conflicts_between_webs (struct df *df)
{
unsigned int i;
struct dlist *d;
bitmap ignore_defs = BITMAP_XMALLOC ();
unsigned int have_ignored;
/* Per-sweep cache of target web IDs already connected, to avoid
   calling record_conflict twice for the same pair.  */
unsigned int *pass_cache = xcalloc (num_webs, sizeof (int));
unsigned int pass = 0;
if (ra_pass > 1)
reset_conflicts ();
/* Defs whose ref is gone must be masked out of the conflict
   bitmaps.  */
for (i = 0; i < df->def_id; i++)
if (web_parts[i].ref == NULL)
bitmap_set_bit (ignore_defs, i);
have_ignored = (bitmap_first_set_bit (ignore_defs) >= 0);
for (i = 0; i < df->def_id + df->use_id; i++)
{
struct tagged_conflict *cl = web_parts[i].sub_conflicts;
struct web *supweb1;
/* Skip parts without recorded conflicts.  Roots that are uses
   (i >= def_id) are skipped for pseudos; hardreg uses are kept
   (NOTE(review): presumably for conflicts of uninitialized
   hardreg uses — confirm against parts_to_webs_1).  */
if (!cl
|| (i >= df->def_id
&& DF_REF_REGNO (web_parts[i].ref) >= FIRST_PSEUDO_REGISTER))
continue;
/* use2web is laid out directly after def2web (see ra_build_realloc),
   so indexing def2web works for both defs and uses here.  */
supweb1 = def2web[i];
supweb1 = find_web_for_subweb (supweb1);
/* One tagged_conflict per (size, word) subpart of this web part.  */
for (; cl; cl = cl->next)
if (cl->conflicts)
{
int j;
struct web *web1 = find_subweb_2 (supweb1, cl->size_word);
bitmap_iterator bi;
if (have_ignored)
bitmap_operation (cl->conflicts, cl->conflicts, ignore_defs,
BITMAP_AND_COMPL);
/* New sweep: invalidate the dedup cache by bumping `pass'.  */
pass++;
EXECUTE_IF_SET_IN_BITMAP (cl->conflicts, 0, j, bi)
{
struct web *web2 = def2web[j];
unsigned int id2 = web2->id;
if (pass_cache[id2] != pass)
{
pass_cache[id2] = pass;
record_conflict (web1, web2);
}
}
}
}
free (pass_cache);
BITMAP_XFREE (ignore_defs);
for (d = WEBS(INITIAL); d; d = d->next)
{
struct web *web = DLIST_WEB (d);
int j;
/* Webs live across a call conflict with all call-clobbered regs.  */
if (web->crosses_call)
for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
if (TEST_HARD_REG_BIT (regs_invalidated_by_call, j))
record_conflict (web, hardreg2web[j]);
#ifdef STACK_REGS
/* Webs live over abnormal edges can't be in stack registers.  */
if (web->live_over_abnormal)
for (j = FIRST_STACK_REG; j <= LAST_STACK_REG; j++)
record_conflict (web, hardreg2web[j]);
#endif
}
}
/* Remember that WEB was a spill temporary: mark it and recompute its
   usable register set, register class, add_hardregs and num_freedom
   from scratch.  */
static void
remember_web_was_spilled (struct web *web)
{
int i;
unsigned int found_size = 0;
int adjust;
web->spill_temp = 1;
web->use_my_regs = 1;
/* Pseudos at or above max_normal_pseudo keep their preferred plus
   alternate classes; all others start from the general-purpose set
   (or the target's non-special set on PowerPC builds).  */
if (web->regno >= max_normal_pseudo)
{
COPY_HARD_REG_SET (web->usable_regs,
reg_class_contents[reg_preferred_class (web->regno)]);
IOR_HARD_REG_SET (web->usable_regs,
reg_class_contents[reg_alternate_class (web->regno)]);
}
else
#ifdef TARGET_POWERPC
COPY_HARD_REG_SET (web->usable_regs,
reg_class_contents[(int) NON_SPECIAL_REGS]);
#else
COPY_HARD_REG_SET (web->usable_regs,
reg_class_contents[(int) GENERAL_REGS]);
#endif
AND_COMPL_HARD_REG_SET (web->usable_regs, never_use_colors);
prune_hardregs_for_mode (&web->usable_regs, PSEUDO_REGNO_MODE (web->regno));
#ifdef CANNOT_CHANGE_MODE_CLASS
if (web->mode_changed)
AND_COMPL_HARD_REG_SET (web->usable_regs, invalid_mode_change_regs);
#endif
web->num_freedom = hard_regs_count (web->usable_regs);
gcc_assert (web->num_freedom);
COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs);
/* Find the largest (by hard-reg count) register class that is fully
   contained in usable_regs.  */
web->regclass = NO_REGS;
for (i = (int) ALL_REGS - 1; i > 0; i--)
{
unsigned int size;
HARD_REG_SET test;
COPY_HARD_REG_SET (test, reg_class_contents[i]);
AND_COMPL_HARD_REG_SET (test, never_use_colors);
/* GO_IF_HARD_REG_SUBSET jumps to `found' when TEST is a subset of
   usable_regs; otherwise fall through to `continue'.  */
GO_IF_HARD_REG_SUBSET (test, web->usable_regs, found);
continue;
found:
size = hard_regs_count (test);
if (found_size < size)
{
web->regclass = (enum reg_class) i;
found_size = size;
}
}
/* Recompute how many extra hardregs a multi-word value in this class
   needs, and shrink num_freedom accordingly.  */
adjust = 0 * web->add_hardregs;
web->add_hardregs =
CLASS_MAX_NREGS (web->regclass, PSEUDO_REGNO_MODE (web->regno)) - 1;
web->num_freedom -= web->add_hardregs;
gcc_assert (web->num_freedom);
adjust -= 0 * web->add_hardregs;
web->num_conflicts -= adjust;
}
/* Scan all initial webs and mark the ones touched by insns inserted
   after orig_max_uid (i.e. by earlier spilling) as spill temporaries.
   spill_temp is set to 1, 2 or 3 depending on the situation
   (NOTE(review): the precise meaning of levels 2 and 3 is defined by
   the consumers of spill_temp — confirm there).  */
static void
detect_spill_temps (void)
{
struct dlist *d;
bitmap already = BITMAP_XMALLOC ();
for (d = WEBS(INITIAL); d; d = d->next)
{
struct web *web = DLIST_WEB (d);
/* Hardreg webs are never treated as spill temps.  */
if (web->regno < FIRST_PSEUDO_REGISTER)
continue;
/* Webs without defs are skipped entirely.  */
if (web->num_defs == 0)
continue;
/* Defined but never used: mark with level 3.  */
if (web->num_uses == 0)
web->spill_temp = 3;
else if (web->changed)
web->spill_temp = 1;
else
{
unsigned int i;
int spill_involved = 0;
/* Any ref in an insn with UID >= orig_max_uid comes from spill
   code inserted in a previous pass.  */
for (i = 0; i < web->num_uses && !spill_involved; i++)
if (DF_REF_INSN_UID (web->uses[i]) >= orig_max_uid)
spill_involved = 1;
for (i = 0; i < web->num_defs && !spill_involved; i++)
if (DF_REF_INSN_UID (web->defs[i]) >= orig_max_uid)
spill_involved = 1;
if (spill_involved)
{
int num_deaths = web->span_deaths;
remember_web_was_spilled (web);
/* Discount deaths happening in def insns that also contain a
   use of this web; `already' ensures each insn is counted at
   most once.  */
bitmap_zero (already);
for (i = 0; i < web->num_defs; i++)
{
rtx insn = web->defs[i]->insn;
if (TEST_BIT (insns_with_deaths, INSN_UID (insn))
&& !bitmap_bit_p (already, INSN_UID (insn)))
{
unsigned int j;
bitmap_set_bit (already, INSN_UID (insn));
for (j = 0; j < web->num_uses; j++)
if (web->uses[j]->insn == insn)
{
num_deaths--;
break;
}
}
}
/* Crossing a call or still spanning deaths: level 2.  */
if (web->crosses_call || num_deaths > 0)
web->spill_temp = 1 * 2;
}
else if (web->span_deaths == 0 && !web->crosses_call)
web->spill_temp = 3;
}
web->orig_spill_temp = web->spill_temp;
}
BITMAP_XFREE (already);
}
/* Return nonzero if MEM refers to a stack slot, i.e. its address has
   the form (plus BASE const_int) with BASE being the frame pointer,
   hard frame pointer, stack pointer, or a fixed argument pointer.  */
int
memref_is_stack_slot (rtx mem)
{
  rtx addr = XEXP (mem, 0);
  rtx base;

  if (GET_CODE (addr) != PLUS)
    return 0;
  if (GET_CODE (XEXP (addr, 1)) != CONST_INT)
    return 0;

  base = XEXP (addr, 0);
  return (base == frame_pointer_rtx
	  || base == hard_frame_pointer_rtx
	  || base == stack_pointer_rtx
	  || (base == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]));
}
/* Return 1 if X or any of its subexpressions contains a pseudo
   register, 0 otherwise.  Looks through an outer SUBREG.  */
static int
contains_pseudo (rtx x)
{
  const char *format;
  int idx;

  if (GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);
  if (REG_P (x))
    return REGNO (x) >= FIRST_PSEUDO_REGISTER ? 1 : 0;

  /* Recurse into all rtx ('e') and rtx-vector ('E') operands.  */
  format = GET_RTX_FORMAT (GET_CODE (x));
  for (idx = GET_RTX_LENGTH (GET_CODE (x)) - 1; idx >= 0; idx--)
    {
      if (format[idx] == 'e')
	{
	  if (contains_pseudo (XEXP (x, idx)))
	    return 1;
	}
      else if (format[idx] == 'E')
	{
	  int k;
	  for (k = 0; k < XVECLEN (x, idx); k++)
	    if (contains_pseudo (XVECEXP (x, idx, k)))
	      return 1;
	}
    }
  return 0;
}
/* Test insn used by want_to_remat below; created lazily and kept as a
   GC root so it survives garbage collection.  */
static GTY(()) rtx remat_test_insn;
/* Return nonzero if we want to rematerialize the rtx X instead of
   reloading it from a stack slot: X is either a general operand, or
   "(set (reg) X)" is recognized as a valid insn needing no clobbers.  */
static int
want_to_remat (rtx x)
{
int num_clobbers = 0;
int icode;
/* A valid general operand is always OK to remat.  */
if (general_operand (x, GET_MODE (x)))
return 1;
/* Otherwise build the test insn "(set (reg) const0)" once; the reg
   number FIRST_PSEUDO_REGISTER * 2 is just a scratch pseudo.  */
if (remat_test_insn == 0)
{
remat_test_insn
= make_insn_raw (gen_rtx_SET (VOIDmode,
gen_rtx_REG (word_mode,
FIRST_PSEUDO_REGISTER * 2),
const0_rtx));
NEXT_INSN (remat_test_insn) = PREV_INSN (remat_test_insn) = 0;
}
/* Splice X into the test insn and ask recog whether the resulting
   set is a valid insn without added clobbers.  */
PUT_MODE (SET_DEST (PATTERN (remat_test_insn)), GET_MODE (x));
SET_SRC (PATTERN (remat_test_insn)) = x;
return ((icode = recog (PATTERN (remat_test_insn), remat_test_insn,
&num_clobbers)) >= 0
&& (num_clobbers == 0
));
}
/* Find webs which are rematerializable: all of their defs must be
   simple sets of the same value to the web's reg, and that value must
   be a constant, an expression free of pseudos and not unstable, or a
   stack-slot load inserted by spilling.  The common source pattern is
   stored in web->pattern.  */
static void
detect_remat_webs (void)
{
struct dlist *d;
for (d = WEBS(INITIAL); d; d = d->next)
{
struct web *web = DLIST_WEB (d);
unsigned int i;
rtx pat = NULL_RTX;
/* Hardreg webs and webs without both defs and uses are skipped.  */
if (web->regno < FIRST_PSEUDO_REGISTER || !web->num_defs
|| !web->num_uses)
continue;
for (i = 0; i < web->num_defs; i++)
{
rtx insn;
rtx set = single_set (insn = DF_REF_INSN (web->defs[i]));
rtx src;
/* A def that isn't a single_set disqualifies the web.  */
if (!set)
break;
src = SET_SRC (set);
/* The set must target exactly the web's register.  */
if (!rtx_equal_p (SET_DEST (set), web->orig_x))
break;
/* All defs must set the same value.  */
if (pat && !rtx_equal_p (pat, src))
break;
if (pat)
continue;
/* First def: check that SRC is a remat candidate.  A MEM only
   qualifies as a stack-slot load from spill code (insn newer
   than orig_max_uid).  */
if ((CONSTANT_P (src)
|| (!rtx_unstable_p (src) && !contains_pseudo (src))
|| (MEM_P (src)
&& INSN_UID (insn) >= orig_max_uid
&& memref_is_stack_slot (src)))
&& want_to_remat (src))
pat = src;
else
break;
}
/* Only record the pattern if every def passed the checks.  */
if (pat && i == web->num_defs)
web->pattern = pat;
}
}
static void
determine_web_costs (void)
{
struct dlist *d;
for (d = WEBS(INITIAL); d; d = d->next)
{
unsigned int i, num_loads;
int load_cost, store_cost;
unsigned HOST_WIDE_INT w;
struct web *web = DLIST_WEB (d);
if (web->type == PRECOLORED)
continue;
if (web->pattern)
{
load_cost = 1 + rtx_cost (web->pattern, 0);
store_cost = 0;
}
else
{
load_cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x),
web->regclass, 1);
store_cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x),
web->regclass, 0);
}
num_loads = MIN (web->span_deaths, web->num_uses);
for (w = 0, i = 0; i < web->num_uses; i++)
w += DF_REF_BB (web->uses[i])->frequency + 1;
if (num_loads < web->num_uses)
w = (w * num_loads + web->num_uses - 1) / web->num_uses;
web->spill_cost = w * load_cost;
if (store_cost)
{
for (w = 0, i = 0; i < web->num_defs; i++)
w += DF_REF_BB (web->defs[i])->frequency + 1;
web->spill_cost += w * store_cost;
}
web->orig_spill_cost = web->spill_cost;
}
}
static void
detect_webs_set_in_cond_jump (void)
{
basic_block bb;
FOR_EACH_BB (bb)
if (JUMP_P (BB_END (bb)))
{
struct df_link *link;
for (link = DF_INSN_DEFS (df, BB_END (bb)); link; link = link->next)
if (link->ref && DF_REF_REGNO (link->ref) >= FIRST_PSEUDO_REGISTER)
{
struct web *web = def2web[DF_REF_ID (link->ref)];
web->orig_spill_temp = web->spill_temp = 3;
}
}
}
/* Build the webs for the current function, running the per-step
   helpers in order: convert web parts to webs, mark spill temporaries
   and webs set in conditional jumps, record conflicts between webs,
   detect rematerializable webs, and compute spill costs.  DF is the
   dataflow info.  */
static void
make_webs (struct df *df)
{
parts_to_webs (df);
detect_spill_temps ();
detect_webs_set_in_cond_jump ();
conflicts_between_webs (df);
detect_remat_webs ();
determine_web_costs ();
}
static void
moves_to_webs (struct df *df)
{
struct df_link *link;
struct move_list *ml;
for (ml = wl_moves; ml; ml = ml->next)
{
struct move *m = ml->move;
struct web *web;
struct move_list *newml;
if (!m)
continue;
m->type = WORKLIST;
m->dlink = NULL;
for (link = DF_INSN_DEFS (df, m->insn); link; link = link->next)
if (link->ref)
{
web = def2web[DF_REF_ID (link->ref)];
web = find_web_for_subweb (web);
if (!m->target_web || web->regno < m->target_web->regno)
m->target_web = web;
}
for (link = DF_INSN_USES (df, m->insn); link; link = link->next)
if (link->ref)
{
web = use2web[DF_REF_ID (link->ref)];
web = find_web_for_subweb (web);
if (!m->source_web || web->regno < m->source_web->regno)
m->source_web = web;
}
if (m->source_web && m->target_web
&& hard_regs_intersect_p (&m->source_web->usable_regs,
&m->target_web->usable_regs))
{
if (!flag_ra_optimistic_coalescing)
{
struct move_list *test = m->source_web->moves;
for (; test && test->move != m; test = test->next);
if (! test)
{
newml = ra_alloc (sizeof (struct move_list));
newml->move = m;
newml->next = m->source_web->moves;
m->source_web->moves = newml;
}
test = m->target_web->moves;
for (; test && test->move != m; test = test->next);
if (! test)
{
newml = ra_alloc (sizeof (struct move_list));
newml->move = m;
newml->next = m->target_web->moves;
m->target_web->moves = newml;
}
}
}
else
ml->move = NULL;
}
}
/* Handle asm insns: for each register operand of INSN, compute the
   union of hard-register classes allowed by the operand's constraints
   and determine the set of hard registers the operand's web would
   conflict with.  The actual conflict recording is currently disabled
   (see the #if 0 below); only the dump output remains active.  DF is
   the dataflow info.  */
static void
handle_asm_insn (struct df *df, rtx insn)
{
const char *constraints[MAX_RECOG_OPERANDS];
enum machine_mode operand_mode[MAX_RECOG_OPERANDS];
int i, noperands, in_output;
HARD_REG_SET clobbered, allowed, conflict;
rtx pat;
/* Only real asm insns with operands are interesting.  */
if (! INSN_P (insn)
|| (noperands = asm_noperands (PATTERN (insn))) < 0)
return;
pat = PATTERN (insn);
CLEAR_HARD_REG_SET (clobbered);
/* Collect the hard registers clobbered in a PARALLEL asm body.
   (NOTE(review): `clobbered' is computed but not used below.)  */
if (GET_CODE (pat) == PARALLEL)
for (i = 0; i < XVECLEN (pat, 0); i++)
{
rtx t = XVECEXP (pat, 0, i);
if (GET_CODE (t) == CLOBBER && REG_P (XEXP (t, 0))
&& REGNO (XEXP (t, 0)) < FIRST_PSEUDO_REGISTER)
SET_HARD_REG_BIT (clobbered, REGNO (XEXP (t, 0)));
}
decode_asm_operands (pat, recog_data.operand, recog_data.operand_loc,
constraints, operand_mode);
/* Assume operands are outputs until a lookup among the defs fails.  */
in_output = 1;
for (i = 0; i < noperands; i++)
{
const char *p = constraints[i];
int cls = (int) NO_REGS;
struct df_link *link;
rtx reg;
struct web *web;
int nothing_allowed = 1;
reg = recog_data.operand[i];
/* Strip wrappers to get at the underlying register.  */
while (GET_CODE (reg) == SUBREG
|| GET_CODE (reg) == ZERO_EXTRACT
|| GET_CODE (reg) == SIGN_EXTRACT
|| GET_CODE (reg) == STRICT_LOW_PART)
reg = XEXP (reg, 0);
/* Only pseudo-register operands matter here.  */
if (!REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
continue;
/* Find the df ref for this operand: first among the insn's defs,
   then (after the defs are exhausted) among its uses.  */
while (1)
{
if (in_output)
link = df->insns[INSN_UID (insn)].defs;
else
link = df->insns[INSN_UID (insn)].uses;
while (link && link->ref && DF_REF_REAL_REG (link->ref) != reg)
link = link->next;
if (!link || !link->ref)
{
gcc_assert (in_output);
in_output = 0;
}
else
break;
}
if (in_output)
web = def2web[DF_REF_ID (link->ref)];
else
web = use2web[DF_REF_ID (link->ref)];
reg = DF_REF_REG (link->ref);
/* Parse the constraint string, accumulating in `allowed' the union
   of register classes permitted by any alternative.  */
CLEAR_HARD_REG_SET (allowed);
while (1)
{
char c = *p;
/* End of an alternative: merge its class into `allowed'.  */
if (c == '\0' || c == ',' || c == '#')
{
p++;
IOR_HARD_REG_SET (allowed, reg_class_contents[cls]);
if (cls != NO_REGS)
nothing_allowed = 0;
cls = NO_REGS;
/* '#' starts a comment: skip to the next alternative.  */
if (c == '#')
do {
c = *p++;
} while (c != '\0' && c != ',');
if (c == '\0')
break;
continue;
}
switch (c)
{
/* Modifiers and non-register constraints: no class change.  */
case '=': case '+': case '*': case '%': case '?': case '!':
case '0': case '1': case '2': case '3': case '4': case 'm':
case '<': case '>': case 'V': case 'o': case '&': case 'E':
case 'F': case 's': case 'i': case 'n': case 'X': case 'I':
case 'J': case 'K': case 'L': case 'M': case 'N': case 'O':
case 'P':
break;
case 'p':
cls = (int) reg_class_subunion[cls][(int) BASE_REG_CLASS];
nothing_allowed = 0;
break;
case 'g':
case 'r':
cls = (int) reg_class_subunion[cls][(int) GENERAL_REGS];
nothing_allowed = 0;
break;
default:
/* Machine-specific constraint letters.  */
cls =
(int) reg_class_subunion[cls][(int)
REG_CLASS_FROM_CONSTRAINT (c,
p)];
}
p += CONSTRAINT_LEN (c, p);
}
/* Conflicting regs: those usable for this pseudo's preferred or
   alternate class, minus the regs the constraints allow.  */
if (nothing_allowed)
{
CLEAR_HARD_REG_SET (conflict);
}
else
{
COPY_HARD_REG_SET (conflict, usable_regs
[reg_preferred_class (web->regno)]);
IOR_HARD_REG_SET (conflict, usable_regs
[reg_alternate_class (web->regno)]);
AND_COMPL_HARD_REG_SET (conflict, allowed);
/* Actually recording these conflicts is disabled.  */
#if 0
for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
if (TEST_HARD_REG_BIT (conflict, c))
record_conflict (web, hardreg2web[c]);
#endif
}
if (dump_file)
{
int c;
ra_debug_msg (DUMP_ASM, " ASM constrain Web %d conflicts with:", web->id);
for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
if (TEST_HARD_REG_BIT (conflict, c))
ra_debug_msg (DUMP_ASM, " %d", c);
ra_debug_msg (DUMP_ASM, "\n");
}
}
}
/* Entry point: build the interference graph for the current function.
   Scans insns to build web parts and their conflicts, connects
   read-mod-write parts, converts parts to webs, distributes the moves
   onto webs, and finally processes asm insns for their constraints.
   DF is the dataflow info.  */
void
build_i_graph (struct df *df)
{
rtx insn;
init_web_parts (df);
sbitmap_zero (move_handled);
wl_moves = NULL;
build_web_parts_and_conflicts (df);
connect_rmw_web_parts (df);
make_webs (df);
moves_to_webs (df);
/* Record hard-reg restrictions implied by asm constraints.  */
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
handle_asm_insn (df, insn);
}
void
ra_build_realloc (struct df *df)
{
struct web_part *last_web_parts = web_parts;
struct web **last_def2web = def2web;
struct web **last_use2web = use2web;
sbitmap last_live_over_abnormal = live_over_abnormal;
unsigned int i;
struct dlist *d;
move_handled = sbitmap_alloc (get_max_uid () );
web_parts = xcalloc (df->def_id + df->use_id, sizeof web_parts[0]);
def2web = xcalloc (df->def_id + df->use_id, sizeof def2web[0]);
use2web = &def2web[df->def_id];
live_over_abnormal = sbitmap_alloc (df->use_id);
sbitmap_zero (live_over_abnormal);
for (i = 0; i < last_def_id + last_use_id; i++)
{
struct web_part *dest = &web_parts[i < last_def_id
? i : (df->def_id + i - last_def_id)];
struct web_part *up;
*dest = last_web_parts[i];
up = dest->uplink;
dest->uplink = NULL;
if (up && up->ref)
{
unsigned int id = DF_REF_ID (up->ref);
if (up < &last_web_parts[last_def_id])
{
if (df->defs[id])
dest->uplink = &web_parts[DF_REF_ID (up->ref)];
}
else if (df->uses[id])
dest->uplink = &web_parts[df->def_id + DF_REF_ID (up->ref)];
}
}
for (i = 0; i < last_def_id; i++)
{
struct web *web = last_def2web[i];
if (web)
{
web = find_web_for_subweb (web);
if (web->type != FREE && web->type != PRECOLORED)
def2web[i] = last_def2web[i];
}
}
for (i = 0; i < last_use_id; i++)
{
struct web *web = last_use2web[i];
if (web)
{
web = find_web_for_subweb (web);
if (web->type != FREE && web->type != PRECOLORED)
use2web[i] = last_use2web[i];
}
if (TEST_BIT (last_live_over_abnormal, i))
SET_BIT (live_over_abnormal, i);
}
for (d = WEBS(FREE); d; d = d->next)
{
struct web *web = DLIST_WEB (d);
struct web *wnext;
for (web = web->subreg_next; web; web = wnext)
{
wnext = web->subreg_next;
free (web);
}
DLIST_WEB (d)->subreg_next = NULL;
}
if (last_check_uses)
sbitmap_difference (live_over_abnormal, live_over_abnormal,
last_check_uses);
if (last_def_id || last_use_id)
{
sbitmap_free (last_live_over_abnormal);
free (last_web_parts);
free (last_def2web);
}
if (!last_max_uid)
{
copy_cache = xcalloc (get_max_uid (), sizeof (copy_cache[0]));
init_bb_info ();
}
else
{
copy_cache = xrealloc (copy_cache, get_max_uid () * sizeof (copy_cache[0]));
memset (©_cache[last_max_uid], 0,
(get_max_uid () - last_max_uid) * sizeof (copy_cache[0]));
}
}
/* Free the per-round build data that is not carried over to the next
   allocator round: move lists, ref arrays of FREE webs, web-part
   conflict bitmaps, the id2web map and the interference bitmaps.  */
void
ra_build_free (void)
{
  struct dlist *d;
  unsigned int i;

  /* Clear move lists and sanity-check that only real webs (not
     subwebs) ever carried conflict lists.  */
  for (i = 0; i < num_webs; i++)
    {
      struct web *web = ID2WEB (i);
      gcc_assert (web);
      gcc_assert (i < num_webs - num_subwebs
		  || (!web->conflict_list && !web->orig_conflict_list));
      web->moves = NULL;
    }

  /* FREE webs won't be reused: release their def/use arrays.
     free (NULL) is a no-op, so no guards are needed.  */
  for (d = WEBS(FREE); d; d = d->next)
    {
      struct web *web = DLIST_WEB (d);
      free (web->defs);
      web->defs = NULL;
      free (web->uses);
      web->uses = NULL;
    }

  /* Release the conflict bitmaps attached to web parts.  */
  for (i = 0; i < df->def_id + df->use_id; i++)
    {
      struct tagged_conflict *cl;
      for (cl = web_parts[i].sub_conflicts; cl; cl = cl->next)
	if (cl->conflicts)
	  BITMAP_XFREE (cl->conflicts);
      web_parts[i].sub_conflicts = NULL;
    }

  wl_moves = NULL;
  free (id2web);
  free (move_handled);
  sbitmap_free (sup_igraph);
  sbitmap_free (igraph);
}
/* Free all remaining build structures when register allocation is
   completely finished.  DF is the dataflow info.  */
void
ra_build_free_all (struct df *df)
{
  unsigned int i;

  free_bb_info ();
  free (copy_cache);
  copy_cache = NULL;

  /* Release any conflict bitmaps still attached to web parts.  */
  for (i = 0; i < df->def_id + df->use_id; i++)
    {
      struct tagged_conflict *cl;
      for (cl = web_parts[i].sub_conflicts; cl; cl = cl->next)
	if (cl->conflicts)
	  BITMAP_XFREE (cl->conflicts);
      web_parts[i].sub_conflicts = NULL;
    }

  sbitmap_free (live_over_abnormal);
  free (web_parts);
  web_parts = NULL;

  if (last_check_uses)
    sbitmap_free (last_check_uses);
  last_check_uses = NULL;

  /* use2web points into the def2web allocation; only one free.  */
  use2web = NULL;
  free (def2web);
  def2web = NULL;
}
#include "gt-ra-build.h"