#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "toplev.h"
#include "recog.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"
#include "params.h"
#ifdef INSN_SCHEDULING
static int issue_rate;
static int sched_verbose_param = 0;
int sched_verbose = 0;
FILE *sched_dump = 0;
static int old_max_uid;
void
fix_sched_param (const char *param, const char *val)
{
if (!strcmp (param, "verbose"))
sched_verbose_param = atoi (val);
else
warning (0, "fix_sched_param: unknown param: %s", param);
}
struct haifa_insn_data *h_i_d;
#define LINE_NOTE(INSN) (h_i_d[INSN_UID (INSN)].line_note)
#define INSN_TICK(INSN) (h_i_d[INSN_UID (INSN)].tick)
#define INTER_TICK(INSN) (h_i_d[INSN_UID (INSN)].inter_tick)
#define INVALID_TICK (-(max_insn_queue_index + 1))
#define MIN_TICK (-max_insn_queue_index)
#define ISSUE_POINTS(INSN) 1
static rtx *line_note_head;
static rtx note_list;
static struct spec_info_def spec_info_var;
static spec_info_t spec_info;
static bool added_recovery_block_p;
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
regset *glat_start, *glat_end;
static rtx *bb_header = 0;
static int old_last_basic_block;
static basic_block before_recovery;
static rtx *insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE (-2)
#define QUEUE_READY (-1)
#define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)
state_t curr_state;
static size_t dfa_state_size;
static char *ready_try;
struct ready_list
{
rtx *vec;
int veclen;
int first;
int n_ready;
};
static struct ready_list *readyp;
static int clock_var;
static int rgn_n_insns;
static int may_trap_exp (rtx, int);
#define CONST_BASED_ADDRESS_P(x) \
(REG_P (x) \
|| ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
|| (GET_CODE (x) == LO_SUM)) \
&& (CONSTANT_P (XEXP (x, 0)) \
|| CONSTANT_P (XEXP (x, 1)))))
static int
may_trap_exp (rtx x, int is_store)
{
enum rtx_code code;
if (x == 0)
return TRAP_FREE;
code = GET_CODE (x);
if (is_store)
{
if (code == MEM && may_trap_p (x))
return TRAP_RISKY;
else
return TRAP_FREE;
}
if (code == MEM)
{
if (MEM_VOLATILE_P (x))
return IRISKY;
if (!may_trap_p (x))
return IFREE;
if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
return PFREE_CANDIDATE;
return PRISKY_CANDIDATE;
}
else
{
const char *fmt;
int i, insn_class = TRAP_FREE;
if (may_trap_p (x))
return TRAP_RISKY;
fmt = GET_RTX_FORMAT (code);
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
{
int tmp_class = may_trap_exp (XEXP (x, i), is_store);
insn_class = WORST_CLASS (insn_class, tmp_class);
}
else if (fmt[i] == 'E')
{
int j;
for (j = 0; j < XVECLEN (x, i); j++)
{
int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
insn_class = WORST_CLASS (insn_class, tmp_class);
if (insn_class == TRAP_RISKY || insn_class == IRISKY)
break;
}
}
if (insn_class == TRAP_RISKY || insn_class == IRISKY)
break;
}
return insn_class;
}
}
int
haifa_classify_insn (rtx insn)
{
rtx pat = PATTERN (insn);
int tmp_class = TRAP_FREE;
int insn_class = TRAP_FREE;
enum rtx_code code;
if (GET_CODE (pat) == PARALLEL)
{
int i, len = XVECLEN (pat, 0);
for (i = len - 1; i >= 0; i--)
{
code = GET_CODE (XVECEXP (pat, 0, i));
switch (code)
{
case CLOBBER:
tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
break;
case SET:
tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
if (tmp_class == TRAP_RISKY)
break;
tmp_class
= WORST_CLASS (tmp_class,
may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
0));
break;
case COND_EXEC:
case TRAP_IF:
tmp_class = TRAP_RISKY;
break;
default:
;
}
insn_class = WORST_CLASS (insn_class, tmp_class);
if (insn_class == TRAP_RISKY || insn_class == IRISKY)
break;
}
}
else
{
code = GET_CODE (pat);
switch (code)
{
case CLOBBER:
tmp_class = may_trap_exp (XEXP (pat, 0), 1);
break;
case SET:
tmp_class = may_trap_exp (SET_DEST (pat), 1);
if (tmp_class == TRAP_RISKY)
break;
tmp_class =
WORST_CLASS (tmp_class,
may_trap_exp (SET_SRC (pat), 0));
break;
case COND_EXEC:
case TRAP_IF:
tmp_class = TRAP_RISKY;
break;
default:;
}
insn_class = tmp_class;
}
return insn_class;
}
HAIFA_INLINE static int insn_cost1 (rtx, enum reg_note, rtx, rtx);
static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx);
static int find_set_reg_weight (rtx);
static void find_insn_reg_weight (basic_block);
static void find_insn_reg_weight1 (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);
static rtx unlink_other_notes (rtx, rtx);
static rtx unlink_line_notes (rtx, rtx);
static void reemit_notes (rtx);
static rtx *ready_lastpos (struct ready_list *);
static void ready_add (struct ready_list *, rtx, bool);
static void ready_sort (struct ready_list *);
static rtx ready_remove_first (struct ready_list *);
static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);
static void debug_ready_list (struct ready_list *);
static void move_insn (rtx);
static rtx ready_element (struct ready_list *, int);
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);
static int max_issue (struct ready_list *, int *, int);
static rtx choose_ready (struct ready_list *);
static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);
static void resolve_dep (rtx, rtx);
static void extend_h_i_d (void);
static void extend_ready (int);
static void extend_global (rtx);
static void extend_all (rtx);
static void init_h_i_d (rtx);
static void generate_recovery_code (rtx);
static void process_insn_depend_be_in_spec (rtx, rtx, ds_t);
static void begin_speculative_block (rtx);
static void add_to_speculative_block (rtx);
static dw_t dep_weak (ds_t);
static edge find_fallthru_edge (basic_block);
static void init_before_recovery (void);
static basic_block create_recovery_block (void);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
static void associate_line_notes_with_blocks (basic_block);
static void change_pattern (rtx, rtx);
static int speculate_insn (rtx, ds_t, rtx *);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void restore_bb_notes (basic_block);
static void extend_bb (basic_block);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (VEC(edge,gc) **, basic_block);
static void init_glat (void);
static void init_glat1 (basic_block);
static void attach_life_info1 (basic_block);
static void free_glat (void);
static void sched_remove_insn (rtx);
static void clear_priorities (rtx);
static void add_jump_dependencies (rtx, rtx);
static void calc_priorities (rtx);
#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
static void check_sched_flags (void);
#endif
#endif
struct sched_info *current_sched_info;
#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else
static struct sched_info current_sched_info_var;
static rtx last_scheduled_insn;
HAIFA_INLINE int
insn_cost (rtx insn, rtx link, rtx used)
{
return insn_cost1 (insn, used ? REG_NOTE_KIND (link) : REG_NOTE_MAX,
link, used);
}
HAIFA_INLINE static int
insn_cost1 (rtx insn, enum reg_note dep_type, rtx link, rtx used)
{
int cost = INSN_COST (insn);
if (cost < 0)
{
if (recog_memoized (insn) < 0)
{
INSN_COST (insn) = 0;
return 0;
}
else
{
cost = insn_default_latency (insn);
if (cost < 0)
cost = 0;
INSN_COST (insn) = cost;
}
}
if (used == 0)
return cost;
if (recog_memoized (used) < 0)
cost = 0;
else
{
gcc_assert (!link || dep_type == REG_NOTE_KIND (link));
if (INSN_CODE (insn) >= 0)
{
if (dep_type == REG_DEP_ANTI)
cost = 0;
else if (dep_type == REG_DEP_OUTPUT)
{
cost = (insn_default_latency (insn)
- insn_default_latency (used));
if (cost <= 0)
cost = 1;
}
else if (bypass_p (insn))
cost = insn_latency (insn, used);
}
if (targetm.sched.adjust_cost_2)
cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost);
else
{
gcc_assert (link);
if (targetm.sched.adjust_cost)
cost = targetm.sched.adjust_cost (used, link, insn, cost);
}
if (cost < 0)
cost = 0;
}
return cost;
}
static int
priority (rtx insn)
{
rtx link;
if (! INSN_P (insn))
return 0;
if (! INSN_PRIORITY_KNOWN (insn))
{
int this_priority = 0;
if (INSN_DEPEND (insn) == 0)
this_priority = insn_cost (insn, 0, 0);
else
{
rtx prev_first, twin;
basic_block rec;
rec = RECOVERY_BLOCK (insn);
if (!rec || rec == EXIT_BLOCK_PTR)
{
prev_first = PREV_INSN (insn);
twin = insn;
}
else
{
prev_first = NEXT_INSN (BB_HEAD (rec));
twin = PREV_INSN (BB_END (rec));
}
do
{
for (link = INSN_DEPEND (twin); link; link = XEXP (link, 1))
{
rtx next;
int next_priority;
next = XEXP (link, 0);
if (BLOCK_FOR_INSN (next) != rec)
{
if (! (*current_sched_info->contributes_to_priority)
(next, insn)
|| ((current_sched_info->flags & DO_SPECULATION)
&& (DEP_STATUS (link) & SPECULATIVE)
&& !(spec_info->flags
& COUNT_SPEC_IN_CRITICAL_PATH)))
continue;
next_priority = insn_cost1 (insn,
twin == insn ?
REG_NOTE_KIND (link) :
REG_DEP_ANTI,
twin == insn ? link : 0,
next) + priority (next);
if (next_priority > this_priority)
this_priority = next_priority;
}
}
twin = PREV_INSN (twin);
}
while (twin != prev_first);
}
INSN_PRIORITY (insn) = this_priority;
INSN_PRIORITY_KNOWN (insn) = 1;
}
return INSN_PRIORITY (insn);
}
#define SCHED_SORT(READY, N_READY) \
do { if ((N_READY) == 2) \
swap_sort (READY, N_READY); \
else if ((N_READY) > 2) \
qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
while (0)
static int
rank_for_schedule (const void *x, const void *y)
{
rtx tmp = *(const rtx *) y;
rtx tmp2 = *(const rtx *) x;
rtx link;
int tmp_class, tmp2_class, depend_count1, depend_count2;
int val, priority_val, weight_val, info_val;
if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
return SCHED_GROUP_P (tmp2) ? 1 : -1;
priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
if (priority_val)
return priority_val;
if (spec_info)
{
ds_t ds1, ds2;
dw_t dw1, dw2;
int dw;
ds1 = TODO_SPEC (tmp) & SPECULATIVE;
if (ds1)
dw1 = dep_weak (ds1);
else
dw1 = NO_DEP_WEAK;
ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
if (ds2)
dw2 = dep_weak (ds2);
else
dw2 = NO_DEP_WEAK;
dw = dw2 - dw1;
if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
return dw;
}
if (!reload_completed &&
(weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
return weight_val;
info_val = (*current_sched_info->rank) (tmp, tmp2);
if (info_val)
return info_val;
if (INSN_P (last_scheduled_insn))
{
link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
tmp_class = 3;
else if (REG_NOTE_KIND (link) == 0)
tmp_class = 1;
else
tmp_class = 2;
link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
tmp2_class = 3;
else if (REG_NOTE_KIND (link) == 0)
tmp2_class = 1;
else
tmp2_class = 2;
if ((val = tmp2_class - tmp_class))
return val;
}
depend_count1 = 0;
for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1))
depend_count1++;
depend_count2 = 0;
for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1))
depend_count2++;
val = depend_count2 - depend_count1;
if (val)
return val;
return INSN_LUID (tmp) - INSN_LUID (tmp2);
}
HAIFA_INLINE static void
swap_sort (rtx *a, int n)
{
rtx insn = a[n - 1];
int i = n - 2;
while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
{
a[i + 1] = a[i];
i -= 1;
}
a[i + 1] = insn;
}
HAIFA_INLINE static void
queue_insn (rtx insn, int n_cycles)
{
int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
gcc_assert (n_cycles <= max_insn_queue_index);
insn_queue[next_q] = link;
q_size += 1;
if (sched_verbose >= 2)
{
fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
(*current_sched_info->print_insn) (insn, 0));
fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
}
QUEUE_INDEX (insn) = next_q;
}
static void
queue_remove (rtx insn)
{
gcc_assert (QUEUE_INDEX (insn) >= 0);
remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
q_size--;
QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}
HAIFA_INLINE static rtx *
ready_lastpos (struct ready_list *ready)
{
gcc_assert (ready->n_ready >= 1);
return ready->vec + ready->first - ready->n_ready + 1;
}
HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
{
if (!first_p)
{
if (ready->first == ready->n_ready)
{
memmove (ready->vec + ready->veclen - ready->n_ready,
ready_lastpos (ready),
ready->n_ready * sizeof (rtx));
ready->first = ready->veclen - 1;
}
ready->vec[ready->first - ready->n_ready] = insn;
}
else
{
if (ready->first == ready->veclen - 1)
{
if (ready->n_ready)
memmove (ready->vec + ready->veclen - ready->n_ready - 1,
ready_lastpos (ready),
ready->n_ready * sizeof (rtx));
ready->first = ready->veclen - 2;
}
ready->vec[++(ready->first)] = insn;
}
ready->n_ready++;
gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
QUEUE_INDEX (insn) = QUEUE_READY;
}
HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
rtx t;
gcc_assert (ready->n_ready);
t = ready->vec[ready->first--];
ready->n_ready--;
if (ready->n_ready == 0)
ready->first = ready->veclen - 1;
gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
QUEUE_INDEX (t) = QUEUE_NOWHERE;
return t;
}
HAIFA_INLINE static rtx
ready_element (struct ready_list *ready, int index)
{
gcc_assert (ready->n_ready && index < ready->n_ready);
return ready->vec[ready->first - index];
}
HAIFA_INLINE static rtx
ready_remove (struct ready_list *ready, int index)
{
rtx t;
int i;
if (index == 0)
return ready_remove_first (ready);
gcc_assert (ready->n_ready && index < ready->n_ready);
t = ready->vec[ready->first - index];
ready->n_ready--;
for (i = index; i < ready->n_ready; i++)
ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
QUEUE_INDEX (t) = QUEUE_NOWHERE;
return t;
}
static void
ready_remove_insn (rtx insn)
{
int i;
for (i = 0; i < readyp->n_ready; i++)
if (ready_element (readyp, i) == insn)
{
ready_remove (readyp, i);
return;
}
gcc_unreachable ();
}
HAIFA_INLINE static void
ready_sort (struct ready_list *ready)
{
rtx *first = ready_lastpos (ready);
SCHED_SORT (first, ready->n_ready);
}
HAIFA_INLINE static void
adjust_priority (rtx prev)
{
if (targetm.sched.adjust_priority)
INSN_PRIORITY (prev) =
targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}
HAIFA_INLINE static void
advance_one_cycle (void)
{
if (targetm.sched.dfa_pre_cycle_insn)
state_transition (curr_state,
targetm.sched.dfa_pre_cycle_insn ());
state_transition (curr_state, NULL);
if (targetm.sched.dfa_post_cycle_insn)
state_transition (curr_state,
targetm.sched.dfa_post_cycle_insn ());
}
static int last_clock_var;
static int
schedule_insn (rtx insn)
{
rtx link;
int advance = 0;
if (sched_verbose >= 1)
{
char buf[2048];
print_insn (buf, insn, 0);
buf[40] = 0;
fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
if (recog_memoized (insn) < 0)
fprintf (sched_dump, "nothing");
else
print_reservation (sched_dump, insn);
fputc ('\n', sched_dump);
}
gcc_assert (INSN_DEP_COUNT (insn) == 0);
gcc_assert (!LOG_LINKS (insn));
gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
if (current_sched_info->flags & USE_DEPS_LIST)
free_DEPS_LIST_list (&RESOLVED_DEPS (insn));
else
free_INSN_LIST_list (&RESOLVED_DEPS (insn));
gcc_assert (INSN_TICK (insn) >= MIN_TICK);
if (INSN_TICK (insn) > clock_var)
gcc_assert (flag_sched_stalled_insns);
INSN_TICK (insn) = clock_var;
for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
{
rtx next = XEXP (link, 0);
resolve_dep (next, insn);
if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
{
int effective_cost;
effective_cost = try_ready (next);
if (effective_cost >= 0
&& SCHED_GROUP_P (next)
&& advance < effective_cost)
advance = effective_cost;
}
else
{
gcc_assert (XEXP (link, 1) == 0);
fix_recovery_deps (RECOVERY_BLOCK (insn));
}
}
if (issue_rate > 1
&& GET_CODE (PATTERN (insn)) != USE
&& GET_CODE (PATTERN (insn)) != CLOBBER)
{
if (reload_completed)
PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
last_clock_var = clock_var;
}
return advance;
}
static rtx
unlink_other_notes (rtx insn, rtx tail)
{
rtx prev = PREV_INSN (insn);
while (insn != tail && NOTE_NOT_BB_P (insn))
{
rtx next = NEXT_INSN (insn);
basic_block bb = BLOCK_FOR_INSN (insn);
if (prev)
NEXT_INSN (prev) = next;
if (next)
PREV_INSN (next) = prev;
if (bb)
{
gcc_assert (BB_HEAD (bb) != insn);
if (BB_END (bb) == insn)
BB_END (bb) = prev;
}
if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
{
PREV_INSN (insn) = note_list;
if (note_list)
NEXT_INSN (note_list) = insn;
note_list = insn;
}
insn = next;
}
return insn;
}
static rtx
unlink_line_notes (rtx insn, rtx tail)
{
rtx prev = PREV_INSN (insn);
while (insn != tail && NOTE_NOT_BB_P (insn))
{
rtx next = NEXT_INSN (insn);
if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
{
basic_block bb = BLOCK_FOR_INSN (insn);
if (prev)
NEXT_INSN (prev) = next;
if (next)
PREV_INSN (next) = prev;
if (bb)
{
gcc_assert (BB_HEAD (bb) != insn);
if (BB_END (bb) == insn)
BB_END (bb) = prev;
}
LINE_NOTE (insn) = insn;
}
else
prev = insn;
insn = next;
}
return insn;
}
void
get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
{
rtx beg_head = BB_HEAD (beg);
rtx beg_tail = BB_END (beg);
rtx end_head = BB_HEAD (end);
rtx end_tail = BB_END (end);
if (LABEL_P (beg_head))
beg_head = NEXT_INSN (beg_head);
while (beg_head != beg_tail)
if (NOTE_P (beg_head))
beg_head = NEXT_INSN (beg_head);
else
break;
*headp = beg_head;
if (beg == end)
end_head = beg_head;
else if (LABEL_P (end_head))
end_head = NEXT_INSN (end_head);
while (end_head != end_tail)
if (NOTE_P (end_tail))
end_tail = PREV_INSN (end_tail);
else
break;
*tailp = end_tail;
}
int
no_real_insns_p (rtx head, rtx tail)
{
while (head != NEXT_INSN (tail))
{
if (!NOTE_P (head) && !LABEL_P (head))
return 0;
head = NEXT_INSN (head);
}
return 1;
}
void
rm_line_notes (rtx head, rtx tail)
{
rtx next_tail;
rtx insn;
next_tail = NEXT_INSN (tail);
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
{
rtx prev;
if (NOTE_NOT_BB_P (insn))
{
prev = insn;
insn = unlink_line_notes (insn, next_tail);
gcc_assert (prev != tail && prev != head && insn != next_tail);
}
}
}
void
save_line_notes (int b, rtx head, rtx tail)
{
rtx next_tail;
rtx line = line_note_head[b];
rtx insn;
next_tail = NEXT_INSN (tail);
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
line = insn;
else
LINE_NOTE (insn) = line;
}
void
restore_line_notes (rtx head, rtx tail)
{
rtx line, note, prev, new;
int added_notes = 0;
rtx next_tail, insn;
head = head;
next_tail = NEXT_INSN (tail);
for (line = head; line; line = PREV_INSN (line))
if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
break;
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
line = insn;
else if (!NOTE_P (insn)
&& INSN_UID (insn) < old_max_uid
&& (note = LINE_NOTE (insn)) != 0
&& note != line
&& (line == 0
#ifdef USE_MAPPED_LOCATION
|| NOTE_SOURCE_LOCATION (note) != NOTE_SOURCE_LOCATION (line)
#else
|| NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
|| NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)
#endif
))
{
line = note;
prev = PREV_INSN (insn);
if (LINE_NOTE (note))
{
LINE_NOTE (note) = 0;
PREV_INSN (note) = prev;
NEXT_INSN (prev) = note;
PREV_INSN (insn) = note;
NEXT_INSN (note) = insn;
set_block_for_insn (note, BLOCK_FOR_INSN (insn));
}
else
{
added_notes++;
new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
#ifndef USE_MAPPED_LOCATION
NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
#endif
}
}
if (sched_verbose && added_notes)
fprintf (sched_dump, ";; added %d line-number notes\n", added_notes);
}
void
rm_redundant_line_notes (void)
{
rtx line = 0;
rtx insn = get_insns ();
int active_insn = 0;
int notes = 0;
for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
{
if (active_insn == 0)
{
notes++;
SET_INSN_DELETED (insn);
}
else if (line
#ifdef USE_MAPPED_LOCATION
&& NOTE_SOURCE_LOCATION (line) == NOTE_SOURCE_LOCATION (insn)
#else
&& NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
&& NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn)
#endif
)
{
notes++;
SET_INSN_DELETED (line);
line = insn;
}
else
line = insn;
active_insn = 0;
}
else if (!((NOTE_P (insn)
&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
|| (NONJUMP_INSN_P (insn)
&& (GET_CODE (PATTERN (insn)) == USE
|| GET_CODE (PATTERN (insn)) == CLOBBER))))
active_insn++;
if (sched_verbose && notes)
fprintf (sched_dump, ";; deleted %d line-number notes\n", notes);
}
void
rm_other_notes (rtx head, rtx tail)
{
rtx next_tail;
rtx insn;
note_list = 0;
if (head == tail && (! INSN_P (head)))
return;
next_tail = NEXT_INSN (tail);
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
{
rtx prev;
if (NOTE_NOT_BB_P (insn))
{
prev = insn;
insn = unlink_other_notes (insn, next_tail);
gcc_assert (prev != tail && prev != head && insn != next_tail);
}
}
}
static int
find_set_reg_weight (rtx x)
{
if (GET_CODE (x) == CLOBBER
&& register_operand (SET_DEST (x), VOIDmode))
return 1;
if (GET_CODE (x) == SET
&& register_operand (SET_DEST (x), VOIDmode))
{
if (REG_P (SET_DEST (x)))
{
if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
return 1;
else
return 0;
}
return 1;
}
return 0;
}
static void
find_insn_reg_weight (basic_block bb)
{
rtx insn, next_tail, head, tail;
get_ebb_head_tail (bb, bb, &head, &tail);
next_tail = NEXT_INSN (tail);
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
find_insn_reg_weight1 (insn);
}
static void
find_insn_reg_weight1 (rtx insn)
{
int reg_weight = 0;
rtx x;
if (! INSN_P (insn))
return;
x = PATTERN (insn);
reg_weight += find_set_reg_weight (x);
if (GET_CODE (x) == PARALLEL)
{
int j;
for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
{
x = XVECEXP (PATTERN (insn), 0, j);
reg_weight += find_set_reg_weight (x);
}
}
for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
{
if (REG_NOTE_KIND (x) == REG_DEAD
|| REG_NOTE_KIND (x) == REG_UNUSED)
reg_weight--;
}
INSN_REG_WEIGHT (insn) = reg_weight;
}
static void
queue_to_ready (struct ready_list *ready)
{
rtx insn;
rtx link;
q_ptr = NEXT_Q (q_ptr);
for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
{
insn = XEXP (link, 0);
q_size -= 1;
if (sched_verbose >= 2)
fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
(*current_sched_info->print_insn) (insn, 0));
if (!reload_completed
&& ready->n_ready > MAX_SCHED_READY_INSNS
&& !SCHED_GROUP_P (insn))
{
if (sched_verbose >= 2)
fprintf (sched_dump, "requeued because ready full\n");
queue_insn (insn, 1);
}
else
{
ready_add (ready, insn, false);
if (sched_verbose >= 2)
fprintf (sched_dump, "moving to ready without stalls\n");
}
}
free_INSN_LIST_list (&insn_queue[q_ptr]);
if (ready->n_ready == 0)
{
int stalls;
for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
{
if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
{
for (; link; link = XEXP (link, 1))
{
insn = XEXP (link, 0);
q_size -= 1;
if (sched_verbose >= 2)
fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
(*current_sched_info->print_insn) (insn, 0));
ready_add (ready, insn, false);
if (sched_verbose >= 2)
fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
}
free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
advance_one_cycle ();
break;
}
advance_one_cycle ();
}
q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
clock_var += stalls;
}
}
static bool
ok_for_early_queue_removal (rtx insn)
{
int n_cycles;
rtx prev_insn = last_scheduled_insn;
if (targetm.sched.is_costly_dependence)
{
for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
{
for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
{
rtx dep_link = 0;
int dep_cost;
if (!NOTE_P (prev_insn))
{
dep_link = find_insn_list (insn, INSN_DEPEND (prev_insn));
if (dep_link)
{
dep_cost = insn_cost (prev_insn, dep_link, insn) ;
if (targetm.sched.is_costly_dependence (prev_insn, insn,
dep_link, dep_cost,
flag_sched_stalled_insns_dep - n_cycles))
return false;
}
}
if (GET_MODE (prev_insn) == TImode)
break;
}
if (!prev_insn)
break;
prev_insn = PREV_INSN (prev_insn);
}
}
return true;
}
static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
rtx insn;
rtx link;
rtx next_link;
rtx prev_link;
bool move_to_ready;
int cost;
state_t temp_state = alloca (dfa_state_size);
int stalls;
int insns_removed = 0;
if (! flag_sched_stalled_insns)
return 0;
for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
{
if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
{
if (sched_verbose > 6)
fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
prev_link = 0;
while (link)
{
next_link = XEXP (link, 1);
insn = XEXP (link, 0);
if (insn && sched_verbose > 6)
print_rtl_single (sched_dump, insn);
memcpy (temp_state, state, dfa_state_size);
if (recog_memoized (insn) < 0)
cost = 0;
else
cost = state_transition (temp_state, insn);
if (sched_verbose >= 6)
fprintf (sched_dump, "transition cost = %d\n", cost);
move_to_ready = false;
if (cost < 0)
{
move_to_ready = ok_for_early_queue_removal (insn);
if (move_to_ready == true)
{
q_size -= 1;
ready_add (ready, insn, false);
if (prev_link)
XEXP (prev_link, 1) = next_link;
else
insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
free_INSN_LIST_node (link);
if (sched_verbose >= 2)
fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
(*current_sched_info->print_insn) (insn, 0));
insns_removed++;
if (insns_removed == flag_sched_stalled_insns)
return insns_removed;
}
}
if (move_to_ready == false)
prev_link = link;
link = next_link;
}
}
}
return insns_removed;
}
static void
debug_ready_list (struct ready_list *ready)
{
rtx *p;
int i;
if (ready->n_ready == 0)
{
fprintf (sched_dump, "\n");
return;
}
p = ready_lastpos (ready);
for (i = 0; i < ready->n_ready; i++)
fprintf (sched_dump, " %s", (*current_sched_info->print_insn) (p[i], 0));
fprintf (sched_dump, "\n");
}
static void
reemit_notes (rtx insn)
{
rtx note, last = insn;
for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
{
if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
{
enum insn_note note_type = INTVAL (XEXP (note, 0));
last = emit_note_before (note_type, last);
remove_note (insn, note);
}
}
}
static void
move_insn (rtx insn)
{
rtx last = last_scheduled_insn;
if (PREV_INSN (insn) != last)
{
basic_block bb;
rtx note;
int jump_p = 0;
bb = BLOCK_FOR_INSN (insn);
gcc_assert (BB_HEAD (bb) != insn);
if (BB_END (bb) == insn)
{
jump_p = control_flow_insn_p (insn);
gcc_assert (!jump_p
|| ((current_sched_info->flags & SCHED_RGN)
&& IS_SPECULATION_BRANCHY_CHECK_P (insn))
|| (current_sched_info->flags & SCHED_EBB));
gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
BB_END (bb) = PREV_INSN (insn);
}
gcc_assert (BB_END (bb) != last);
if (jump_p)
{
rtx nt = current_sched_info->next_tail;
note = NEXT_INSN (insn);
while (NOTE_NOT_BB_P (note) && note != nt)
note = NEXT_INSN (note);
if (note != nt
&& (LABEL_P (note)
|| BARRIER_P (note)))
note = NEXT_INSN (note);
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
}
else
note = insn;
NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
NEXT_INSN (note) = NEXT_INSN (last);
PREV_INSN (NEXT_INSN (last)) = note;
NEXT_INSN (last) = insn;
PREV_INSN (insn) = last;
bb = BLOCK_FOR_INSN (last);
if (jump_p)
{
fix_jump_move (insn);
if (BLOCK_FOR_INSN (insn) != bb)
move_block_after_check (insn);
gcc_assert (BB_END (bb) == last);
}
set_block_for_insn (insn, bb);
if (BB_END (bb) == last)
BB_END (bb) = insn;
}
reemit_notes (insn);
SCHED_GROUP_P (insn) = 0;
}
struct choice_entry
{
int index;
int rest;
int n;
state_t state;
};
static struct choice_entry *choice_stack;
static int cycle_issued_insns;
static int max_lookahead_tries;
static int cached_first_cycle_multipass_dfa_lookahead = 0;
static int cached_issue_rate = 0;
static int
max_issue (struct ready_list *ready, int *index, int max_points)
{
int n, i, all, n_ready, best, delay, tries_num, points = -1;
struct choice_entry *top;
rtx insn;
best = 0;
memcpy (choice_stack->state, curr_state, dfa_state_size);
top = choice_stack;
top->rest = cached_first_cycle_multipass_dfa_lookahead;
top->n = 0;
n_ready = ready->n_ready;
for (all = i = 0; i < n_ready; i++)
if (!ready_try [i])
all++;
i = 0;
tries_num = 0;
for (;;)
{
if (top->rest == 0 || i >= n_ready)
{
if (top == choice_stack)
break;
if (best < top - choice_stack && ready_try [0])
{
best = top - choice_stack;
*index = choice_stack [1].index;
points = top->n;
if (top->n == max_points || best == all)
break;
}
i = top->index;
ready_try [i] = 0;
top--;
memcpy (curr_state, top->state, dfa_state_size);
}
else if (!ready_try [i])
{
tries_num++;
if (tries_num > max_lookahead_tries)
break;
insn = ready_element (ready, i);
delay = state_transition (curr_state, insn);
if (delay < 0)
{
if (state_dead_lock_p (curr_state))
top->rest = 0;
else
top->rest--;
n = top->n;
if (memcmp (top->state, curr_state, dfa_state_size) != 0)
n += ISSUE_POINTS (insn);
top++;
top->rest = cached_first_cycle_multipass_dfa_lookahead;
top->index = i;
top->n = n;
memcpy (top->state, curr_state, dfa_state_size);
ready_try [i] = 1;
i = -1;
}
}
i++;
}
while (top != choice_stack)
{
ready_try [top->index] = 0;
top--;
}
memcpy (curr_state, choice_stack->state, dfa_state_size);
if (sched_verbose >= 4)
fprintf (sched_dump, ";;\t\tChoosed insn : %s; points: %d/%d\n",
(*current_sched_info->print_insn) (ready_element (ready, *index),
0),
points, max_points);
return best;
}
static rtx
choose_ready (struct ready_list *ready)
{
int lookahead = 0;
if (targetm.sched.first_cycle_multipass_dfa_lookahead)
lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
return ready_remove_first (ready);
else
{
int index = 0, i, n;
rtx insn;
int more_issue, max_points, try_data = 1, try_control = 1;
if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
{
cached_first_cycle_multipass_dfa_lookahead = lookahead;
max_lookahead_tries = 100;
for (i = 0; i < issue_rate; i++)
max_lookahead_tries *= lookahead;
}
insn = ready_element (ready, 0);
if (INSN_CODE (insn) < 0)
return ready_remove_first (ready);
if (spec_info
&& spec_info->flags & (PREFER_NON_DATA_SPEC
| PREFER_NON_CONTROL_SPEC))
{
for (i = 0, n = ready->n_ready; i < n; i++)
{
rtx x;
ds_t s;
x = ready_element (ready, i);
s = TODO_SPEC (x);
if (spec_info->flags & PREFER_NON_DATA_SPEC
&& !(s & DATA_SPEC))
{
try_data = 0;
if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
|| !try_control)
break;
}
if (spec_info->flags & PREFER_NON_CONTROL_SPEC
&& !(s & CONTROL_SPEC))
{
try_control = 0;
if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
break;
}
}
}
if ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
|| (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
|| (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
&& !targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
(insn)))
{
change_queue_index (insn, 1);
return 0;
}
max_points = ISSUE_POINTS (insn);
more_issue = issue_rate - cycle_issued_insns - 1;
for (i = 1; i < ready->n_ready; i++)
{
insn = ready_element (ready, i);
ready_try [i]
= (INSN_CODE (insn) < 0
|| (!try_data && (TODO_SPEC (insn) & DATA_SPEC))
|| (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
|| (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
&& !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
(insn)));
if (!ready_try [i] && more_issue-- > 0)
max_points += ISSUE_POINTS (insn);
}
if (max_issue (ready, &index, max_points) == 0)
return ready_remove_first (ready);
else
return ready_remove (ready, index);
}
}
void
schedule_block (basic_block *target_bb, int rgn_n_insns1)
{
struct ready_list ready;
int i, first_cycle_insn_p;
int can_issue_more;
state_t temp_state = NULL;
int sort_p, advance, start_clock_var;
rtx prev_head = current_sched_info->prev_head;
rtx next_tail = current_sched_info->next_tail;
rtx head = NEXT_INSN (prev_head);
rtx tail = PREV_INSN (next_tail);
gcc_assert (head != tail || INSN_P (head));
added_recovery_block_p = false;
if (sched_verbose)
dump_new_block_header (0, *target_bb, head, tail);
state_reset (curr_state);
readyp = &ready;
ready.vec = NULL;
ready_try = NULL;
choice_stack = NULL;
rgn_n_insns = -1;
extend_ready (rgn_n_insns1 + 1);
ready.first = ready.veclen - 1;
ready.n_ready = 0;
temp_state = alloca (dfa_state_size);
if (targetm.sched.md_init)
targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
last_scheduled_insn = prev_head;
gcc_assert (NOTE_P (last_scheduled_insn)
&& BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
q_ptr = 0;
q_size = 0;
insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
clock_var = -1;
(*current_sched_info->init_ready_list) ();
if (!reload_completed && ready.n_ready > MAX_SCHED_READY_INSNS)
{
ready_sort (&ready);
for (i = MAX_SCHED_READY_INSNS; i < ready.n_ready; i++)
if (!SCHED_GROUP_P (ready_element (&ready, i)))
break;
if (sched_verbose >= 2)
{
fprintf (sched_dump,
";;\t\tReady list on entry: %d insns\n", ready.n_ready);
fprintf (sched_dump,
";;\t\t before reload => truncated to %d insns\n", i);
}
while (i < ready.n_ready)
queue_insn (ready_remove (&ready, i), 1);
}
restore_bb_notes (*target_bb);
last_clock_var = -1;
advance = 0;
sort_p = TRUE;
while ((*current_sched_info->schedule_more_p) ())
{
do
{
start_clock_var = clock_var;
clock_var++;
advance_one_cycle ();
queue_to_ready (&ready);
gcc_assert (ready.n_ready);
if (sched_verbose >= 2)
{
fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
debug_ready_list (&ready);
}
advance -= clock_var - start_clock_var;
}
while (advance > 0);
if (sort_p)
{
ready_sort (&ready);
if (sched_verbose >= 2)
{
fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
debug_ready_list (&ready);
}
}
if (sort_p && targetm.sched.reorder
&& (ready.n_ready == 0
|| !SCHED_GROUP_P (ready_element (&ready, 0))))
can_issue_more =
targetm.sched.reorder (sched_dump, sched_verbose,
ready_lastpos (&ready),
&ready.n_ready, clock_var);
else
can_issue_more = issue_rate;
first_cycle_insn_p = 1;
cycle_issued_insns = 0;
for (;;)
{
rtx insn;
int cost;
bool asm_p = false;
if (sched_verbose >= 2)
{
fprintf (sched_dump, ";;\tReady list (t = %3d): ",
clock_var);
debug_ready_list (&ready);
}
if (ready.n_ready == 0
&& can_issue_more
&& reload_completed)
{
if (sched_verbose >= 6)
fprintf(sched_dump,";;\t\tSecond chance\n");
memcpy (temp_state, curr_state, dfa_state_size);
if (early_queue_to_ready (temp_state, &ready))
ready_sort (&ready);
}
if (ready.n_ready == 0 || !can_issue_more
|| state_dead_lock_p (curr_state)
|| !(*current_sched_info->schedule_more_p) ())
break;
if (sort_p)
{
insn = choose_ready (&ready);
if (!insn)
continue;
}
else
insn = ready_remove_first (&ready);
if (targetm.sched.dfa_new_cycle
&& targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
insn, last_clock_var,
clock_var, &sort_p))
{
ready_add (&ready, insn, true);
break;
}
sort_p = TRUE;
memcpy (temp_state, curr_state, dfa_state_size);
if (recog_memoized (insn) < 0)
{
asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
|| asm_noperands (PATTERN (insn)) >= 0);
if (!first_cycle_insn_p && asm_p)
cost = 1;
else
cost = 0;
}
else
{
cost = state_transition (temp_state, insn);
if (cost < 0)
cost = 0;
else if (cost == 0)
cost = 1;
}
if (cost >= 1)
{
queue_insn (insn, cost);
if (SCHED_GROUP_P (insn))
{
advance = cost;
break;
}
continue;
}
if (current_sched_info->can_schedule_ready_p
&& ! (*current_sched_info->can_schedule_ready_p) (insn))
{
TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
continue;
}
if (TODO_SPEC (insn) & SPECULATIVE)
generate_recovery_code (insn);
if (control_flow_insn_p (last_scheduled_insn)
|| current_sched_info->advance_target_bb (*target_bb, insn))
{
*target_bb = current_sched_info->advance_target_bb
(*target_bb, 0);
if (sched_verbose)
{
rtx x;
x = next_real_insn (last_scheduled_insn);
gcc_assert (x);
dump_new_block_header (1, *target_bb, x, tail);
}
last_scheduled_insn = bb_note (*target_bb);
}
(*current_sched_info->begin_schedule_ready) (insn,
last_scheduled_insn);
move_insn (insn);
last_scheduled_insn = insn;
if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
{
cycle_issued_insns++;
memcpy (curr_state, temp_state, dfa_state_size);
}
if (targetm.sched.variable_issue)
can_issue_more =
targetm.sched.variable_issue (sched_dump, sched_verbose,
insn, can_issue_more);
else if (GET_CODE (PATTERN (insn)) != USE
&& GET_CODE (PATTERN (insn)) != CLOBBER)
can_issue_more--;
advance = schedule_insn (insn);
if (advance == 0 && asm_p)
advance = 1;
if (advance != 0)
break;
first_cycle_insn_p = 0;
if (ready.n_ready > 0)
ready_sort (&ready);
if (targetm.sched.reorder2
&& (ready.n_ready == 0
|| !SCHED_GROUP_P (ready_element (&ready, 0))))
{
can_issue_more =
targetm.sched.reorder2 (sched_dump, sched_verbose,
ready.n_ready
? ready_lastpos (&ready) : NULL,
&ready.n_ready, clock_var);
}
}
}
if (sched_verbose)
{
fprintf (sched_dump, ";;\tReady list (final): ");
debug_ready_list (&ready);
}
if (current_sched_info->queue_must_finish_empty)
gcc_assert (!q_size && !ready.n_ready);
else
{
for (i = ready.n_ready - 1; i >= 0; i--)
{
rtx x;
x = ready_element (&ready, i);
QUEUE_INDEX (x) = QUEUE_NOWHERE;
TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
}
if (q_size)
for (i = 0; i <= max_insn_queue_index; i++)
{
rtx link;
for (link = insn_queue[i]; link; link = XEXP (link, 1))
{
rtx x;
x = XEXP (link, 0);
QUEUE_INDEX (x) = QUEUE_NOWHERE;
TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
}
free_INSN_LIST_list (&insn_queue[i]);
}
}
if (!current_sched_info->queue_must_finish_empty
|| added_recovery_block_p)
{
fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
}
if (targetm.sched.md_finish)
targetm.sched.md_finish (sched_dump, sched_verbose);
head = NEXT_INSN (prev_head);
tail = last_scheduled_insn;
if (note_list != 0)
{
basic_block head_bb = BLOCK_FOR_INSN (head);
rtx note_head = note_list;
while (PREV_INSN (note_head))
{
set_block_for_insn (note_head, head_bb);
note_head = PREV_INSN (note_head);
}
set_block_for_insn (note_head, head_bb);
PREV_INSN (note_head) = PREV_INSN (head);
NEXT_INSN (PREV_INSN (head)) = note_head;
PREV_INSN (head) = note_list;
NEXT_INSN (note_list) = head;
head = note_head;
}
if (sched_verbose)
{
fprintf (sched_dump, ";; total time = %d\n;; new head = %d\n",
clock_var, INSN_UID (head));
fprintf (sched_dump, ";; new tail = %d\n\n",
INSN_UID (tail));
}
current_sched_info->head = head;
current_sched_info->tail = tail;
free (ready.vec);
free (ready_try);
for (i = 0; i <= rgn_n_insns; i++)
free (choice_stack [i].state);
free (choice_stack);
}
int
set_priorities (rtx head, rtx tail)
{
rtx insn;
int n_insn;
int sched_max_insns_priority =
current_sched_info->sched_max_insns_priority;
rtx prev_head;
if (head == tail && (! INSN_P (head)))
return 0;
n_insn = 0;
prev_head = PREV_INSN (head);
for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
{
if (!INSN_P (insn))
continue;
n_insn++;
(void) priority (insn);
if (INSN_PRIORITY_KNOWN (insn))
sched_max_insns_priority =
MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
}
current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
return n_insn;
}
static int luid;
void
sched_init (void)
{
basic_block b;
rtx insn;
int i;
memcpy (¤t_sched_info_var, current_sched_info,
sizeof (current_sched_info_var));
current_sched_info = ¤t_sched_info_var;
#ifdef HAVE_cc0
flag_schedule_speculative_load = 0;
#endif
sched_verbose = sched_verbose_param;
if (sched_verbose_param == 0 && dump_file)
sched_verbose = 1;
sched_dump = ((sched_verbose_param >= 10 || !dump_file)
? stderr : dump_file);
if (targetm.sched.set_sched_flags)
{
spec_info = &spec_info_var;
targetm.sched.set_sched_flags (spec_info);
if (current_sched_info->flags & DO_SPECULATION)
spec_info->weakness_cutoff =
(PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
else
spec_info = 0;
#ifdef ENABLE_CHECKING
check_sched_flags ();
#endif
}
else
spec_info = 0;
if (targetm.sched.issue_rate)
issue_rate = targetm.sched.issue_rate ();
else
issue_rate = 1;
if (cached_issue_rate != issue_rate)
{
cached_issue_rate = issue_rate;
cached_first_cycle_multipass_dfa_lookahead = 0;
}
old_max_uid = 0;
h_i_d = 0;
extend_h_i_d ();
for (i = 0; i < old_max_uid; i++)
{
h_i_d[i].cost = -1;
h_i_d[i].todo_spec = HARD_DEP;
h_i_d[i].queue_index = QUEUE_NOWHERE;
h_i_d[i].tick = INVALID_TICK;
h_i_d[i].inter_tick = INVALID_TICK;
}
if (targetm.sched.init_dfa_pre_cycle_insn)
targetm.sched.init_dfa_pre_cycle_insn ();
if (targetm.sched.init_dfa_post_cycle_insn)
targetm.sched.init_dfa_post_cycle_insn ();
dfa_start ();
dfa_state_size = state_size ();
curr_state = xmalloc (dfa_state_size);
h_i_d[0].luid = 0;
luid = 1;
FOR_EACH_BB (b)
for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
{
INSN_LUID (insn) = luid;
if (!NOTE_P (insn))
++luid;
if (insn == BB_END (b))
break;
}
init_dependency_caches (luid);
init_alias_analysis ();
line_note_head = 0;
old_last_basic_block = 0;
glat_start = 0;
glat_end = 0;
extend_bb (0);
if (current_sched_info->flags & USE_GLAT)
init_glat ();
FOR_EACH_BB_REVERSE (b)
find_insn_reg_weight (b);
if (targetm.sched.md_init_global)
targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);
nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
before_recovery = 0;
#ifdef ENABLE_CHECKING
check_cfg (0, 0);
#endif
}
void
sched_finish (void)
{
free (h_i_d);
free (curr_state);
dfa_finish ();
free_dependency_caches ();
end_alias_analysis ();
free (line_note_head);
free_glat ();
if (targetm.sched.md_finish_global)
targetm.sched.md_finish_global (sched_dump, sched_verbose);
if (spec_info && spec_info->dump)
{
char c = reload_completed ? 'a' : 'b';
fprintf (spec_info->dump,
";; %s:\n", current_function_name ());
fprintf (spec_info->dump,
";; Procedure %cr-begin-data-spec motions == %d\n",
c, nr_begin_data);
fprintf (spec_info->dump,
";; Procedure %cr-be-in-data-spec motions == %d\n",
c, nr_be_in_data);
fprintf (spec_info->dump,
";; Procedure %cr-begin-control-spec motions == %d\n",
c, nr_begin_control);
fprintf (spec_info->dump,
";; Procedure %cr-be-in-control-spec motions == %d\n",
c, nr_be_in_control);
}
#ifdef ENABLE_CHECKING
if (!reload_completed)
check_cfg (0, 0);
#endif
current_sched_info = NULL;
}
static void
fix_inter_tick (rtx head, rtx tail)
{
bitmap_head processed;
int next_clock = clock_var + 1;
bitmap_initialize (&processed, 0);
for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
{
if (INSN_P (head))
{
int tick;
rtx link;
tick = INSN_TICK (head);
gcc_assert (tick >= MIN_TICK);
if (!bitmap_bit_p (&processed, INSN_LUID (head)))
{
bitmap_set_bit (&processed, INSN_LUID (head));
tick -= next_clock;
if (tick < MIN_TICK)
tick = MIN_TICK;
INSN_TICK (head) = tick;
}
for (link = INSN_DEPEND (head); link; link = XEXP (link, 1))
{
rtx next;
next = XEXP (link, 0);
tick = INSN_TICK (next);
if (tick != INVALID_TICK
&& !bitmap_bit_p (&processed, INSN_LUID (next)))
{
bitmap_set_bit (&processed, INSN_LUID (next));
tick -= next_clock;
if (tick < MIN_TICK)
tick = MIN_TICK;
if (tick > INTER_TICK (next))
INTER_TICK (next) = tick;
else
tick = INTER_TICK (next);
INSN_TICK (next) = tick;
}
}
}
}
bitmap_clear (&processed);
}
int
try_ready (rtx next)
{
ds_t old_ts, *ts;
rtx link;
ts = &TODO_SPEC (next);
old_ts = *ts;
gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
&& ((old_ts & HARD_DEP)
|| (old_ts & SPECULATIVE)));
if (!(current_sched_info->flags & DO_SPECULATION))
{
if (!LOG_LINKS (next))
*ts &= ~HARD_DEP;
}
else
{
*ts &= ~SPECULATIVE & ~HARD_DEP;
link = LOG_LINKS (next);
if (link)
{
if (DEP_STATUS (link) & SPECULATIVE)
{
*ts = DEP_STATUS (link) & SPECULATIVE;
while ((link = XEXP (link, 1)))
*ts = ds_merge (*ts, DEP_STATUS (link) & SPECULATIVE);
if (dep_weak (*ts) < spec_info->weakness_cutoff)
*ts = (*ts & ~SPECULATIVE) | HARD_DEP;
}
else
*ts |= HARD_DEP;
}
}
if (*ts & HARD_DEP)
gcc_assert (*ts == old_ts
&& QUEUE_INDEX (next) == QUEUE_NOWHERE);
else if (current_sched_info->new_ready)
*ts = current_sched_info->new_ready (next, *ts);
if ((*ts & SPECULATIVE)
&& *ts != old_ts)
{
int res;
rtx new_pat;
gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
res = speculate_insn (next, *ts, &new_pat);
switch (res)
{
case -1:
*ts = (*ts & ~SPECULATIVE) | HARD_DEP;
break;
case 0:
if (!ORIG_PAT (next))
ORIG_PAT (next) = PATTERN (next);
break;
case 1:
if (!ORIG_PAT (next))
ORIG_PAT (next) = PATTERN (next);
change_pattern (next, new_pat);
break;
default:
gcc_unreachable ();
}
}
gcc_assert (!ORIG_PAT (next)
|| !IS_SPECULATION_BRANCHY_CHECK_P (next));
if (*ts & HARD_DEP)
{
change_queue_index (next, QUEUE_NOWHERE);
return -1;
}
else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
{
change_pattern (next, ORIG_PAT (next));
ORIG_PAT (next) = 0;
}
if (sched_verbose >= 2)
{
int s = TODO_SPEC (next);
fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
(*current_sched_info->print_insn) (next, 0));
if (spec_info && spec_info->dump)
{
if (s & BEGIN_DATA)
fprintf (spec_info->dump, "; data-spec;");
if (s & BEGIN_CONTROL)
fprintf (spec_info->dump, "; control-spec;");
if (s & BE_IN_CONTROL)
fprintf (spec_info->dump, "; in-control-spec;");
}
fprintf (sched_dump, "\n");
}
adjust_priority (next);
return fix_tick_ready (next);
}
static int
fix_tick_ready (rtx next)
{
rtx link;
int tick, delay;
link = RESOLVED_DEPS (next);
if (link)
{
int full_p;
tick = INSN_TICK (next);
full_p = tick == INVALID_TICK;
do
{
rtx pro;
int tick1;
pro = XEXP (link, 0);
gcc_assert (INSN_TICK (pro) >= MIN_TICK);
tick1 = INSN_TICK (pro) + insn_cost (pro, link, next);
if (tick1 > tick)
tick = tick1;
}
while ((link = XEXP (link, 1)) && full_p);
}
else
tick = -1;
INSN_TICK (next) = tick;
delay = tick - clock_var;
if (delay <= 0)
delay = QUEUE_READY;
change_queue_index (next, delay);
return delay;
}
static void
change_queue_index (rtx next, int delay)
{
int i = QUEUE_INDEX (next);
gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
&& delay != 0);
gcc_assert (i != QUEUE_SCHEDULED);
if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
|| (delay < 0 && delay == i))
return;
if (i == QUEUE_READY)
ready_remove_insn (next);
else if (i >= 0)
queue_remove (next);
if (delay == QUEUE_READY)
ready_add (readyp, next, false);
else if (delay >= 1)
queue_insn (next, delay);
if (sched_verbose >= 2)
{
fprintf (sched_dump, ";;\t\ttick updated: insn %s",
(*current_sched_info->print_insn) (next, 0));
if (delay == QUEUE_READY)
fprintf (sched_dump, " into ready\n");
else if (delay >= 1)
fprintf (sched_dump, " into queue with cost=%d\n", delay);
else
fprintf (sched_dump, " removed from ready or queue lists\n");
}
}
static void
resolve_dep (rtx next, rtx insn)
{
rtx dep;
INSN_DEP_COUNT (next)--;
dep = remove_list_elem (insn, &LOG_LINKS (next));
XEXP (dep, 1) = RESOLVED_DEPS (next);
RESOLVED_DEPS (next) = dep;
gcc_assert ((INSN_DEP_COUNT (next) != 0 || !LOG_LINKS (next))
&& (LOG_LINKS (next) || INSN_DEP_COUNT (next) == 0));
}
static void
extend_h_i_d (void)
{
int new_max_uid = get_max_uid() + 1;
h_i_d = xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
old_max_uid = new_max_uid;
if (targetm.sched.h_i_d_extended)
targetm.sched.h_i_d_extended ();
}
static void
extend_ready (int n_new_insns)
{
int i;
readyp->veclen = rgn_n_insns + n_new_insns + 1 + issue_rate;
readyp->vec = XRESIZEVEC (rtx, readyp->vec, readyp->veclen);
ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
rgn_n_insns + 1, sizeof (char));
rgn_n_insns += n_new_insns;
choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
rgn_n_insns + 1);
for (i = rgn_n_insns; n_new_insns--; i--)
choice_stack[i].state = xmalloc (dfa_state_size);
}
static void
extend_global (rtx insn)
{
gcc_assert (INSN_P (insn));
extend_h_i_d ();
init_h_i_d (insn);
extend_dependency_caches (1, 0);
}
static void
extend_all (rtx insn)
{
extend_global (insn);
extend_ready (1);
(*current_sched_info->add_remove_insn) (insn, 0);
}
static void
init_h_i_d (rtx insn)
{
INSN_LUID (insn) = luid++;
INSN_COST (insn) = -1;
TODO_SPEC (insn) = HARD_DEP;
QUEUE_INDEX (insn) = QUEUE_NOWHERE;
INSN_TICK (insn) = INVALID_TICK;
INTER_TICK (insn) = INVALID_TICK;
find_insn_reg_weight1 (insn);
}
static void
generate_recovery_code (rtx insn)
{
if (TODO_SPEC (insn) & BEGIN_SPEC)
begin_speculative_block (insn);
if (TODO_SPEC (insn) & BE_IN_SPEC)
add_to_speculative_block (insn);
}
static void
process_insn_depend_be_in_spec (rtx link, rtx twin, ds_t fs)
{
for (; link; link = XEXP (link, 1))
{
ds_t ds;
rtx consumer;
consumer = XEXP (link, 0);
ds = DEP_STATUS (link);
if (
fs
&& (ds & DEP_TYPES) == DEP_TRUE)
{
gcc_assert (!(ds & BE_IN_SPEC));
if (
ds & BEGIN_SPEC)
{
if (
dep_weak (ds) <= dep_weak (fs))
ds = (ds & ~BEGIN_SPEC) | fs;
}
else
ds |= fs;
}
add_back_forw_dep (consumer, twin, REG_NOTE_KIND (link), ds);
}
}
static void
begin_speculative_block (rtx insn)
{
if (TODO_SPEC (insn) & BEGIN_DATA)
nr_begin_data++;
if (TODO_SPEC (insn) & BEGIN_CONTROL)
nr_begin_control++;
create_check_block_twin (insn, false);
TODO_SPEC (insn) &= ~BEGIN_SPEC;
}
static void
add_to_speculative_block (rtx insn)
{
ds_t ts;
rtx link, twins = NULL;
ts = TODO_SPEC (insn);
gcc_assert (!(ts & ~BE_IN_SPEC));
if (ts & BE_IN_DATA)
nr_be_in_data++;
if (ts & BE_IN_CONTROL)
nr_be_in_control++;
TODO_SPEC (insn) &= ~BE_IN_SPEC;
gcc_assert (!TODO_SPEC (insn));
DONE_SPEC (insn) |= ts;
for (link = LOG_LINKS (insn); link;)
{
rtx check;
check = XEXP (link, 0);
if (IS_SPECULATION_SIMPLE_CHECK_P (check))
{
create_check_block_twin (check, true);
link = LOG_LINKS (insn);
}
else
link = XEXP (link, 1);
}
clear_priorities (insn);
do
{
rtx link, check, twin;
basic_block rec;
link = LOG_LINKS (insn);
gcc_assert (!(DEP_STATUS (link) & BEGIN_SPEC)
&& (DEP_STATUS (link) & BE_IN_SPEC)
&& (DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
check = XEXP (link, 0);
gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
&& QUEUE_INDEX (check) == QUEUE_NOWHERE);
rec = BLOCK_FOR_INSN (check);
twin = emit_insn_before (copy_rtx (PATTERN (insn)), BB_END (rec));
extend_global (twin);
RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
if (sched_verbose && spec_info->dump)
fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
INSN_UID (twin), rec->index);
twins = alloc_INSN_LIST (twin, twins);
do
{
add_back_forw_dep (twin, check, REG_DEP_TRUE, DEP_TRUE);
do
{
link = XEXP (link, 1);
if (link)
{
check = XEXP (link, 0);
if (BLOCK_FOR_INSN (check) == rec)
break;
}
else
break;
}
while (1);
}
while (link);
process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, ts);
for (link = LOG_LINKS (insn); link;)
{
check = XEXP (link, 0);
if (BLOCK_FOR_INSN (check) == rec)
{
delete_back_forw_dep (insn, check);
link = LOG_LINKS (insn);
}
else
link = XEXP (link, 1);
}
}
while (LOG_LINKS (insn));
while (twins)
{
rtx twin;
twin = XEXP (twins, 0);
calc_priorities (twin);
add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
twin = XEXP (twins, 1);
free_INSN_LIST_node (twins);
twins = twin;
}
}
void *
xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
{
gcc_assert (new_nmemb >= old_nmemb);
p = XRESIZEVAR (void, p, new_nmemb * size);
memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
return p;
}
static dw_t
dep_weak (ds_t ds)
{
ds_t res = 1, dt;
int n = 0;
dt = FIRST_SPEC_TYPE;
do
{
if (ds & dt)
{
res *= (ds_t) get_dep_weak (ds, dt);
n++;
}
if (dt == LAST_SPEC_TYPE)
break;
dt <<= SPEC_TYPE_SHIFT;
}
while (1);
gcc_assert (n);
while (--n)
res /= MAX_DEP_WEAK;
if (res < MIN_DEP_WEAK)
res = MIN_DEP_WEAK;
gcc_assert (res <= MAX_DEP_WEAK);
return (dw_t) res;
}
static edge
find_fallthru_edge (basic_block pred)
{
edge e;
edge_iterator ei;
basic_block succ;
succ = pred->next_bb;
gcc_assert (succ->prev_bb == pred);
if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
{
FOR_EACH_EDGE (e, ei, pred->succs)
if (e->flags & EDGE_FALLTHRU)
{
gcc_assert (e->dest == succ);
return e;
}
}
else
{
FOR_EACH_EDGE (e, ei, succ->preds)
if (e->flags & EDGE_FALLTHRU)
{
gcc_assert (e->src == pred);
return e;
}
}
return NULL;
}
static void
init_before_recovery (void)
{
basic_block last;
edge e;
last = EXIT_BLOCK_PTR->prev_bb;
e = find_fallthru_edge (last);
if (e)
{
basic_block single, empty;
rtx x, label;
single = create_empty_bb (last);
empty = create_empty_bb (single);
single->count = last->count;
empty->count = last->count;
single->frequency = last->frequency;
empty->frequency = last->frequency;
BB_COPY_PARTITION (single, last);
BB_COPY_PARTITION (empty, last);
redirect_edge_succ (e, single);
make_single_succ_edge (single, empty, 0);
make_single_succ_edge (empty, EXIT_BLOCK_PTR,
EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
label = block_label (empty);
x = emit_jump_insn_after (gen_jump (label), BB_END (single));
JUMP_LABEL (x) = label;
LABEL_NUSES (label)++;
extend_global (x);
emit_barrier_after (x);
add_block (empty, 0);
add_block (single, 0);
before_recovery = single;
if (sched_verbose >= 2 && spec_info->dump)
fprintf (spec_info->dump,
";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
last->index, single->index, empty->index);
}
else
before_recovery = last;
}
static basic_block
create_recovery_block (void)
{
rtx label;
rtx barrier;
basic_block rec;
added_recovery_block_p = true;
if (!before_recovery)
init_before_recovery ();
barrier = get_last_bb_insn (before_recovery);
gcc_assert (BARRIER_P (barrier));
label = emit_label_after (gen_label_rtx (), barrier);
rec = create_basic_block (label, label, before_recovery);
emit_barrier_after (BB_END (rec));
if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
BB_SET_PARTITION (rec, BB_COLD_PARTITION);
if (sched_verbose && spec_info->dump)
fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
rec->index);
before_recovery = rec;
return rec;
}
static void
create_check_block_twin (rtx insn, bool mutate_p)
{
basic_block rec;
rtx label, check, twin, link;
ds_t fs;
gcc_assert (ORIG_PAT (insn)
&& (!mutate_p
|| (IS_SPECULATION_SIMPLE_CHECK_P (insn)
&& !(TODO_SPEC (insn) & SPECULATIVE))));
if (mutate_p || targetm.sched.needs_block_p (insn))
{
rec = create_recovery_block ();
label = BB_HEAD (rec);
}
else
{
rec = EXIT_BLOCK_PTR;
label = 0;
}
check = targetm.sched.gen_check (insn, label, mutate_p);
if (rec != EXIT_BLOCK_PTR)
{
check = emit_jump_insn_before (check, insn);
JUMP_LABEL (check) = label;
LABEL_NUSES (label)++;
}
else
check = emit_insn_before (check, insn);
extend_all (check);
RECOVERY_BLOCK (check) = rec;
if (sched_verbose && spec_info->dump)
fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
(*current_sched_info->print_insn) (check, 0));
gcc_assert (ORIG_PAT (insn));
if (rec != EXIT_BLOCK_PTR)
{
rtx link;
for (link = RESOLVED_DEPS (insn); link; link = XEXP (link, 1))
if (DEP_STATUS (link) & DEP_OUTPUT)
{
RESOLVED_DEPS (check) =
alloc_DEPS_LIST (XEXP (link, 0), RESOLVED_DEPS (check), DEP_TRUE);
PUT_REG_NOTE_KIND (RESOLVED_DEPS (check), REG_DEP_TRUE);
}
twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
extend_global (twin);
if (sched_verbose && spec_info->dump)
fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
INSN_UID (twin), rec->index);
}
else
{
ORIG_PAT (check) = ORIG_PAT (insn);
HAS_INTERNAL_DEP (check) = 1;
twin = check;
}
RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
if (rec != EXIT_BLOCK_PTR)
{
basic_block first_bb, second_bb;
rtx jump;
edge e;
int edge_flags;
first_bb = BLOCK_FOR_INSN (check);
e = split_block (first_bb, check);
gcc_assert (e->src == first_bb);
second_bb = e->dest;
if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
edge_flags = EDGE_CROSSING;
else
edge_flags = 0;
e = make_edge (first_bb, rec, edge_flags);
add_block (second_bb, first_bb);
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb)));
label = block_label (second_bb);
jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
JUMP_LABEL (jump) = label;
LABEL_NUSES (label)++;
extend_global (jump);
if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
{
if (flag_reorder_blocks_and_partition
&& targetm.have_named_sections
)
{
REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
NULL_RTX,
REG_NOTES (jump));
}
edge_flags = EDGE_CROSSING;
}
else
edge_flags = 0;
make_single_succ_edge (rec, second_bb, edge_flags);
add_block (rec, EXIT_BLOCK_PTR);
}
for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
{
ds_t ds;
ds = DEP_STATUS (link);
if (ds & BEGIN_SPEC)
{
gcc_assert (!mutate_p);
ds &= ~BEGIN_SPEC;
}
if (rec != EXIT_BLOCK_PTR)
{
add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
add_back_forw_dep (twin, XEXP (link, 0), REG_NOTE_KIND (link), ds);
}
else
add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
}
for (link = LOG_LINKS (insn); link;)
if ((DEP_STATUS (link) & BEGIN_SPEC)
|| mutate_p)
{
delete_back_forw_dep (insn, XEXP (link, 0));
link = LOG_LINKS (insn);
}
else
link = XEXP (link, 1);
fs = 0;
gcc_assert (!DONE_SPEC (insn));
if (!mutate_p)
{
ds_t ts = TODO_SPEC (insn);
DONE_SPEC (insn) = ts & BEGIN_SPEC;
CHECK_SPEC (check) = ts & BEGIN_SPEC;
if (ts & BEGIN_DATA)
fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
if (ts & BEGIN_CONTROL)
fs = set_dep_weak (fs, BE_IN_CONTROL, get_dep_weak (ts, BEGIN_CONTROL));
}
else
CHECK_SPEC (check) = CHECK_SPEC (insn);
process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, fs);
if (rec != EXIT_BLOCK_PTR)
{
if (!mutate_p)
{
add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE);
add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
}
else
{
if (spec_info->dump)
fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
(*current_sched_info->print_insn) (insn, 0));
for (link = INSN_DEPEND (insn); link; link = INSN_DEPEND (insn))
delete_back_forw_dep (XEXP (link, 0), insn);
if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
try_ready (check);
sched_remove_insn (insn);
}
add_back_forw_dep (twin, check, REG_DEP_ANTI, DEP_ANTI);
}
else
add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
if (!mutate_p)
{
clear_priorities (twin);
calc_priorities (twin);
}
}
static void
fix_recovery_deps (basic_block rec)
{
rtx note, insn, link, jump, ready_list = 0;
bitmap_head in_ready;
bitmap_initialize (&in_ready, 0);
note = NEXT_INSN (BB_HEAD (rec));
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
insn = BB_END (rec);
gcc_assert (JUMP_P (insn));
insn = PREV_INSN (insn);
do
{
for (link = INSN_DEPEND (insn); link;)
{
rtx consumer;
consumer = XEXP (link, 0);
if (BLOCK_FOR_INSN (consumer) != rec)
{
delete_back_forw_dep (consumer, insn);
if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
{
ready_list = alloc_INSN_LIST (consumer, ready_list);
bitmap_set_bit (&in_ready, INSN_LUID (consumer));
}
link = INSN_DEPEND (insn);
}
else
{
gcc_assert ((DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
link = XEXP (link, 1);
}
}
insn = PREV_INSN (insn);
}
while (insn != note);
bitmap_clear (&in_ready);
for (link = ready_list; link; link = XEXP (link, 1))
try_ready (XEXP (link, 0));
free_INSN_LIST_list (&ready_list);
insn = BB_HEAD (rec);
jump = BB_END (rec);
gcc_assert (LABEL_P (insn));
insn = NEXT_INSN (insn);
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
add_jump_dependencies (insn, jump);
}
static void
associate_line_notes_with_blocks (basic_block b)
{
rtx line;
for (line = BB_HEAD (b); line; line = PREV_INSN (line))
if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
{
line_note_head[b->index] = line;
break;
}
for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
{
if (INSN_P (line))
break;
if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
line_note_head[b->index] = line;
}
}
static void
change_pattern (rtx insn, rtx new_pat)
{
int t;
t = validate_change (insn, &PATTERN (insn), new_pat, 0);
gcc_assert (t);
INSN_COST (insn) = -1;
INSN_TICK (insn) = INVALID_TICK;
dfa_clear_single_insn_cache (insn);
}
static int
speculate_insn (rtx insn, ds_t request, rtx *new_pat)
{
gcc_assert (current_sched_info->flags & DO_SPECULATION
&& (request & SPECULATIVE));
if (!NONJUMP_INSN_P (insn)
|| HAS_INTERNAL_DEP (insn)
|| SCHED_GROUP_P (insn)
|| side_effects_p (PATTERN (insn))
|| (request & spec_info->mask) != request)
return -1;
gcc_assert (!IS_SPECULATION_CHECK_P (insn));
if (request & BE_IN_SPEC)
{
if (may_trap_p (PATTERN (insn)))
return -1;
if (!(request & BEGIN_SPEC))
return 0;
}
return targetm.sched.speculate_insn (insn, request & BEGIN_SPEC, new_pat);
}
static void
dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
{
if (!i)
fprintf (sched_dump,
";; ======================================================\n");
else
fprintf (sched_dump,
";; =====================ADVANCING TO=====================\n");
fprintf (sched_dump,
";; -- basic block %d from %d to %d -- %s reload\n",
bb->index, INSN_UID (head), INSN_UID (tail),
(reload_completed ? "after" : "before"));
fprintf (sched_dump,
";; ======================================================\n");
fprintf (sched_dump, "\n");
}
void
unlink_bb_notes (basic_block first, basic_block last)
{
if (first == last)
return;
bb_header = xmalloc (last_basic_block * sizeof (*bb_header));
if (last->next_bb != EXIT_BLOCK_PTR)
bb_header[last->next_bb->index] = 0;
first = first->next_bb;
do
{
rtx prev, label, note, next;
label = BB_HEAD (last);
if (LABEL_P (label))
note = NEXT_INSN (label);
else
note = label;
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
prev = PREV_INSN (label);
next = NEXT_INSN (note);
gcc_assert (prev && next);
NEXT_INSN (prev) = next;
PREV_INSN (next) = prev;
bb_header[last->index] = label;
if (last == first)
break;
last = last->prev_bb;
}
while (1);
}
static void
restore_bb_notes (basic_block first)
{
if (!bb_header)
return;
first = first->next_bb;
while (first != EXIT_BLOCK_PTR
&& bb_header[first->index])
{
rtx prev, label, note, next;
label = bb_header[first->index];
prev = PREV_INSN (label);
next = NEXT_INSN (prev);
if (LABEL_P (label))
note = NEXT_INSN (label);
else
note = label;
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
bb_header[first->index] = 0;
NEXT_INSN (prev) = label;
NEXT_INSN (note) = next;
PREV_INSN (next) = note;
first = first->next_bb;
}
free (bb_header);
bb_header = 0;
}
static void
extend_bb (basic_block bb)
{
rtx insn;
if (write_symbols != NO_DEBUG)
{
line_note_head = xrecalloc (line_note_head, last_basic_block,
old_last_basic_block,
sizeof (*line_note_head));
if (bb)
associate_line_notes_with_blocks (bb);
else
FOR_EACH_BB (bb)
associate_line_notes_with_blocks (bb);
}
old_last_basic_block = last_basic_block;
if (current_sched_info->flags & USE_GLAT)
{
glat_start = xrealloc (glat_start,
last_basic_block * sizeof (*glat_start));
glat_end = xrealloc (glat_end, last_basic_block * sizeof (*glat_end));
}
insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
if (NEXT_INSN (insn) == 0
|| (!NOTE_P (insn)
&& !LABEL_P (insn)
&& !BARRIER_P (NEXT_INSN (insn))))
{
emit_note_after (NOTE_INSN_DELETED, insn);
BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
}
}
void
add_block (basic_block bb, basic_block ebb)
{
gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO
&& bb->il.rtl->global_live_at_start == 0
&& bb->il.rtl->global_live_at_end == 0);
extend_bb (bb);
glat_start[bb->index] = 0;
glat_end[bb->index] = 0;
if (current_sched_info->add_block)
current_sched_info->add_block (bb, ebb);
}
static void
fix_jump_move (rtx jump)
{
basic_block bb, jump_bb, jump_bb_next;
bb = BLOCK_FOR_INSN (PREV_INSN (jump));
jump_bb = BLOCK_FOR_INSN (jump);
jump_bb_next = jump_bb->next_bb;
gcc_assert (current_sched_info->flags & SCHED_EBB
|| IS_SPECULATION_BRANCHY_CHECK_P (jump));
if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
BB_END (jump_bb) = BB_END (jump_bb_next);
if (BB_END (bb) != PREV_INSN (jump))
BB_END (jump_bb_next) = BB_END (bb);
else
BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
BB_END (bb) = PREV_INSN (jump);
update_bb_for_insn (jump_bb_next);
}
static void
move_block_after_check (rtx jump)
{
basic_block bb, jump_bb, jump_bb_next;
VEC(edge,gc) *t;
bb = BLOCK_FOR_INSN (PREV_INSN (jump));
jump_bb = BLOCK_FOR_INSN (jump);
jump_bb_next = jump_bb->next_bb;
update_bb_for_insn (jump_bb);
gcc_assert (IS_SPECULATION_CHECK_P (jump)
|| IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
unlink_block (jump_bb_next);
link_block (jump_bb_next, bb);
t = bb->succs;
bb->succs = 0;
move_succs (&(jump_bb->succs), bb);
move_succs (&(jump_bb_next->succs), jump_bb);
move_succs (&t, jump_bb_next);
if (current_sched_info->fix_recovery_cfg)
current_sched_info->fix_recovery_cfg
(bb->index, jump_bb->index, jump_bb_next->index);
}
static void
move_succs (VEC(edge,gc) **succsp, basic_block to)
{
edge e;
edge_iterator ei;
gcc_assert (to->succs == 0);
to->succs = *succsp;
FOR_EACH_EDGE (e, ei, to->succs)
e->src = to;
*succsp = 0;
}
static void
init_glat (void)
{
basic_block bb;
FOR_ALL_BB (bb)
init_glat1 (bb);
}
static void
init_glat1 (basic_block bb)
{
gcc_assert (bb->il.rtl->global_live_at_start != 0
&& bb->il.rtl->global_live_at_end != 0);
glat_start[bb->index] = bb->il.rtl->global_live_at_start;
glat_end[bb->index] = bb->il.rtl->global_live_at_end;
if (current_sched_info->flags & DETACH_LIFE_INFO)
{
bb->il.rtl->global_live_at_start = 0;
bb->il.rtl->global_live_at_end = 0;
}
}
void
attach_life_info (void)
{
basic_block bb;
FOR_ALL_BB (bb)
attach_life_info1 (bb);
}
static void
attach_life_info1 (basic_block bb)
{
gcc_assert (bb->il.rtl->global_live_at_start == 0
&& bb->il.rtl->global_live_at_end == 0);
if (glat_start[bb->index])
{
gcc_assert (glat_end[bb->index]);
bb->il.rtl->global_live_at_start = glat_start[bb->index];
bb->il.rtl->global_live_at_end = glat_end[bb->index];
glat_start[bb->index] = 0;
glat_end[bb->index] = 0;
#ifdef ENABLE_CHECKING
if (bb->index < NUM_FIXED_BLOCKS
|| current_sched_info->region_head_or_leaf_p (bb, 0))
{
glat_start[bb->index] = ALLOC_REG_SET (®_obstack);
COPY_REG_SET (glat_start[bb->index],
bb->il.rtl->global_live_at_start);
}
if (bb->index < NUM_FIXED_BLOCKS
|| current_sched_info->region_head_or_leaf_p (bb, 1))
{
glat_end[bb->index] = ALLOC_REG_SET (®_obstack);
COPY_REG_SET (glat_end[bb->index], bb->il.rtl->global_live_at_end);
}
#endif
}
else
{
gcc_assert (!glat_end[bb->index]);
bb->il.rtl->global_live_at_start = ALLOC_REG_SET (®_obstack);
bb->il.rtl->global_live_at_end = ALLOC_REG_SET (®_obstack);
}
}
static void
free_glat (void)
{
#ifdef ENABLE_CHECKING
if (current_sched_info->flags & DETACH_LIFE_INFO)
{
basic_block bb;
FOR_ALL_BB (bb)
{
if (glat_start[bb->index])
FREE_REG_SET (glat_start[bb->index]);
if (glat_end[bb->index])
FREE_REG_SET (glat_end[bb->index]);
}
}
#endif
free (glat_start);
free (glat_end);
}
static void
sched_remove_insn (rtx insn)
{
change_queue_index (insn, QUEUE_NOWHERE);
current_sched_info->add_remove_insn (insn, 1);
remove_insn (insn);
}
static void
clear_priorities (rtx insn)
{
rtx link;
for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
{
rtx pro;
pro = XEXP (link, 0);
if (INSN_PRIORITY_KNOWN (pro))
{
INSN_PRIORITY_KNOWN (pro) = 0;
clear_priorities (pro);
}
}
}
static void
calc_priorities (rtx insn)
{
rtx link;
for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
{
rtx pro;
pro = XEXP (link, 0);
if (!INSN_PRIORITY_KNOWN (pro))
{
priority (pro);
calc_priorities (pro);
}
}
}
static void
add_jump_dependencies (rtx insn, rtx jump)
{
do
{
insn = NEXT_INSN (insn);
if (insn == jump)
break;
if (!INSN_DEPEND (insn))
add_back_forw_dep (jump, insn, REG_DEP_ANTI, DEP_ANTI);
}
while (1);
gcc_assert (LOG_LINKS (jump));
}
rtx
bb_note (basic_block bb)
{
rtx note;
note = BB_HEAD (bb);
if (LABEL_P (note))
note = NEXT_INSN (note);
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
return note;
}
#ifdef ENABLE_CHECKING
extern void debug_spec_status (ds_t);
void
debug_spec_status (ds_t s)
{
FILE *f = stderr;
if (s & BEGIN_DATA)
fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak (s, BEGIN_DATA));
if (s & BE_IN_DATA)
fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak (s, BE_IN_DATA));
if (s & BEGIN_CONTROL)
fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak (s, BEGIN_CONTROL));
if (s & BE_IN_CONTROL)
fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak (s, BE_IN_CONTROL));
if (s & HARD_DEP)
fprintf (f, "HARD_DEP; ");
if (s & DEP_TRUE)
fprintf (f, "DEP_TRUE; ");
if (s & DEP_ANTI)
fprintf (f, "DEP_ANTI; ");
if (s & DEP_OUTPUT)
fprintf (f, "DEP_OUTPUT; ");
fprintf (f, "\n");
}
static int
has_edge_p (VEC(edge,gc) *el, int type)
{
edge e;
edge_iterator ei;
FOR_EACH_EDGE (e, ei, el)
if (e->flags & type)
return 1;
return 0;
}
static void
check_cfg (rtx head, rtx tail)
{
rtx next_tail;
basic_block bb = 0;
int not_first = 0, not_last;
if (head == NULL)
head = get_insns ();
if (tail == NULL)
tail = get_last_insn ();
next_tail = NEXT_INSN (tail);
do
{
not_last = head != tail;
if (not_first)
gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
if (not_last)
gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
if (LABEL_P (head)
|| (NOTE_INSN_BASIC_BLOCK_P (head)
&& (!not_first
|| (not_first && !LABEL_P (PREV_INSN (head))))))
{
gcc_assert (bb == 0);
bb = BLOCK_FOR_INSN (head);
if (bb != 0)
gcc_assert (BB_HEAD (bb) == head);
else
gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
}
if (bb == 0)
{
gcc_assert (!inside_basic_block_p (head));
head = NEXT_INSN (head);
}
else
{
gcc_assert (inside_basic_block_p (head)
|| NOTE_P (head));
gcc_assert (BLOCK_FOR_INSN (head) == bb);
if (LABEL_P (head))
{
head = NEXT_INSN (head);
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
}
else
{
if (control_flow_insn_p (head))
{
gcc_assert (BB_END (bb) == head);
if (any_uncondjump_p (head))
gcc_assert (EDGE_COUNT (bb->succs) == 1
&& BARRIER_P (NEXT_INSN (head)));
else if (any_condjump_p (head))
gcc_assert (
(EDGE_COUNT (bb->succs) > 1
&& !BARRIER_P (NEXT_INSN (head)))
|| (EDGE_COUNT (bb->succs) == 1
&& (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
== JUMP_LABEL (head))));
}
if (BB_END (bb) == head)
{
if (EDGE_COUNT (bb->succs) > 1)
gcc_assert (control_flow_insn_p (head)
|| has_edge_p (bb->succs, EDGE_COMPLEX));
bb = 0;
}
head = NEXT_INSN (head);
}
}
not_first = 1;
}
while (head != next_tail);
gcc_assert (bb == 0);
}
static void
check_sched_flags (void)
{
unsigned int f = current_sched_info->flags;
if (flag_sched_stalled_insns)
gcc_assert (!(f & DO_SPECULATION));
if (f & DO_SPECULATION)
gcc_assert (!flag_sched_stalled_insns
&& (f & DETACH_LIFE_INFO)
&& spec_info
&& spec_info->mask);
if (f & DETACH_LIFE_INFO)
gcc_assert (f & USE_GLAT);
}
void
check_reg_live (bool fatal_p)
{
basic_block bb;
FOR_ALL_BB (bb)
{
int i;
i = bb->index;
if (glat_start[i])
{
bool b = bitmap_equal_p (bb->il.rtl->global_live_at_start,
glat_start[i]);
if (!b)
{
gcc_assert (!fatal_p);
fprintf (stderr, ";; check_reg_live_at_start (%d) failed.\n", i);
}
}
if (glat_end[i])
{
bool b = bitmap_equal_p (bb->il.rtl->global_live_at_end,
glat_end[i]);
if (!b)
{
gcc_assert (!fatal_p);
fprintf (stderr, ";; check_reg_live_at_end (%d) failed.\n", i);
}
}
}
}
#endif
#endif