Bring in a subset of gcc fixes that were back ported to
the GCC 4.1 branch and are available under GPLv2. 2007-11-07 Eric Botcazou <ebotcazou@libertysurf.fr> PR rtl-optimization/33822 * rtl.h (REG_OFFSET): Fix comment. * var-tracking.c (INT_MEM_OFFSET): New macro. (var_mem_set): Use it. (var_mem_delete_and_set): Likewise. (var_mem_delete): Likewise. (vt_get_decl_and_offset): Likewise. (offset_valid_for_tracked_p): New predicate. (count_uses): Do not track locations with invalid offsets. (add_uses): Likewise. (add_stores): Likewise. http://gcc.gnu.org/viewcvs?root=gcc&view=rev&rev=129972 2007-11-16 Richard Guenther <rguenther@suse.de> PR middle-end/34030 * fold-const.c (fold_binary): Use correct types for folding 1 << X & Y to Y >> X & 1. http://gcc.gnu.org/viewcvs?root=gcc&view=rev&rev=130242 2008-01-14 Eric Botcazou <ebotcazou@adacore.com> PR rtl-optimization/31944 * cse.c (remove_pseudo_from_table): New function. (merge_equiv_classes): Use above function to remove pseudo-registers. (invalidate): Likewise. http://gcc.gnu.org/viewcvs?root=gcc&view=rev&rev=131524 2008-01-24 Kaveh R. Ghazi <ghazi@caip.rutgers.edu> Backport: 2007-11-07 Kenneth Zadeck <zadeck@naturalbridge.com> PR middle-end/33826 * ipa-pure-const (static_execute): Added code to keep recursive functions from being marked as pure or const. * ipa-utils (searchc): Fixed comment. http://gcc.gnu.org/viewcvs?root=gcc&view=rev&rev=131807 2008-02-01 Kaveh R. Ghazi <ghazi@caip.rutgers.edu> Backport: 2007-08-02 Nathan Froyd <froydnj@codesourcery.com> PR middle-end/25445 * varasm.c (default_binds_local_p_1): Consult flag_whole_program if we are compiling with -fPIC. http://gcc.gnu.org/viewcvs?root=gcc&view=rev&rev=132061 2008-02-04 Richard Guenther <rguenther@suse.de> PR middle-end/33631 * expr.c (count_type_elements): Give up for unions instead of guessing. http://gcc.gnu.org/viewcvs?root=gcc&view=rev&rev=132101 2008-02-14 Alan Modra <amodra@bigpond.net.au> PR target/34393 * config/rs6000/rs6000.md (restore_stack_block): Force operands[1] to a reg. 
http://gcc.gnu.org/viewcvs?root=gcc&view=rev&rev=132309 2008-03-25 Richard Guenther <rguenther@suse.de> Backport from mainline: 2008-02-12 Richard Guenther <rguenther@suse.de> PR middle-end/35163 * fold-const.c (fold_widened_comparison): Use get_unwidened in value-preserving mode. Disallow final truncation. http://gcc.gnu.org/viewcvs?root=gcc&view=rev&rev=133509 2008-11-30 Eric Botcazou <ebotcazou@adacore.com> PR target/38287 * config/sparc/sparc.md (divsi3 expander): Remove constraints. (divsi3_sp32): Add new alternative with 'K' for operand #2. (cmp_sdiv_cc_set): Factor common string. (udivsi3_sp32): Add new alternative with 'K' for operand #2. Add TARGET_V9 case. (cmp_udiv_cc_set): Factor common string. http://gcc.gnu.org/viewcvs?root=gcc&view=rev&rev=142298 Reviewed by: mm Approved by: jhb (mentor) MFC after: 1 week
This commit is contained in:
parent
e3fd0bc1b2
commit
c6e2105845
@ -10075,6 +10075,7 @@
|
||||
""
|
||||
"
|
||||
{
|
||||
operands[1] = force_reg (Pmode, operands[1]);
|
||||
operands[2] = gen_reg_rtx (Pmode);
|
||||
operands[3] = gen_frame_mem (Pmode, operands[0]);
|
||||
operands[4] = gen_frame_mem (Pmode, operands[1]);
|
||||
|
@ -5071,14 +5071,11 @@
|
||||
[(set_attr "type" "multi")
|
||||
(set_attr "length" "2")])
|
||||
|
||||
;; The V8 architecture specifies that there must be 3 instructions between
|
||||
;; a Y register write and a use of it for correct results.
|
||||
|
||||
(define_expand "divsi3"
|
||||
[(parallel [(set (match_operand:SI 0 "register_operand" "=r,r")
|
||||
(div:SI (match_operand:SI 1 "register_operand" "r,r")
|
||||
(match_operand:SI 2 "input_operand" "rI,m")))
|
||||
(clobber (match_scratch:SI 3 "=&r,&r"))])]
|
||||
[(parallel [(set (match_operand:SI 0 "register_operand" "")
|
||||
(div:SI (match_operand:SI 1 "register_operand" "")
|
||||
(match_operand:SI 2 "input_operand" "")))
|
||||
(clobber (match_scratch:SI 3 ""))])]
|
||||
"TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
|
||||
{
|
||||
if (TARGET_ARCH64)
|
||||
@ -5091,24 +5088,40 @@
|
||||
}
|
||||
})
|
||||
|
||||
;; The V8 architecture specifies that there must be at least 3 instructions
|
||||
;; between a write to the Y register and a use of it for correct results.
|
||||
;; We try to fill one of them with a simple constant or a memory load.
|
||||
|
||||
(define_insn "divsi3_sp32"
|
||||
[(set (match_operand:SI 0 "register_operand" "=r,r")
|
||||
(div:SI (match_operand:SI 1 "register_operand" "r,r")
|
||||
(match_operand:SI 2 "input_operand" "rI,m")))
|
||||
(clobber (match_scratch:SI 3 "=&r,&r"))]
|
||||
"(TARGET_V8 || TARGET_DEPRECATED_V8_INSNS)
|
||||
&& TARGET_ARCH32"
|
||||
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
|
||||
(div:SI (match_operand:SI 1 "register_operand" "r,r,r")
|
||||
(match_operand:SI 2 "input_operand" "rI,K,m")))
|
||||
(clobber (match_scratch:SI 3 "=&r,&r,&r"))]
|
||||
"(TARGET_V8 || TARGET_DEPRECATED_V8_INSNS) && TARGET_ARCH32"
|
||||
{
|
||||
if (which_alternative == 0)
|
||||
if (TARGET_V9)
|
||||
return "sra\t%1, 31, %3\n\twr\t%3, 0, %%y\n\tsdiv\t%1, %2, %0";
|
||||
else
|
||||
return "sra\t%1, 31, %3\n\twr\t%3, 0, %%y\n\tnop\n\tnop\n\tnop\n\tsdiv\t%1, %2, %0";
|
||||
else
|
||||
if (TARGET_V9)
|
||||
return "sra\t%1, 31, %3\n\twr\t%3, 0, %%y\n\tld\t%2, %3\n\tsdiv\t%1, %3, %0";
|
||||
else
|
||||
return "sra\t%1, 31, %3\n\twr\t%3, 0, %%y\n\tld\t%2, %3\n\tnop\n\tnop\n\tsdiv\t%1, %3, %0";
|
||||
output_asm_insn ("sra\t%1, 31, %3", operands);
|
||||
output_asm_insn ("wr\t%3, 0, %%y", operands);
|
||||
|
||||
switch (which_alternative)
|
||||
{
|
||||
case 0:
|
||||
if (TARGET_V9)
|
||||
return "sdiv\t%1, %2, %0";
|
||||
else
|
||||
return "nop\n\tnop\n\tnop\n\tsdiv\t%1, %2, %0";
|
||||
case 1:
|
||||
if (TARGET_V9)
|
||||
return "sethi\t%%hi(%a2), %3\n\tsdiv\t%1, %3, %0";
|
||||
else
|
||||
return "sethi\t%%hi(%a2), %3\n\tnop\n\tnop\n\tsdiv\t%1, %3, %0";
|
||||
case 2:
|
||||
if (TARGET_V9)
|
||||
return "ld\t%2, %3\n\tsdiv\t%1, %3, %0";
|
||||
else
|
||||
return "ld\t%2, %3\n\tnop\n\tnop\n\tsdiv\t%1, %3, %0";
|
||||
default:
|
||||
gcc_unreachable ();
|
||||
}
|
||||
}
|
||||
[(set_attr "type" "multi")
|
||||
(set (attr "length")
|
||||
@ -5143,10 +5156,13 @@
|
||||
(clobber (match_scratch:SI 3 "=&r"))]
|
||||
"TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
|
||||
{
|
||||
output_asm_insn ("sra\t%1, 31, %3", operands);
|
||||
output_asm_insn ("wr\t%3, 0, %%y", operands);
|
||||
|
||||
if (TARGET_V9)
|
||||
return "sra\t%1, 31, %3\n\twr\t%3, 0, %%y\n\tsdivcc\t%1, %2, %0";
|
||||
return "sdivcc\t%1, %2, %0";
|
||||
else
|
||||
return "sra\t%1, 31, %3\n\twr\t%3, 0, %%y\n\tnop\n\tnop\n\tnop\n\tsdivcc\t%1, %2, %0";
|
||||
return "nop\n\tnop\n\tnop\n\tsdivcc\t%1, %2, %0";
|
||||
}
|
||||
[(set_attr "type" "multi")
|
||||
(set (attr "length")
|
||||
@ -5161,29 +5177,48 @@
|
||||
"TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
|
||||
"")
|
||||
|
||||
;; The V8 architecture specifies that there must be 3 instructions between
|
||||
;; a Y register write and a use of it for correct results.
|
||||
;; The V8 architecture specifies that there must be at least 3 instructions
|
||||
;; between a write to the Y register and a use of it for correct results.
|
||||
;; We try to fill one of them with a simple constant or a memory load.
|
||||
|
||||
(define_insn "udivsi3_sp32"
|
||||
[(set (match_operand:SI 0 "register_operand" "=r,&r,&r")
|
||||
(udiv:SI (match_operand:SI 1 "nonimmediate_operand" "r,r,m")
|
||||
(match_operand:SI 2 "input_operand" "rI,m,r")))]
|
||||
"(TARGET_V8 || TARGET_DEPRECATED_V8_INSNS)
|
||||
&& TARGET_ARCH32"
|
||||
[(set (match_operand:SI 0 "register_operand" "=r,&r,&r,&r")
|
||||
(udiv:SI (match_operand:SI 1 "nonimmediate_operand" "r,r,r,m")
|
||||
(match_operand:SI 2 "input_operand" "rI,K,m,r")))]
|
||||
"(TARGET_V8 || TARGET_DEPRECATED_V8_INSNS) && TARGET_ARCH32"
|
||||
{
|
||||
output_asm_insn ("wr\t%%g0, %%g0, %%y", operands);
|
||||
output_asm_insn ("wr\t%%g0, 0, %%y", operands);
|
||||
|
||||
switch (which_alternative)
|
||||
{
|
||||
default:
|
||||
return "nop\n\tnop\n\tnop\n\tudiv\t%1, %2, %0";
|
||||
case 0:
|
||||
if (TARGET_V9)
|
||||
return "udiv\t%1, %2, %0";
|
||||
else
|
||||
return "nop\n\tnop\n\tnop\n\tudiv\t%1, %2, %0";
|
||||
case 1:
|
||||
return "ld\t%2, %0\n\tnop\n\tnop\n\tudiv\t%1, %0, %0";
|
||||
if (TARGET_V9)
|
||||
return "sethi\t%%hi(%a2), %0\n\tudiv\t%1, %0, %0";
|
||||
else
|
||||
return "sethi\t%%hi(%a2), %0\n\tnop\n\tnop\n\tudiv\t%1, %0, %0";
|
||||
case 2:
|
||||
return "ld\t%1, %0\n\tnop\n\tnop\n\tudiv\t%0, %2, %0";
|
||||
if (TARGET_V9)
|
||||
return "ld\t%2, %0\n\tudiv\t%1, %0, %0";
|
||||
else
|
||||
return "ld\t%2, %0\n\tnop\n\tnop\n\tudiv\t%1, %0, %0";
|
||||
case 3:
|
||||
if (TARGET_V9)
|
||||
return "ld\t%1, %0\n\tudiv\t%0, %2, %0";
|
||||
else
|
||||
return "ld\t%1, %0\n\tnop\n\tnop\n\tudiv\t%0, %2, %0";
|
||||
default:
|
||||
gcc_unreachable ();
|
||||
}
|
||||
}
|
||||
[(set_attr "type" "multi")
|
||||
(set_attr "length" "5")])
|
||||
(set (attr "length")
|
||||
(if_then_else (eq_attr "isa" "v9")
|
||||
(const_int 3) (const_int 5)))])
|
||||
|
||||
(define_insn "udivsi3_sp64"
|
||||
[(set (match_operand:SI 0 "register_operand" "=r")
|
||||
@ -5209,13 +5244,14 @@
|
||||
(const_int 0)))
|
||||
(set (match_operand:SI 0 "register_operand" "=r")
|
||||
(udiv:SI (match_dup 1) (match_dup 2)))]
|
||||
"TARGET_V8
|
||||
|| TARGET_DEPRECATED_V8_INSNS"
|
||||
"TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
|
||||
{
|
||||
output_asm_insn ("wr\t%%g0, 0, %%y", operands);
|
||||
|
||||
if (TARGET_V9)
|
||||
return "wr\t%%g0, %%g0, %%y\n\tudivcc\t%1, %2, %0";
|
||||
return "udivcc\t%1, %2, %0";
|
||||
else
|
||||
return "wr\t%%g0, %%g0, %%y\n\tnop\n\tnop\n\tnop\n\tudivcc\t%1, %2, %0";
|
||||
return "nop\n\tnop\n\tnop\n\tudivcc\t%1, %2, %0";
|
||||
}
|
||||
[(set_attr "type" "multi")
|
||||
(set (attr "length")
|
||||
|
@ -583,7 +583,8 @@ static void delete_reg_equiv (unsigned int);
|
||||
static int mention_regs (rtx);
|
||||
static int insert_regs (rtx, struct table_elt *, int);
|
||||
static void remove_from_table (struct table_elt *, unsigned);
|
||||
static struct table_elt *lookup (rtx, unsigned, enum machine_mode);
|
||||
static void remove_pseudo_from_table (rtx, unsigned);
|
||||
static struct table_elt *lookup (rtx, unsigned, enum machine_mode);
|
||||
static struct table_elt *lookup_for_remove (rtx, unsigned, enum machine_mode);
|
||||
static rtx lookup_as_function (rtx, enum rtx_code);
|
||||
static struct table_elt *insert (rtx, struct table_elt *, unsigned,
|
||||
@ -1381,6 +1382,19 @@ remove_from_table (struct table_elt *elt, unsigned int hash)
|
||||
table_size--;
|
||||
}
|
||||
|
||||
/* Same as above, but X is a pseudo-register. */
|
||||
|
||||
static void
|
||||
remove_pseudo_from_table (rtx x, unsigned int hash)
|
||||
{
|
||||
struct table_elt *elt;
|
||||
|
||||
/* Because a pseudo-register can be referenced in more than one
|
||||
mode, we might have to remove more than one table entry. */
|
||||
while ((elt = lookup_for_remove (x, hash, VOIDmode)))
|
||||
remove_from_table (elt, hash);
|
||||
}
|
||||
|
||||
/* Look up X in the hash table and return its table element,
|
||||
or 0 if X is not in the table.
|
||||
|
||||
@ -1707,7 +1721,10 @@ merge_equiv_classes (struct table_elt *class1, struct table_elt *class2)
|
||||
delete_reg_equiv (REGNO (exp));
|
||||
}
|
||||
|
||||
remove_from_table (elt, hash);
|
||||
if (REG_P (exp) && REGNO (exp) >= FIRST_PSEUDO_REGISTER)
|
||||
remove_pseudo_from_table (exp, hash);
|
||||
else
|
||||
remove_from_table (elt, hash);
|
||||
|
||||
if (insert_regs (exp, class1, 0) || need_rehash)
|
||||
{
|
||||
@ -1803,14 +1820,7 @@ invalidate (rtx x, enum machine_mode full_mode)
|
||||
SUBREG_TICKED (regno) = -1;
|
||||
|
||||
if (regno >= FIRST_PSEUDO_REGISTER)
|
||||
{
|
||||
/* Because a register can be referenced in more than one mode,
|
||||
we might have to remove more than one table entry. */
|
||||
struct table_elt *elt;
|
||||
|
||||
while ((elt = lookup_for_remove (x, hash, GET_MODE (x))))
|
||||
remove_from_table (elt, hash);
|
||||
}
|
||||
remove_pseudo_from_table (x, hash);
|
||||
else
|
||||
{
|
||||
HOST_WIDE_INT in_table
|
||||
|
@ -4750,14 +4750,7 @@ count_type_elements (tree type, bool allow_flexarr)
|
||||
|
||||
case UNION_TYPE:
|
||||
case QUAL_UNION_TYPE:
|
||||
{
|
||||
/* Ho hum. How in the world do we guess here? Clearly it isn't
|
||||
right to count the fields. Guess based on the number of words. */
|
||||
HOST_WIDE_INT n = int_size_in_bytes (type);
|
||||
if (n < 0)
|
||||
return -1;
|
||||
return n / UNITS_PER_WORD;
|
||||
}
|
||||
return -1;
|
||||
|
||||
case COMPLEX_TYPE:
|
||||
return 2;
|
||||
|
@ -6657,12 +6657,14 @@ fold_widened_comparison (enum tree_code code, tree type, tree arg0, tree arg1)
|
||||
if (TYPE_PRECISION (TREE_TYPE (arg0)) <= TYPE_PRECISION (shorter_type))
|
||||
return NULL_TREE;
|
||||
|
||||
arg1_unw = get_unwidened (arg1, shorter_type);
|
||||
arg1_unw = get_unwidened (arg1, NULL_TREE);
|
||||
|
||||
/* If possible, express the comparison in the shorter mode. */
|
||||
if ((code == EQ_EXPR || code == NE_EXPR
|
||||
|| TYPE_UNSIGNED (TREE_TYPE (arg0)) == TYPE_UNSIGNED (shorter_type))
|
||||
&& (TREE_TYPE (arg1_unw) == shorter_type
|
||||
|| (TYPE_PRECISION (shorter_type)
|
||||
>= TYPE_PRECISION (TREE_TYPE (arg1_unw)))
|
||||
|| (TREE_CODE (arg1_unw) == INTEGER_CST
|
||||
&& (TREE_CODE (shorter_type) == INTEGER_TYPE
|
||||
|| TREE_CODE (shorter_type) == BOOLEAN_TYPE)
|
||||
@ -10647,24 +10649,24 @@ fold_binary (enum tree_code code, tree type, tree op0, tree op1)
|
||||
tree arg01 = TREE_OPERAND (arg0, 1);
|
||||
if (TREE_CODE (arg00) == LSHIFT_EXPR
|
||||
&& integer_onep (TREE_OPERAND (arg00, 0)))
|
||||
return
|
||||
fold_build2 (code, type,
|
||||
build2 (BIT_AND_EXPR, TREE_TYPE (arg0),
|
||||
build2 (RSHIFT_EXPR, TREE_TYPE (arg00),
|
||||
arg01, TREE_OPERAND (arg00, 1)),
|
||||
fold_convert (TREE_TYPE (arg0),
|
||||
integer_one_node)),
|
||||
arg1);
|
||||
else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
|
||||
&& integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
|
||||
return
|
||||
fold_build2 (code, type,
|
||||
build2 (BIT_AND_EXPR, TREE_TYPE (arg0),
|
||||
build2 (RSHIFT_EXPR, TREE_TYPE (arg01),
|
||||
arg00, TREE_OPERAND (arg01, 1)),
|
||||
fold_convert (TREE_TYPE (arg0),
|
||||
integer_one_node)),
|
||||
arg1);
|
||||
{
|
||||
tree tem = fold_build2 (RSHIFT_EXPR, TREE_TYPE (arg00),
|
||||
arg01, TREE_OPERAND (arg00, 1));
|
||||
tem = fold_build2 (BIT_AND_EXPR, TREE_TYPE (arg0), tem,
|
||||
build_int_cst (TREE_TYPE (arg0), 1));
|
||||
return fold_build2 (code, type,
|
||||
fold_convert (TREE_TYPE (arg1), tem), arg1);
|
||||
}
|
||||
else if (TREE_CODE (arg01) == LSHIFT_EXPR
|
||||
&& integer_onep (TREE_OPERAND (arg01, 0)))
|
||||
{
|
||||
tree tem = fold_build2 (RSHIFT_EXPR, TREE_TYPE (arg01),
|
||||
arg00, TREE_OPERAND (arg01, 1));
|
||||
tem = fold_build2 (BIT_AND_EXPR, TREE_TYPE (arg0), tem,
|
||||
build_int_cst (TREE_TYPE (arg0), 1));
|
||||
return fold_build2 (code, type,
|
||||
fold_convert (TREE_TYPE (arg1), tem), arg1);
|
||||
}
|
||||
}
|
||||
|
||||
/* If this is an NE or EQ comparison of zero against the result of a
|
||||
|
@ -639,6 +639,7 @@ static_execute (void)
|
||||
for (i = 0; i < order_pos; i++ )
|
||||
{
|
||||
enum pure_const_state_e pure_const_state = IPA_CONST;
|
||||
int count = 0;
|
||||
node = order[i];
|
||||
|
||||
/* Find the worst state for any node in the cycle. */
|
||||
@ -655,11 +656,40 @@ static_execute (void)
|
||||
if (!w_l->state_set_in_source)
|
||||
{
|
||||
struct cgraph_edge *e;
|
||||
count++;
|
||||
|
||||
/* FIXME!!! Because of pr33826, we cannot have either
|
||||
immediate or transitive recursive functions marked as
|
||||
pure or const because dce can delete a function that
|
||||
is in reality an infinite loop. A better solution
|
||||
than just outlawing them is to add another bit the
|
||||
functions to distinguish recursive from non recursive
|
||||
pure and const function. This would allow the
|
||||
recursive ones to be cse'd but not dce'd. In this
|
||||
same vein, we could allow functions with loops to
|
||||
also be cse'd but not dce'd.
|
||||
|
||||
Unfortunately we are late in stage 3, and the fix
|
||||
described above is is not appropriate. */
|
||||
if (count > 1)
|
||||
{
|
||||
pure_const_state = IPA_NEITHER;
|
||||
break;
|
||||
}
|
||||
|
||||
for (e = w->callees; e; e = e->next_callee)
|
||||
{
|
||||
struct cgraph_node *y = e->callee;
|
||||
/* Only look at the master nodes and skip external nodes. */
|
||||
y = cgraph_master_clone (y);
|
||||
|
||||
/* Check for immediate recursive functions. See the
|
||||
FIXME above. */
|
||||
if (w == y)
|
||||
{
|
||||
pure_const_state = IPA_NEITHER;
|
||||
break;
|
||||
}
|
||||
if (y)
|
||||
{
|
||||
funct_state y_l = get_function_state (y);
|
||||
|
@ -78,7 +78,7 @@ struct searchc_env {
|
||||
has been customized for cgraph_nodes. The env parameter is because
|
||||
it is recursive and there are no nested functions here. This
|
||||
function should only be called from itself or
|
||||
cgraph_reduced_inorder. ENV is a stack env and would be
|
||||
ipa_utils_reduced_inorder. ENV is a stack env and would be
|
||||
unnecessary if C had nested functions. V is the node to start
|
||||
searching from. */
|
||||
|
||||
|
@ -1189,8 +1189,8 @@ do { \
|
||||
refer to part of a DECL. */
|
||||
#define REG_EXPR(RTX) (REG_ATTRS (RTX) == 0 ? 0 : REG_ATTRS (RTX)->decl)
|
||||
|
||||
/* For a MEM rtx, the offset from the start of MEM_DECL, if known, as a
|
||||
RTX that is always a CONST_INT. */
|
||||
/* For a REG rtx, the offset from the start of REG_EXPR, if known, as an
|
||||
HOST_WIDE_INT. */
|
||||
#define REG_OFFSET(RTX) (REG_ATTRS (RTX) == 0 ? 0 : REG_ATTRS (RTX)->offset)
|
||||
|
||||
/* Copy the attributes that apply to memory locations from RHS to LHS. */
|
||||
|
@ -259,6 +259,9 @@ typedef struct variable_def
|
||||
/* Pointer to the BB's information specific to variable tracking pass. */
|
||||
#define VTI(BB) ((variable_tracking_info) (BB)->aux)
|
||||
|
||||
/* Macro to access MEM_OFFSET as an HOST_WIDE_INT. Evaluates MEM twice. */
|
||||
#define INT_MEM_OFFSET(mem) (MEM_OFFSET (mem) ? INTVAL (MEM_OFFSET (mem)) : 0)
|
||||
|
||||
/* Alloc pool for struct attrs_def. */
|
||||
static alloc_pool attrs_pool;
|
||||
|
||||
@ -927,7 +930,7 @@ static void
|
||||
var_mem_set (dataflow_set *set, rtx loc)
|
||||
{
|
||||
tree decl = MEM_EXPR (loc);
|
||||
HOST_WIDE_INT offset = MEM_OFFSET (loc) ? INTVAL (MEM_OFFSET (loc)) : 0;
|
||||
HOST_WIDE_INT offset = INT_MEM_OFFSET (loc);
|
||||
|
||||
decl = var_debug_decl (decl);
|
||||
|
||||
@ -945,7 +948,7 @@ static void
|
||||
var_mem_delete_and_set (dataflow_set *set, rtx loc, bool modify)
|
||||
{
|
||||
tree decl = MEM_EXPR (loc);
|
||||
HOST_WIDE_INT offset = MEM_OFFSET (loc) ? INTVAL (MEM_OFFSET (loc)) : 0;
|
||||
HOST_WIDE_INT offset = INT_MEM_OFFSET (loc);
|
||||
|
||||
decl = var_debug_decl (decl);
|
||||
|
||||
@ -962,7 +965,7 @@ static void
|
||||
var_mem_delete (dataflow_set *set, rtx loc, bool clobber)
|
||||
{
|
||||
tree decl = MEM_EXPR (loc);
|
||||
HOST_WIDE_INT offset = MEM_OFFSET (loc) ? INTVAL (MEM_OFFSET (loc)) : 0;
|
||||
HOST_WIDE_INT offset = INT_MEM_OFFSET (loc);
|
||||
|
||||
decl = var_debug_decl (decl);
|
||||
if (clobber)
|
||||
@ -1540,6 +1543,18 @@ track_expr_p (tree expr)
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Return true if OFFSET is a valid offset for a register or memory
|
||||
access we want to track. This is used to reject out-of-bounds
|
||||
accesses that can cause assertions to fail later. Note that we
|
||||
don't reject negative offsets because they can be generated for
|
||||
paradoxical subregs on big-endian architectures. */
|
||||
|
||||
static inline bool
|
||||
offset_valid_for_tracked_p (HOST_WIDE_INT offset)
|
||||
{
|
||||
return (-MAX_VAR_PARTS < offset) && (offset < MAX_VAR_PARTS);
|
||||
}
|
||||
|
||||
/* Determine whether a given LOC refers to the same variable part as
|
||||
EXPR+OFFSET. */
|
||||
|
||||
@ -1560,7 +1575,7 @@ same_variable_part_p (rtx loc, tree expr, HOST_WIDE_INT offset)
|
||||
else if (MEM_P (loc))
|
||||
{
|
||||
expr2 = MEM_EXPR (loc);
|
||||
offset2 = MEM_OFFSET (loc) ? INTVAL (MEM_OFFSET (loc)) : 0;
|
||||
offset2 = INT_MEM_OFFSET (loc);
|
||||
}
|
||||
else
|
||||
return false;
|
||||
@ -1590,7 +1605,8 @@ count_uses (rtx *loc, void *insn)
|
||||
}
|
||||
else if (MEM_P (*loc)
|
||||
&& MEM_EXPR (*loc)
|
||||
&& track_expr_p (MEM_EXPR (*loc)))
|
||||
&& track_expr_p (MEM_EXPR (*loc))
|
||||
&& offset_valid_for_tracked_p (INT_MEM_OFFSET (*loc)))
|
||||
{
|
||||
VTI (bb)->n_mos++;
|
||||
}
|
||||
@ -1626,14 +1642,19 @@ add_uses (rtx *loc, void *insn)
|
||||
basic_block bb = BLOCK_FOR_INSN ((rtx) insn);
|
||||
micro_operation *mo = VTI (bb)->mos + VTI (bb)->n_mos++;
|
||||
|
||||
mo->type = ((REG_EXPR (*loc) && track_expr_p (REG_EXPR (*loc)))
|
||||
? MO_USE : MO_USE_NO_VAR);
|
||||
if (REG_EXPR (*loc)
|
||||
&& track_expr_p (REG_EXPR (*loc))
|
||||
&& offset_valid_for_tracked_p (REG_OFFSET (*loc)))
|
||||
mo->type = MO_USE;
|
||||
else
|
||||
mo->type = MO_USE_NO_VAR;
|
||||
mo->u.loc = *loc;
|
||||
mo->insn = (rtx) insn;
|
||||
}
|
||||
else if (MEM_P (*loc)
|
||||
&& MEM_EXPR (*loc)
|
||||
&& track_expr_p (MEM_EXPR (*loc)))
|
||||
&& track_expr_p (MEM_EXPR (*loc))
|
||||
&& offset_valid_for_tracked_p (INT_MEM_OFFSET (*loc)))
|
||||
{
|
||||
basic_block bb = BLOCK_FOR_INSN ((rtx) insn);
|
||||
micro_operation *mo = VTI (bb)->mos + VTI (bb)->n_mos++;
|
||||
@ -1667,8 +1688,9 @@ add_stores (rtx loc, rtx expr, void *insn)
|
||||
micro_operation *mo = VTI (bb)->mos + VTI (bb)->n_mos++;
|
||||
|
||||
if (GET_CODE (expr) == CLOBBER
|
||||
|| ! REG_EXPR (loc)
|
||||
|| ! track_expr_p (REG_EXPR (loc)))
|
||||
|| !(REG_EXPR (loc)
|
||||
&& track_expr_p (REG_EXPR (loc))
|
||||
&& offset_valid_for_tracked_p (REG_OFFSET (loc))))
|
||||
mo->type = MO_CLOBBER;
|
||||
else if (GET_CODE (expr) == SET
|
||||
&& SET_DEST (expr) == loc
|
||||
@ -1683,7 +1705,8 @@ add_stores (rtx loc, rtx expr, void *insn)
|
||||
}
|
||||
else if (MEM_P (loc)
|
||||
&& MEM_EXPR (loc)
|
||||
&& track_expr_p (MEM_EXPR (loc)))
|
||||
&& track_expr_p (MEM_EXPR (loc))
|
||||
&& offset_valid_for_tracked_p (INT_MEM_OFFSET (loc)))
|
||||
{
|
||||
basic_block bb = BLOCK_FOR_INSN ((rtx) insn);
|
||||
micro_operation *mo = VTI (bb)->mos + VTI (bb)->n_mos++;
|
||||
@ -1694,8 +1717,7 @@ add_stores (rtx loc, rtx expr, void *insn)
|
||||
&& SET_DEST (expr) == loc
|
||||
&& same_variable_part_p (SET_SRC (expr),
|
||||
MEM_EXPR (loc),
|
||||
MEM_OFFSET (loc)
|
||||
? INTVAL (MEM_OFFSET (loc)) : 0))
|
||||
INT_MEM_OFFSET (loc)))
|
||||
mo->type = MO_COPY;
|
||||
else
|
||||
mo->type = MO_SET;
|
||||
@ -2726,7 +2748,7 @@ vt_get_decl_and_offset (rtx rtl, tree *declp, HOST_WIDE_INT *offsetp)
|
||||
if (MEM_ATTRS (rtl))
|
||||
{
|
||||
*declp = MEM_EXPR (rtl);
|
||||
*offsetp = MEM_OFFSET (rtl) ? INTVAL (MEM_OFFSET (rtl)) : 0;
|
||||
*offsetp = INT_MEM_OFFSET (rtl);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
@ -5876,9 +5876,10 @@ default_binds_local_p_1 (tree exp, int shlib)
|
||||
else if (DECL_WEAK (exp))
|
||||
local_p = false;
|
||||
/* If PIC, then assume that any global name can be overridden by
|
||||
symbols resolved from other modules. */
|
||||
symbols resolved from other modules, unless we are compiling with
|
||||
-fwhole-program, which assumes that names are local. */
|
||||
else if (shlib)
|
||||
local_p = false;
|
||||
local_p = flag_whole_program;
|
||||
/* Uninitialized COMMON variable may be unified with symbols
|
||||
resolved from other modules. */
|
||||
else if (DECL_COMMON (exp)
|
||||
|
Loading…
x
Reference in New Issue
Block a user