From nobody@FreeBSD.org  Sun Dec 19 21:59:11 2010
Return-Path: <nobody@FreeBSD.org>
Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2001:4f8:fff6::34])
	by hub.freebsd.org (Postfix) with ESMTP id 62D11106564A
	for <freebsd-gnats-submit@FreeBSD.org>; Sun, 19 Dec 2010 21:59:11 +0000 (UTC)
	(envelope-from nobody@FreeBSD.org)
Received: from red.freebsd.org (unknown [IPv6:2001:4f8:fff6::22])
	by mx1.freebsd.org (Postfix) with ESMTP id 505DA8FC0C
	for <freebsd-gnats-submit@FreeBSD.org>; Sun, 19 Dec 2010 21:59:11 +0000 (UTC)
Received: from red.freebsd.org (localhost [127.0.0.1])
	by red.freebsd.org (8.14.4/8.14.4) with ESMTP id oBJLxBOC031294
	for <freebsd-gnats-submit@FreeBSD.org>; Sun, 19 Dec 2010 21:59:11 GMT
	(envelope-from nobody@red.freebsd.org)
Received: (from nobody@localhost)
	by red.freebsd.org (8.14.4/8.14.4/Submit) id oBJLxBS3031293;
	Sun, 19 Dec 2010 21:59:11 GMT
	(envelope-from nobody)
Message-Id: <201012192159.oBJLxBS3031293@red.freebsd.org>
Date: Sun, 19 Dec 2010 21:59:11 GMT
From: "Pedro F. Giffuni" <giffunip@yahoo.com>
To: freebsd-gnats-submit@FreeBSD.org
Subject: Update base gcc with latest GPL2 patches  (FSF 4.2.2 prerelease)
X-Send-Pr-Version: www-3.1
X-GNATS-Notify:

>Number:         153298
>Category:       gnu
>Synopsis:       Update base gcc with latest GPL2 patches  (FSF 4.2.2 prerelease)
>Confidential:   no
>Severity:       non-critical
>Priority:       medium
>Responsible:    mm
>State:          closed
>Quarter:        
>Keywords:       
>Date-Required:  
>Class:          update
>Submitter-Id:   current-users
>Arrival-Date:   Sun Dec 19 22:00:22 UTC 2010
>Closed-Date:    Sat Apr 30 22:21:04 UTC 2011
>Last-Modified:  Mon May  2 08:40:11 UTC 2011
>Originator:     Pedro F. Giffuni
>Release:        8.2-BETA1
>Organization:
>Environment:
FreeBSD mogwai.giffuni.net 8.2-BETA1 FreeBSD 8.2-BETA1 #0: Sun Dec  5 02:13:37 UTC 2010     root@almeida.cse.buffalo.edu:/usr/obj/usr/src/sys/GENERIC  i386

>Description:
The latest revision of the FSF gcc-4.2 under the GPL2 was r127959.
In order to avoid confusion with the gcc-4.2.2 release (under the
GPL3), I updated the base system with all the changes up to
2007-07-25, right before the prerelease version bump.

After this I applied the fixes to the following gcc's PRs: 
middle-end/32563
debug/32610
c++/31337 
rtl-optimization/33148

Everything is under the GPL2. GCC still works and builds ports ;).
>How-To-Repeat:

>Fix:
Patch attached.

Patch attached with submission follows:

diff -ru gcc.orig/ChangeLog gcc/ChangeLog
--- gcc.orig/ChangeLog	2010-12-19 14:08:26.000000000 +0000
+++ gcc/ChangeLog	2010-12-19 16:25:49.000000000 +0000
@@ -1,3 +1,54 @@
+2007-07-25  Steve Ellcey  <sje@cup.hp.com>
+
+	PR target/32218
+	* tree-vect-patterns.c (vect_pattern_recog_1): Check for valid type.
+
+2007-07-25  Dorit Nuzman  <dorit@il.ibm.com>
+	    Devang Patel  <dpatel@apple.com>
+
+	PR tree-optimization/25413
+	* targhooks.c (default_builtin_vector_alignment_reachable): New.
+	* targhooks.h (default_builtin_vector_alignment_reachable): New.
+	* tree.h (contains_packed_reference): New.
+	* expr.c (contains_packed_reference): New.
+	* tree-vect-analyze.c (vector_alignment_reachable_p): New.
+	(vect_enhance_data_refs_alignment): Call
+	vector_alignment_reachable_p.
+	* target.h (vector_alignment_reachable): New builtin.
+	* target-def.h (TARGET_VECTOR_ALIGNMENT_REACHABLE): New.
+	* config/rs6000/rs6000.c (rs6000_vector_alignment_reachable): New.
+	(TARGET_VECTOR_ALIGNMENT_REACHABLE): Define.
+
+2007-07-24  Richard Guenther  <rguenther@suse.de>
+
+	Backport from mainline:
+	2007-07-16  Richard Guenther  <rguenther@suse.de>
+		    Uros Bizjak  <ubizjak@gmail.com>
+
+	* tree-if-conv.c (find_phi_replacement_condition): Unshare "*cond"
+	before forcing it to gimple operand.
+
+2007-07-24  Richard Guenther  <rguenther@suse.de>
+
+	PR tree-optimization/32723
+	Backport from mainline:
+	2007-03-09  Daniel Berlin  <dberlin@dberlin.org>
+
+	* tree-ssa-structalias.c (shared_bitmap_info_t): New structure.
+	(shared_bitmap_table): New variable.
+	(shared_bitmap_hash): New function.
+	(shared_bitmap_eq): Ditto
+	(shared_bitmap_lookup): Ditto.
+	(shared_bitmap_add): Ditto.
+	(find_what_p_points_to): Rewrite to use shared bitmap hashtable.
+	(init_alias_vars): Init shared bitmap hashtable.
+	(delete_points_to_sets): Delete shared bitmap hashtable.
+
+2007-07-23  Bernd Schmidt  <bernd.schmidt@analog.com>
+
+	* reload1.c (choose_reload_regs): Set reload_spill_index for regs
+	chosen during find_reloads.
+
 2007-07-19  Release Manager
 
 	* GCC 4.2.1 released.
diff -ru gcc.orig/config/rs6000/rs6000.c gcc/config/rs6000/rs6000.c
--- gcc.orig/config/rs6000/rs6000.c	2010-12-19 14:08:18.000000000 +0000
+++ gcc/config/rs6000/rs6000.c	2010-12-19 15:14:32.000000000 +0000
@@ -664,6 +664,7 @@
 static tree rs6000_builtin_mask_for_load (void);
 
 static void def_builtin (int, const char *, tree, int);
+static bool rs6000_vector_alignment_reachable (tree, bool);
 static void rs6000_init_builtins (void);
 static rtx rs6000_expand_unop_builtin (enum insn_code, tree, rtx);
 static rtx rs6000_expand_binop_builtin (enum insn_code, tree, rtx);
@@ -915,6 +916,9 @@
 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
 
+#undef TARGET_VECTOR_ALIGNMENT_REACHABLE
+#define TARGET_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
+
 #undef TARGET_INIT_BUILTINS
 #define TARGET_INIT_BUILTINS rs6000_init_builtins
 
@@ -1584,6 +1588,37 @@
     return 0;
 }
 
+
+/* Return true iff, data reference of TYPE can reach vector alignment (16)
+   after applying N number of iterations.  This routine does not determine
+   how may iterations are required to reach desired alignment.  */
+
+static bool
+rs6000_vector_alignment_reachable (tree type ATTRIBUTE_UNUSED, bool is_packed)
+{
+  if (is_packed)
+    return false;
+
+  if (TARGET_32BIT)
+    {
+      if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
+        return true;
+
+      if (rs6000_alignment_flags ==  MASK_ALIGN_POWER)
+        return true;
+
+      return false;
+    }
+  else
+    {
+      if (TARGET_MACHO)
+        return false;
+
+      /* Assuming that all other types are naturally aligned. CHECKME!  */
+      return true;
+    }
+}
+
 /* Handle generic options of the form -mfoo=yes/no.
    NAME is the option name.
    VALUE is the option value.
Only in gcc: dwarf2aout.c
diff -ru gcc.orig/dwarf2out.c gcc/dwarf2out.c
--- gcc.orig/dwarf2out.c	2010-12-19 14:08:23.000000000 +0000
+++ gcc/dwarf2out.c	2010-12-19 16:51:06.000000000 +0000
@@ -10065,6 +10065,43 @@
   else if (initializer_constant_valid_p (init, type)
 	   && ! walk_tree (&init, reference_to_unused, NULL, NULL))
     {
+      /* Convert vector CONSTRUCTOR initializers to VECTOR_CST if
+	 possible.  */
+      if (TREE_CODE (type) == VECTOR_TYPE)
+	switch (TREE_CODE (init))
+	  {
+	  case VECTOR_CST:
+	    break;
+	  case CONSTRUCTOR:
+	    if (TREE_CONSTANT (init))
+	      {
+		VEC(constructor_elt,gc) *elts = CONSTRUCTOR_ELTS (init);
+		bool constant_p = true;
+		tree value;
+		unsigned HOST_WIDE_INT ix;
+
+		/* Even when ctor is constant, it might contain non-*_CST
+		   elements (e.g. { 1.0/0.0 - 1.0/0.0, 0.0 }) and those don't
+		   belong into VECTOR_CST nodes.  */
+		FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, value)
+		  if (!CONSTANT_CLASS_P (value))
+		    {
+		      constant_p = false;
+		      break;
+		    }
+
+		if (constant_p)
+		  {
+		    init = build_vector_from_ctor (type, elts);
+		    break;
+		  }
+	      }
+	    /* FALLTHRU */
+
+	  default:
+	    return NULL;
+	  }
+
       rtl = expand_expr (init, NULL_RTX, VOIDmode, EXPAND_INITIALIZER);
 
       /* If expand_expr returns a MEM, it wasn't immediate.  */
@@ -13197,7 +13234,8 @@
 	 was generated within the original definition of an inline function) we
 	 have to generate a special (abbreviated) DW_TAG_structure_type,
 	 DW_TAG_union_type, or DW_TAG_enumeration_type DIE here.  */
-      if (TYPE_DECL_IS_STUB (decl) && decl_ultimate_origin (decl) != NULL_TREE)
+      if (TYPE_DECL_IS_STUB (decl) && decl_ultimate_origin (decl) != NULL_TREE
+	  && is_tagged_type (TREE_TYPE (decl)))
 	{
 	  gen_tagged_type_instantiation_die (TREE_TYPE (decl), context_die);
 	  break;
diff -ru gcc.orig/expr.c gcc/expr.c
--- gcc.orig/expr.c	2010-12-19 14:08:23.000000000 +0000
+++ gcc/expr.c	2010-12-19 16:17:49.000000000 +0000
@@ -5654,7 +5654,6 @@
   enum machine_mode mode = VOIDmode;
   tree offset = size_zero_node;
   tree bit_offset = bitsize_zero_node;
-  tree tem;
 
   /* First get the mode, signedness, and size.  We do this from just the
      outermost expression.  */
@@ -5690,6 +5689,8 @@
 	*pbitsize = tree_low_cst (size_tree, 1);
     }
 
+  *pmode = mode;
+
   /* Compute cumulative bit-offset for nested component-refs and array-refs,
      and find the ultimate containing object.  */
   while (1)
@@ -5774,21 +5775,69 @@
  done:
 
   /* If OFFSET is constant, see if we can return the whole thing as a
-     constant bit position.  Otherwise, split it up.  */
-  if (host_integerp (offset, 0)
-      && 0 != (tem = size_binop (MULT_EXPR,
-				 fold_convert (bitsizetype, offset),
-				 bitsize_unit_node))
-      && 0 != (tem = size_binop (PLUS_EXPR, tem, bit_offset))
-      && host_integerp (tem, 0))
-    *pbitpos = tree_low_cst (tem, 0), *poffset = 0;
-  else
-    *pbitpos = tree_low_cst (bit_offset, 0), *poffset = offset;
+     constant bit position.  Make sure to handle overflow during
+     this conversion.  */
+  if (host_integerp (offset, 0))
+    {
+      double_int tem = double_int_mul (tree_to_double_int (offset),
+				       uhwi_to_double_int (BITS_PER_UNIT));
+      tem = double_int_add (tem, tree_to_double_int (bit_offset));
+      if (double_int_fits_in_shwi_p (tem))
+	{
+	  *pbitpos = double_int_to_shwi (tem);
+	  *poffset = NULL_TREE;
+	  return exp;
+	}
+    }
+
+  /* Otherwise, split it up.  */
+  *pbitpos = tree_low_cst (bit_offset, 0);
+  *poffset = offset;
 
-  *pmode = mode;
   return exp;
 }
 
+/* Given an expression EXP that may be a COMPONENT_REF or an ARRAY_REF,
+   look for whether EXP or any nested component-refs within EXP is marked
+   as PACKED.  */
+
+bool
+contains_packed_reference (tree exp)
+{
+  bool packed_p = false;
+
+  while (1)
+    {
+      switch (TREE_CODE (exp))
+	{
+	case COMPONENT_REF:
+	  {
+	    tree field = TREE_OPERAND (exp, 1);
+	    packed_p = DECL_PACKED (field) 
+		       || TYPE_PACKED (TREE_TYPE (field))
+		       || TYPE_PACKED (TREE_TYPE (exp));
+	    if (packed_p)
+	      goto done;
+	  }
+	  break;
+
+	case BIT_FIELD_REF:
+	case ARRAY_REF:
+	case ARRAY_RANGE_REF:
+	case REALPART_EXPR:
+	case IMAGPART_EXPR:
+	case VIEW_CONVERT_EXPR:
+	  break;
+
+	default:
+	  goto done;
+	}
+      exp = TREE_OPERAND (exp, 0);
+    }
+ done:
+  return packed_p;
+}
+
 /* Return a tree of sizetype representing the size, in bytes, of the element
    of EXP, an ARRAY_REF.  */
 
diff -ru gcc.orig/gimplify.c gcc/gimplify.c
--- gcc.orig/gimplify.c	2010-12-19 14:08:24.000000000 +0000
+++ gcc/gimplify.c	2010-12-19 16:29:44.000000000 +0000
@@ -3532,8 +3532,16 @@
   gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
 	      || TREE_CODE (*expr_p) == INIT_EXPR);
 
-  /* For zero sized types only gimplify the left hand side and right hand side
-     as statements and throw away the assignment.  */
+  /* See if any simplifications can be done based on what the RHS is.  */
+  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
+				  want_value);
+  if (ret != GS_UNHANDLED)
+    return ret;
+
+  /* For zero sized types only gimplify the left hand side and right hand
+     side as statements and throw away the assignment.  Do this after
+     gimplify_modify_expr_rhs so we handle TARGET_EXPRs of addressable
+     types properly.  */
   if (zero_sized_type (TREE_TYPE (*from_p)))
     {
       gimplify_stmt (from_p);
@@ -3544,12 +3552,6 @@
       return GS_ALL_DONE;
     }
 
-  /* See if any simplifications can be done based on what the RHS is.  */
-  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
-				  want_value);
-  if (ret != GS_UNHANDLED)
-    return ret;
-
   /* If the value being copied is of variable width, compute the length
      of the copy into a WITH_SIZE_EXPR.   Note that we need to do this
      before gimplifying any of the operands so that we can resolve any
diff -ru gcc.orig/reload1.c gcc/reload1.c
--- gcc.orig/reload1.c	2010-12-19 14:08:24.000000000 +0000
+++ gcc/reload1.c	2010-12-19 14:26:26.000000000 +0000
@@ -5451,7 +5451,14 @@
   for (j = 0; j < n_reloads; j++)
     {
       reload_order[j] = j;
-      reload_spill_index[j] = -1;
+      if (rld[j].reg_rtx != NULL_RTX)
+	{
+	  gcc_assert (REG_P (rld[j].reg_rtx)
+		      && HARD_REGISTER_P (rld[j].reg_rtx));
+	  reload_spill_index[j] = REGNO (rld[j].reg_rtx);
+	}
+      else
+	reload_spill_index[j] = -1;
 
       if (rld[j].nregs > 1)
 	{
diff -ru gcc.orig/target-def.h gcc/target-def.h
--- gcc.orig/target-def.h	2010-12-19 14:08:25.000000000 +0000
+++ gcc/target-def.h	2010-12-19 15:10:58.000000000 +0000
@@ -337,9 +337,12 @@
    TARGET_SCHED_SET_SCHED_FLAGS}
 
 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD 0
+#define TARGET_VECTOR_ALIGNMENT_REACHABLE \
+  default_builtin_vector_alignment_reachable
 
 #define TARGET_VECTORIZE                                                \
-  {TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD}
+  {TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD,				\
+   TARGET_VECTOR_ALIGNMENT_REACHABLE}
 
 #define TARGET_DEFAULT_TARGET_FLAGS 0
 
diff -ru gcc.orig/target.h gcc/target.h
--- gcc.orig/target.h	2010-12-19 14:08:25.000000000 +0000
+++ gcc/target.h	2010-12-19 15:10:28.000000000 +0000
@@ -375,6 +375,10 @@
        by the vectorizer, and return the decl of the target builtin
        function.  */
     tree (* builtin_mask_for_load) (void);
+
+    /* Return true if vector alignment is reachable (by peeling N
+      interations) for the given type.  */
+     bool (* vector_alignment_reachable) (tree, bool);
   } vectorize;
 
   /* The initial value of target_flags.  */
diff -ru gcc.orig/targhooks.c gcc/targhooks.c
--- gcc.orig/targhooks.c	2010-12-19 14:08:25.000000000 +0000
+++ gcc/targhooks.c	2010-12-19 15:05:00.000000000 +0000
@@ -604,4 +604,20 @@
   return flag_pic ? 3 : 0;
 }
 
+bool
+default_builtin_vector_alignment_reachable (tree type, bool is_packed)
+{
+  if (is_packed)
+    return false;
+
+  /* Assuming that types whose size is > pointer-size are not guaranteed to be
+     naturally aligned.  */
+  if (tree_int_cst_compare (TYPE_SIZE (type), bitsize_int (POINTER_SIZE)) > 0)
+    return false;
+
+  /* Assuming that types whose size is <= pointer-size
+     are naturally aligned.  */
+  return true;
+}
+
 #include "gt-targhooks.h"
diff -ru gcc.orig/targhooks.h gcc/targhooks.h
--- gcc.orig/targhooks.h	2010-12-19 14:08:25.000000000 +0000
+++ gcc/targhooks.h	2010-12-19 15:05:37.000000000 +0000
@@ -57,6 +57,8 @@
 
 extern bool default_narrow_bitfield (void);
 
+extern bool default_builtin_vector_alignment_reachable (tree, bool);
+
 /* These are here, and not in hooks.[ch], because not all users of
    hooks.h include tm.h, and thus we don't have CUMULATIVE_ARGS.  */
 
diff -ru gcc.orig/tree-if-conv.c gcc/tree-if-conv.c
--- gcc.orig/tree-if-conv.c	2010-12-19 14:08:25.000000000 +0000
+++ gcc/tree-if-conv.c	2010-12-19 14:32:44.000000000 +0000
@@ -743,7 +743,7 @@
       if (TREE_CODE (*cond) == TRUTH_NOT_EXPR)
 	/* We can be smart here and choose inverted
 	   condition without switching bbs.  */
-	  *cond = invert_truthvalue (*cond);
+	*cond = invert_truthvalue (*cond);
       else
 	/* Select non loop header bb.  */
 	first_edge = second_edge;
@@ -762,9 +762,11 @@
 
   /* Create temp. for the condition. Vectorizer prefers to have gimple
      value as condition. Various targets use different means to communicate
-     condition in vector compare operation. Using gimple value allows compiler
-     to emit vector compare and select RTL without exposing compare's result.  */
-  *cond = force_gimple_operand (*cond, &new_stmts, false, NULL_TREE);
+     condition in vector compare operation. Using gimple value allows
+     compiler to emit vector compare and select RTL without exposing
+     compare's result.  */
+  *cond = force_gimple_operand (unshare_expr (*cond), &new_stmts,
+				false, NULL_TREE);
   if (new_stmts)
     bsi_insert_before (bsi, new_stmts, BSI_SAME_STMT);
   if (!is_gimple_reg (*cond) && !is_gimple_condexpr (*cond))
diff -ru gcc.orig/tree-ssa-structalias.c gcc/tree-ssa-structalias.c
--- gcc.orig/tree-ssa-structalias.c	2010-12-19 14:08:25.000000000 +0000
+++ gcc/tree-ssa-structalias.c	2010-12-19 14:30:17.000000000 +0000
@@ -4350,6 +4350,75 @@
   process_constraint (new_constraint (lhs, rhs));
 }
 
+/* Structure used to put solution bitmaps in a hashtable so they can
+   be shared among variables with the same points-to set.  */
+
+typedef struct shared_bitmap_info
+{
+  bitmap pt_vars;
+  hashval_t hashcode;
+} *shared_bitmap_info_t;
+
+static htab_t shared_bitmap_table;
+
+/* Hash function for a shared_bitmap_info_t */
+
+static hashval_t
+shared_bitmap_hash (const void *p)
+{
+  const shared_bitmap_info_t bi = (shared_bitmap_info_t) p;
+  return bi->hashcode;
+}
+
+/* Equality function for two shared_bitmap_info_t's. */
+
+static int
+shared_bitmap_eq (const void *p1, const void *p2)
+{
+  const shared_bitmap_info_t sbi1 = (shared_bitmap_info_t) p1;
+  const shared_bitmap_info_t sbi2 = (shared_bitmap_info_t) p2;
+  return bitmap_equal_p (sbi1->pt_vars, sbi2->pt_vars);
+}
+
+/* Lookup a bitmap in the shared bitmap hashtable, and return an already
+   existing instance if there is one, NULL otherwise.  */
+
+static bitmap
+shared_bitmap_lookup (bitmap pt_vars)
+{
+  void **slot;
+  struct shared_bitmap_info sbi;
+
+  sbi.pt_vars = pt_vars;
+  sbi.hashcode = bitmap_hash (pt_vars);
+  
+  slot = htab_find_slot_with_hash (shared_bitmap_table, &sbi,
+				   sbi.hashcode, NO_INSERT);
+  if (!slot)
+    return NULL;
+  else
+    return ((shared_bitmap_info_t) *slot)->pt_vars;
+}
+
+
+/* Add a bitmap to the shared bitmap hashtable.  */
+
+static void
+shared_bitmap_add (bitmap pt_vars)
+{
+  void **slot;
+  shared_bitmap_info_t sbi = XNEW (struct shared_bitmap_info);
+  
+  sbi->pt_vars = pt_vars;
+  sbi->hashcode = bitmap_hash (pt_vars);
+  
+  slot = htab_find_slot_with_hash (shared_bitmap_table, sbi,
+				   sbi->hashcode, INSERT);
+  gcc_assert (!*slot);
+  *slot = (void *) sbi;
+}
+
+
 /* Set bits in INTO corresponding to the variable uids in solution set
    FROM, which came from variable PTR.
    For variables that are actually dereferenced, we also use type
@@ -4460,7 +4529,9 @@
 	  struct ptr_info_def *pi = get_ptr_info (p);
 	  unsigned int i;
 	  bitmap_iterator bi;
-
+	  bitmap finished_solution;
+	  bitmap result;
+	  
 	  /* This variable may have been collapsed, let's get the real
 	     variable.  */
 	  vi = get_varinfo (find (vi->id));
@@ -4492,10 +4563,20 @@
 	  if (pi->pt_anything)
 	    return false;
 
-	  if (!pi->pt_vars)
-	    pi->pt_vars = BITMAP_GGC_ALLOC ();
+	  finished_solution = BITMAP_GGC_ALLOC ();
+	  set_uids_in_ptset (vi->decl, finished_solution, vi->solution);
+	  result = shared_bitmap_lookup (finished_solution);
 
-	  set_uids_in_ptset (vi->decl, pi->pt_vars, vi->solution);
+	  if (!result)
+	    {
+	      shared_bitmap_add (finished_solution);
+	      pi->pt_vars = finished_solution;
+	    }
+	  else
+	    {
+	      pi->pt_vars = result;
+	      bitmap_clear (finished_solution);
+	    }
 
 	  if (bitmap_empty_p (pi->pt_vars))
 	    pi->pt_vars = NULL;
@@ -4691,6 +4772,8 @@
   vi_for_tree = pointer_map_create ();
 
   memset (&stats, 0, sizeof (stats));
+  shared_bitmap_table = htab_create (511, shared_bitmap_hash,
+				     shared_bitmap_eq, free);
   init_base_vars ();
 }
 
@@ -4923,6 +5006,7 @@
   varinfo_t v;
   int i;
 
+  htab_delete (shared_bitmap_table);
   if (dump_file && (dump_flags & TDF_STATS))
     fprintf (dump_file, "Points to sets created:%d\n",
 	     stats.points_to_sets_created);
diff -ru gcc.orig/tree-vect-analyze.c gcc/tree-vect-analyze.c
--- gcc.orig/tree-vect-analyze.c	2010-12-19 14:08:25.000000000 +0000
+++ gcc/tree-vect-analyze.c	2010-12-19 15:16:30.000000000 +0000
@@ -25,6 +25,7 @@
 #include "tm.h"
 #include "ggc.h"
 #include "tree.h"
+#include "target.h"
 #include "basic-block.h"
 #include "diagnostic.h"
 #include "tree-flow.h"
@@ -911,6 +912,57 @@
 }
 
 
+/* Function vector_alignment_reachable_p
+
+   Return true if vector alignment for DR is reachable by peeling
+   a few loop iterations.  Return false otherwise.  */
+
+static bool
+vector_alignment_reachable_p (struct data_reference *dr)
+{
+  tree stmt = DR_STMT (dr);
+  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+
+  /* If misalignment is known at the compile time then allow peeling
+     only if natural alignment is reachable through peeling.  */
+  if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
+    {
+      HOST_WIDE_INT elmsize = 
+		int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
+      if (vect_print_dump_info (REPORT_DETAILS))
+	{
+	  fprintf (vect_dump, "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
+	  fprintf (vect_dump, ". misalignment = %d. ", DR_MISALIGNMENT (dr));
+	}
+      if (DR_MISALIGNMENT (dr) % elmsize)
+	{
+	  if (vect_print_dump_info (REPORT_DETAILS))
+	    fprintf (vect_dump, "data size does not divide the misalignment.\n");
+	  return false;
+	}
+    }
+
+  if (!known_alignment_for_access_p (dr))
+    {
+      tree type = (TREE_TYPE (DR_REF (dr)));
+      tree ba = DR_BASE_OBJECT (dr);
+      bool is_packed = false;
+
+      if (ba)
+	is_packed = contains_packed_reference (ba);
+
+      if (vect_print_dump_info (REPORT_DETAILS))
+	fprintf (vect_dump, "Unknown misalignment, is_packed = %d",is_packed);
+      if (targetm.vectorize.vector_alignment_reachable (type, is_packed))
+	return true;
+      else
+	return false;
+    }
+
+  return true;
+}
+
 /* Function vect_enhance_data_refs_alignment
 
    This pass will use loop versioning and loop peeling in order to enhance
@@ -1056,8 +1108,11 @@
   for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
     if (!DR_IS_READ (dr) && !aligned_access_p (dr))
       {
-	dr0 = dr;
-	do_peeling = true;
+        do_peeling = vector_alignment_reachable_p (dr);
+        if (do_peeling)
+          dr0 = dr;
+        if (!do_peeling && vect_print_dump_info (REPORT_DETAILS))
+          fprintf (vect_dump, "vector alignment may not be reachable");
 	break;
       }
 
diff -ru gcc.orig/tree.c gcc/tree.c
--- gcc.orig/tree.c	2010-12-19 14:08:25.000000000 +0000
+++ gcc/tree.c	2010-12-19 16:05:48.000000000 +0000
@@ -4540,7 +4540,8 @@
 	       && (HOST_WIDE_INT) TREE_INT_CST_LOW (t) >= 0)
 	      || (! pos && TREE_INT_CST_HIGH (t) == -1
 		  && (HOST_WIDE_INT) TREE_INT_CST_LOW (t) < 0
-		  && !TYPE_UNSIGNED (TREE_TYPE (t)))
+		  && (!TYPE_UNSIGNED (TREE_TYPE (t))
+		      || TYPE_IS_SIZETYPE (TREE_TYPE (t))))
 	      || (pos && TREE_INT_CST_HIGH (t) == 0)));
 }
 
diff -ru gcc.orig/tree.h gcc/tree.h
--- gcc.orig/tree.h	2010-12-19 14:08:25.000000000 +0000
+++ gcc/tree.h	2010-12-19 15:06:19.000000000 +0000
@@ -4112,6 +4112,12 @@
 				 tree *, enum machine_mode *, int *, int *,
 				 bool);
 
+/* Given an expression EXP that may be a COMPONENT_REF or an ARRAY_REF,
+   look for whether EXP or any nested component-refs within EXP is marked
+   as PACKED.  */
+
+extern bool contains_packed_reference (tree exp);
+
 /* Return 1 if T is an expression that get_inner_reference handles.  */
 
 extern int handled_component_p (tree);


>Release-Note:
>Audit-Trail:

From: "Pedro F. Giffuni" <giffunip@yahoo.com>
To: bug-followup@FreeBSD.org
Cc:  
Subject: Re: gnu/153298: Update base gcc with latest GPL2 patches  (FSF 4.2.2 prerelease)
Date: Sun, 26 Dec 2010 10:42:06 -0800 (PST)

 This additional patch solves GCC PR 28796. It's originally
 from Apple's GCC so it's also covered by the GPL2.
 
 2006-10-24  Richard Guenther  <rguenther@suse.de>	       
 Radar 5675014
 PR middle-end/28796
 * builtins.c (fold_builtin_classify): Use HONOR_INFINITIES
 and HONOR_NANS instead of MODE_HAS_INFINITIES and MODE_HAS_NANS
 for deciding optimizations in consistency with fold-const.c
 (fold_builtin_unordered_cmp): Likewise.
 
 --- gcc.orig/builtins.c	2010-12-19 14:08:22.000000000 +0000
 +++ gcc/builtins.c	2010-12-26 11:51:18.000000000 +0000
 @@ -8720,7 +8720,7 @@
    switch (builtin_index)
      {
      case BUILT_IN_ISINF:
 -      if (!MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (arg))))
 +      if (!HONOR_INFINITIES (TYPE_MODE (TREE_TYPE (arg))))
  	return omit_one_operand (type, integer_zero_node, arg);
  
        if (TREE_CODE (arg) == REAL_CST)
 @@ -8736,8 +8736,8 @@
        return NULL_TREE;
  
      case BUILT_IN_FINITE:
 -      if (!MODE_HAS_NANS (TYPE_MODE (TREE_TYPE (arg)))
 -	  && !MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (arg))))
 +      if (!HONOR_NANS (TYPE_MODE (TREE_TYPE (arg)))
 +	  && !HONOR_INFINITIES (TYPE_MODE (TREE_TYPE (arg))))
  	return omit_one_operand (type, integer_zero_node, arg);
  
        if (TREE_CODE (arg) == REAL_CST)
 @@ -8750,7 +8750,7 @@
        return NULL_TREE;
  
      case BUILT_IN_ISNAN:
 -      if (!MODE_HAS_NANS (TYPE_MODE (TREE_TYPE (arg))))
 +      if (!HONOR_NANS (TYPE_MODE (TREE_TYPE (arg))))
  	return omit_one_operand (type, integer_zero_node, arg);
  
        if (TREE_CODE (arg) == REAL_CST)
 @@ -8833,13 +8833,13 @@
  
    if (unordered_code == UNORDERED_EXPR)
      {
 -      if (!MODE_HAS_NANS (TYPE_MODE (TREE_TYPE (arg0))))
 +      if (!HONOR_NANS (TYPE_MODE (TREE_TYPE (arg0))))
  	return omit_two_operands (type, integer_zero_node, arg0, arg1);
        return fold_build2 (UNORDERED_EXPR, type, arg0, arg1);
      }
  
 -  code = MODE_HAS_NANS (TYPE_MODE (TREE_TYPE (arg0))) ? unordered_code
 -						      : ordered_code;
 +  code = HONOR_NANS (TYPE_MODE (TREE_TYPE (arg0))) ? unordered_code
 +						   : ordered_code;
    return fold_build1 (TRUTH_NOT_EXPR, type,
  		      fold_build2 (code, type, arg0, arg1));
  }
 
 
 
       

From: Alexander Best <arundel@freebsd.org>
To: bug-followup@freebsd.org
Cc:  
Subject: Re: gnu/153298: Update base gcc with latest GPL2 patches (FSF 4.2.2 prerelease)
Date: Wed, 5 Jan 2011 03:04:53 +0000

 where did you obtain the patch from the apple gcc version? i'd like to have a
 look at the code, since some time ago apple refused to hand over copyright of
 any of their gcc changes to the FSF [1].
 
 cheers.
 alex
 
 [1] http://gcc.gnu.org/ml/gcc/2010-09/msg00132.html
 
 -- 
 a13x

From: "Pedro F. Giffuni" <giffunip@yahoo.com>
To: bug-followup@FreeBSD.org
Cc: Alexander Best <arundel@freebsd.org>
Subject: Re: gnu/153298: Update base gcc with latest GPL2 patches  (FSF 4.2.2 prerelease)
Date: Sat, 8 Jan 2011 10:30:10 -0800 (PST)

 Hi Alexander;
 
 Apple released the sources to their gcc:
 http://opensource.apple.com/source/gcc/gcc-5646/
 
 I did a failed attempt to port the blocks support to
 our gcc so it was a matter of checking out a diff
 I made against gcc-4.2.1.
 
 The particular gcc PR 28796 is here:
 http://gcc.gnu.org/bugzilla/show_bug.cgi?id=28796
 
 cheers,
 
 Pedro.
 
 
       

From: Martin Matuska <mm@FreeBSD.org>
To: bug-followup@FreeBSD.org, giffunip@yahoo.com
Cc:  
Subject: Re: gnu/153298: Update base gcc with latest GPL2 patches  (FSF 4.2.2
 prerelease)
Date: Mon, 07 Mar 2011 16:37:01 +0100

 In the meantime I have backported some useful, non-intrusive,
 GPLv2-licensed CPU types from 4.3 (core2, geode, opteron-sse3).
 
 The latest vendor version in FreeBSD is gcc 4.2 SVN rev. 126787
 It would be nice to have a patch generated out of SVN - merging just the
 exact difference between 126787 and 127959.
 I might take a look at that.
 
 Thanks,
 mm

From: "Pedro F. Giffuni" <giffunip@yahoo.com>
To: bug-followup@FreeBSD.org, Martin Matuska <mm@FreeBSD.org>
Cc:  
Subject: Re: gnu/153298: Update base gcc with latest GPL2 patches  (FSF 4.2.2 prerelease)
Date: Mon, 7 Mar 2011 07:57:15 -0800 (PST)

 --- On Mon, 3/7/11, Martin Matuska <mm@FreeBSD.org> wrote:
 ...
 > In the meantime I have backported
 > some useful, non-intrusive,
 > GPLv2-licensed CPU types from 4.3 (core2, geode,
 > opteron-sse3).
 >
 
 Yes, thanks .. I've been following those to add them
 to llvm-gcc4 which adds support for SSE4 and blocks from
 Apple gcc.
  
 > The latest vendor version in FreeBSD is gcc 4.2 SVN rev.
 > 126787
 > It would be nice to have a patch generated out of SVN -
 > merging just the
 > exact difference between 126787 and 127959.
 > I might take a look at that.
 >
 
 The results should be the same as my patch but I
 specifically avoided the version bump to gcc
 4.2.2 (prerelease) to avoid license confusions.
 (I didn't care about the initial score support
 either and I separated the MIPS stuff in a
 different PR)
 
 cheers,
 
 Pedro.
 
 
       

From: Martin Matuska <mm@FreeBSD.org>
To: "Pedro F. Giffuni" <giffunip@yahoo.com>
Cc: bug-followup@FreeBSD.org
Subject: Re: gnu/153298: Update base gcc with latest GPL2 patches  (FSF 4.2.2
 prerelease)
Date: Mon, 07 Mar 2011 21:49:13 +0100

 What would also help - provide some demonstration for the patched
 issues, e.g. port XY does not compile with system gcc because of that
 problem and uses gcc from ports. Such things are good arguments for the
 integration of the patches (as we are quite conservative and most of our
 users don't seem to be bugged by these problems).

From: "Pedro F. Giffuni" <giffunip@yahoo.com>
To: Martin Matuska <mm@FreeBSD.org>
Cc: bug-followup@FreeBSD.org
Subject: Re: gnu/153298: Update base gcc with latest GPL2 patches  (FSF 4.2.2 prerelease)
Date: Mon, 7 Mar 2011 13:33:28 -0800 (PST)

 Hmm...
 I understand your position (surely shared by other developers)
 but these bugfixes come from gcc's stable branch and I will not
 go hunting for bugs in the ports tree that have already been
 reported in gcc GNATs.
 
 Some of the bugs don't affect building at all but will have
 serious performance implications or will break debugging. 
 
 An example (both are bugs still in our base gcc):
 gcc PR tree-optimization/25413 is pretty serious as
 it means Pentium 4 SSE optimizations are broken.
 gcc PR tree-optimization/32723 is much less serious
 and code similar to the testcase is not easy to find
 in the ports tree (it compiles but uses a LOT of memory).
 
 I know that 25413 should be applied but it's ridiculous not
 to fix 32723 (which appears first in the changelog) just
 because it's not something people have noticed.
 
 Eventually people will notice these issues and will blame
 FreeBSD and not gcc... and I am starting to think they will
 be right to do so :(.
 
 
       

From: dfilter@FreeBSD.ORG (dfilter service)
To: bug-followup@FreeBSD.org
Cc:  
Subject: Re: gnu/153298: commit references a PR
Date: Thu, 10 Mar 2011 13:59:32 +0000 (UTC)

 Author: mm
 Date: Thu Mar 10 13:59:17 2011
 New Revision: 219451
 URL: http://svn.freebsd.org/changeset/base/219451
 
 Log:
   Vendor upgrade of gcc 4.2 to last GPLv2 revision.
   
   PR:		gnu/153298
   Obtained from:	gcc (gcc-4_2-branch, rev. 127959)
 
 Modified:
   vendor/gcc/dist/gcc/BASE-VER
   vendor/gcc/dist/gcc/ChangeLog
   vendor/gcc/dist/gcc/DATESTAMP
   vendor/gcc/dist/gcc/DEV-PHASE
   vendor/gcc/dist/gcc/config/mips/predicates.md
   vendor/gcc/dist/gcc/config/rs6000/rs6000.c
   vendor/gcc/dist/gcc/config/s390/s390.md
   vendor/gcc/dist/gcc/cp/ChangeLog
   vendor/gcc/dist/gcc/cp/call.c
   vendor/gcc/dist/gcc/cp/cp-tree.h
   vendor/gcc/dist/gcc/cp/cxx-pretty-print.c
   vendor/gcc/dist/gcc/cp/decl.c
   vendor/gcc/dist/gcc/cp/decl2.c
   vendor/gcc/dist/gcc/cp/error.c
   vendor/gcc/dist/gcc/cp/lex.c
   vendor/gcc/dist/gcc/cp/name-lookup.c
   vendor/gcc/dist/gcc/cp/pt.c
   vendor/gcc/dist/gcc/cp/semantics.c
   vendor/gcc/dist/gcc/cp/typeck.c
   vendor/gcc/dist/gcc/doc/contrib.texi
   vendor/gcc/dist/gcc/dwarf2out.c
   vendor/gcc/dist/gcc/expr.c
   vendor/gcc/dist/gcc/fold-const.c
   vendor/gcc/dist/gcc/gimplify.c
   vendor/gcc/dist/gcc/reload1.c
   vendor/gcc/dist/gcc/simplify-rtx.c
   vendor/gcc/dist/gcc/target-def.h
   vendor/gcc/dist/gcc/target.h
   vendor/gcc/dist/gcc/targhooks.c
   vendor/gcc/dist/gcc/targhooks.h
   vendor/gcc/dist/gcc/tree-if-conv.c
   vendor/gcc/dist/gcc/tree-ssa-structalias.c
   vendor/gcc/dist/gcc/tree-vect-analyze.c
   vendor/gcc/dist/gcc/tree-vect-patterns.c
   vendor/gcc/dist/gcc/tree.c
   vendor/gcc/dist/gcc/tree.h
   vendor/gcc/dist/libstdc++/ChangeLog
   vendor/gcc/dist/libstdc++/include/std/std_valarray.h
   vendor/gcc/dist/libstdc++/include/tr1/random
 
 Modified: vendor/gcc/dist/gcc/BASE-VER
 ==============================================================================
 --- vendor/gcc/dist/gcc/BASE-VER	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/BASE-VER	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -1 +1 @@
 -4.2.1
 +4.2.2
 
 Modified: vendor/gcc/dist/gcc/ChangeLog
 ==============================================================================
 --- vendor/gcc/dist/gcc/ChangeLog	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/ChangeLog	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -1,3 +1,121 @@
 +2007-08-31  Jakub Jelinek  <jakub@redhat.com>
 +
 +	PR rtl-optimization/33148
 +	* simplify-rtx.c (simplify_unary_operation_1): Only optimize
 +	(neg (lt X 0)) if X has scalar int mode.
 +
 +	PR debug/32914
 +	* dwarf2out.c (rtl_for_decl_init): If vector decl has CONSTRUCTOR
 +	initializer, use build_vector_from_ctor if possible to create
 +	VECTOR_CST out of it.  If vector initializer is not VECTOR_CST
 +	even after this, return NULL.
 +
 +2007-08-27  Jason Merrill  <jason@redhat.com>
 +
 +	PR c++/31337
 +	* gimplify.c (gimplify_modify_expr): Discard the assignment of 
 +	zero-sized types after calling gimplify_modify_expr_rhs.
 +
 +2007-08-24  Jakub Jelinek  <jakub@redhat.com>
 +
 +	PR debug/32610
 +	* dwarf2out.c (gen_decl_die): Don't call
 +	gen_tagged_type_instantiation_die if decl doesn't have tagged type.
 +
 +2007-08-24  Richard Guenther  <rguenther@suse.de>
 +
 +	* expr.c (get_inner_reference): Remove unused variable.
 +
 +2007-08-24  Richard Guenther  <rguenther@suse.de>
 +
 +	* expr.c (get_inner_reference): Do computation of bitoffset
 +	from offset in a way we can detect overflow reliably.
 +
 +2007-08-22  Richard Guenther  <rguenther@suse.de>
 +
 +	PR middle-end/32563
 +	* tree.c (host_integerp): Treat sizetype as signed as it is
 +	sign-extended.
 +
 +2007-08-20  Adam Nemet  <anemet@caviumnetworks.com>
 +
 +	* config/mips/predicates.md (const_call_insn_operand): Invoke
 +	SYMBOL_REF_LONG_CALL_P only on SYMBOL_REFs.
 +
 +2007-08-17  Chen liqin  <liqin@sunnorth.com.cn>
 +
 +        * config/score/score.md : Update pattern tablejump.
 +        * config/score/score.c : Update score_initialize_trampoline 
 +        function.
 +        * config/score/score.h (TRAMPOLINE_TEMPLATE): Added macro.
 +        (TRAMPOLINE_INSNS, TRAMPOLINE_SIZE) Update macro.
 +        * doc/contrib.texi: Add my entry.
 +
 +2007-08-02  Andreas Krebbel  <krebbel1@de.ibm.com>
 +
 +	* config/s390/s390.md ("*xordi3_cconly"): Change xr to xg.
 +
 +2007-08-01  Andreas Krebbel  <krebbel1@de.ibm.com>
 +
 +	* config/s390/s390.md (TF in GPR splitter): Change operand_subword
 +	parameter to TFmode.
 +
 +2007-07-30  Mark Mitchell  <mark@codesourcery.com>
 +
 +	* BASE-VER: Bump.
 +	* DEV-PHASE: Mark as prerelease.
 +
 +2007-07-25  Steve Ellcey  <sje@cup.hp.com>
 +
 +	PR target/32218
 +	* tree-vect-patterns.c (vect_pattern_recog_1): Check for valid type.
 +
 +2007-07-25  Dorit Nuzman  <dorit@il.ibm.com>
 +	    Devang Patel  <dpatel@apple.com>
 +
 +	PR tree-optimization/25413
 +	* targhooks.c (default_builtin_vector_alignment_reachable): New.
 +	* targhooks.h (default_builtin_vector_alignment_reachable): New.
 +	* tree.h (contains_packed_reference): New.
 +	* expr.c (contains_packed_reference): New.
 +	* tree-vect-analyze.c (vector_alignment_reachable_p): New.
 +	(vect_enhance_data_refs_alignment): Call
 +	vector_alignment_reachable_p.
 +	* target.h (vector_alignment_reachable): New builtin.
 +	* target-def.h (TARGET_VECTOR_ALIGNMENT_REACHABLE): New.
 +	* config/rs6000/rs6000.c (rs6000_vector_alignment_reachable): New.
 +	(TARGET_VECTOR_ALIGNMENT_REACHABLE): Define.
 +
 +2007-07-24  Richard Guenther  <rguenther@suse.de>
 +
 +	Backport from mainline:
 +	2007-07-16  Richard Guenther  <rguenther@suse.de>
 +		    Uros Bizjak  <ubizjak@gmail.com>
 +
 +	* tree-if-conv.c (find_phi_replacement_condition): Unshare "*cond"
 +	before forcing it to gimple operand.
 +
 +2007-07-24  Richard Guenther  <rguenther@suse.de>
 +
 +	PR tree-optimization/32723
 +	Backport from mainline:
 +	2007-03-09  Daniel Berlin  <dberlin@dberlin.org>
 +
 +        * tree-ssa-structalias.c (shared_bitmap_info_t): New structure.
 +        (shared_bitmap_table): New variable.
 +        (shared_bitmap_hash): New function.
 +        (shared_bitmap_eq): Ditto
 +        (shared_bitmap_lookup): Ditto.
 +        (shared_bitmap_add): Ditto.
 +        (find_what_p_points_to): Rewrite to use shared bitmap hashtable.
 +        (init_alias_vars): Init shared bitmap hashtable.
 +        (delete_points_to_sets): Delete shared bitmap hashtable.
 +
 +2007-07-23  Bernd Schmidt  <bernd.schmidt@analog.com>
 +
 +	* reload1.c (choose_reload_regs): Set reload_spill_index for regs
 +	chosen during find_reloads.
 +
  2007-07-19  Release Manager
  
  	* GCC 4.2.1 released.
 
 Modified: vendor/gcc/dist/gcc/DATESTAMP
 ==============================================================================
 --- vendor/gcc/dist/gcc/DATESTAMP	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/DATESTAMP	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -1 +1 @@
 -20070719
 +20070831
 
 Modified: vendor/gcc/dist/gcc/DEV-PHASE
 ==============================================================================
 --- vendor/gcc/dist/gcc/DEV-PHASE	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/DEV-PHASE	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -0,0 +1 @@
 +prerelease
 
 Modified: vendor/gcc/dist/gcc/config/mips/predicates.md
 ==============================================================================
 --- vendor/gcc/dist/gcc/config/mips/predicates.md	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/config/mips/predicates.md	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -116,7 +116,9 @@
        /* If -mlong-calls, force all calls to use register addressing.  Also,
  	 if this function has the long_call attribute, we must use register
  	 addressing.  */
 -      return !TARGET_LONG_CALLS && !SYMBOL_REF_LONG_CALL_P (op);
 +      return (!TARGET_LONG_CALLS
 +	      && !(GET_CODE (op) == SYMBOL_REF
 +		   && SYMBOL_REF_LONG_CALL_P (op)));
  
      case SYMBOL_GOT_GLOBAL:
        /* Without explicit relocs, there is no special syntax for
 
 Modified: vendor/gcc/dist/gcc/config/rs6000/rs6000.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/config/rs6000/rs6000.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/config/rs6000/rs6000.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -664,6 +664,7 @@ static int rs6000_use_sched_lookahead (v
  static tree rs6000_builtin_mask_for_load (void);
  
  static void def_builtin (int, const char *, tree, int);
 +static bool rs6000_vector_alignment_reachable (tree, bool);
  static void rs6000_init_builtins (void);
  static rtx rs6000_expand_unop_builtin (enum insn_code, tree, rtx);
  static rtx rs6000_expand_binop_builtin (enum insn_code, tree, rtx);
 @@ -915,6 +916,9 @@ static const char alt_reg_names[][8] =
  #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
  #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
  
 +#undef TARGET_VECTOR_ALIGNMENT_REACHABLE
 +#define TARGET_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
 +
  #undef TARGET_INIT_BUILTINS
  #define TARGET_INIT_BUILTINS rs6000_init_builtins
  
 @@ -1584,6 +1588,37 @@ rs6000_builtin_mask_for_load (void)
      return 0;
  }
  
 +
 +/* Return true iff, data reference of TYPE can reach vector alignment (16)
 +   after applying N number of iterations.  This routine does not determine
 +   how may iterations are required to reach desired alignment.  */
 +
 +static bool
 +rs6000_vector_alignment_reachable (tree type ATTRIBUTE_UNUSED, bool is_packed)
 +{
 +  if (is_packed)
 +    return false;
 +
 +  if (TARGET_32BIT)
 +    {
 +      if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
 +        return true;
 +
 +      if (rs6000_alignment_flags ==  MASK_ALIGN_POWER)
 +        return true;
 +
 +      return false;
 +    }
 +  else
 +    {
 +      if (TARGET_MACHO)
 +        return false;
 +
 +      /* Assuming that all other types are naturally aligned. CHECKME!  */
 +      return true;
 +    }
 +}
 +
  /* Handle generic options of the form -mfoo=yes/no.
     NAME is the option name.
     VALUE is the option value.
 
 Modified: vendor/gcc/dist/gcc/config/s390/s390.md
 ==============================================================================
 --- vendor/gcc/dist/gcc/config/s390/s390.md	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/config/s390/s390.md	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -1500,7 +1500,7 @@
     && !s_operand (operands[1], VOIDmode)"
    [(set (match_dup 0) (match_dup 1))]
  {
 -  rtx addr = operand_subword (operands[0], 1, 0, DFmode);
 +  rtx addr = operand_subword (operands[0], 1, 0, TFmode);
    s390_load_address (addr, XEXP (operands[1], 0));
    operands[1] = replace_equiv_address (operands[1], addr);
  })
 @@ -5624,7 +5624,7 @@
    "s390_match_ccmode(insn, CCTmode) && TARGET_64BIT"
    "@
     xgr\t%0,%2
 -   xr\t%0,%2"
 +   xg\t%0,%2"
    [(set_attr "op_type"  "RRE,RXY")])
  
  (define_insn "*xordi3_extimm"
 
 Modified: vendor/gcc/dist/gcc/cp/ChangeLog
 ==============================================================================
 --- vendor/gcc/dist/gcc/cp/ChangeLog	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/cp/ChangeLog	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -1,3 +1,66 @@
 +2007-08-24  Jakub Jelinek  <jakub@redhat.com>
 +
 +	PR c++/31941
 +	* error.c (resolve_virtual_fun_from_obj_type_ref): Handle
 +	TARGET_VTABLE_USES_DESCRIPTORS targets properly.
 +
 +	PR c++/32898
 +	* name-lookup.c (set_decl_namespace): lookup_qualified_name failure
 +	is error_mark_node rather than NULL_TREE.
 +	* pt.c (check_explicit_specialization): Likewise.
 +
 +2007-08-22  Jason Merrill  <jason@redhat.com>
 +
 +	PR c++/29365
 +	* pt.c (outermost_tinst_level): New function.
 +	* lex.c (in_main_input_context): New function.
 +	* cp-tree.h: Declare it.
 +	* decl2.c (constrain_class_visibility): Use it to avoid warning
 +	about uses of the anonymous namespace in the main input file.
 +
 +2007-08-20  Jakub Jelinek  <jakub@redhat.com>
 +
 +	PR c++/32992
 +	* typeck.c (check_return_expr): Don't NRV optimize vars in
 +	anonymous unions.
 +	* decl.c (finish_function): Comment fix.
 +
 +2007-08-18  Paolo Carlini  <pcarlini@suse.de>
 +
 +	PR c++/32112
 +	* error.c (dump_decl): Deal with UNBOUND_CLASS_TEMPLATE.
 +	* cxx-pretty-print.c (pp_cxx_unqualified_id): Likewise.
 +
 +2007-08-10  Paolo Carlini  <pcarlini@suse.de>
 +
 +	PR c++/17763
 +	* error.c (dump_expr): Consistently use the *_cxx_*
 +	variants of the pretty-print functions.
 +
 +2007-07-30  Paolo Carlini  <pcarlini@suse.de>
 +
 +	PR c++/32108
 +	* semantics.c (finish_label_stmt): Reject the __label__
 +	extension outside function scopes.
 +
 +2007-07-28  Simon Martin  <simartin@users.sourceforge.net>
 +	    Mark Mitchell  <mark@codesourcery.com>
 +
 +	PR c++/30917
 +	* name-lookup.c (lookup_name_real): Non namespace-scope bindings can be
 +	hidden due to friend declarations in local classes.
 +
 +2007-07-27  Mark Mitchell  <mark@codesourcery.com>
 +
 +	PR c++/32346
 +	* call.c (convert_for_arg_passing): Only widen bitfields to their
 +	declared types if necessary.
 +
 +2007-07-24  Paolo Carlini  <pcarlini@suse.de>
 +
 +	PR c++/30535
 +	* pt.c (unify): Never pass error_mark_node to template_decl_level.
 +
  2007-07-19  Release Manager
  
  	* GCC 4.2.1 released.
 
 Modified: vendor/gcc/dist/gcc/cp/call.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/cp/call.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/cp/call.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -4674,7 +4674,27 @@ type_passed_as (tree type)
  tree
  convert_for_arg_passing (tree type, tree val)
  {
 -  val = convert_bitfield_to_declared_type (val);
 +  tree bitfield_type;
 +
 +  /* If VAL is a bitfield, then -- since it has already been converted
 +     to TYPE -- it cannot have a precision greater than TYPE.  
 +
 +     If it has a smaller precision, we must widen it here.  For
 +     example, passing "int f:3;" to a function expecting an "int" will
 +     not result in any conversion before this point.
 +
 +     If the precision is the same we must not risk widening.  For
 +     example, the COMPONENT_REF for a 32-bit "long long" bitfield will
 +     often have type "int", even though the C++ type for the field is
 +     "long long".  If the value is being passed to a function
 +     expecting an "int", then no conversions will be required.  But,
 +     if we call convert_bitfield_to_declared_type, the bitfield will
 +     be converted to "long long".  */
 +  bitfield_type = is_bitfield_expr_with_lowered_type (val);
 +  if (bitfield_type 
 +      && TYPE_PRECISION (TREE_TYPE (val)) < TYPE_PRECISION (type))
 +    val = convert_to_integer (TYPE_MAIN_VARIANT (bitfield_type), val);
 +
    if (val == error_mark_node)
      ;
    /* Pass classes with copy ctors by invisible reference.  */
 
 Modified: vendor/gcc/dist/gcc/cp/cp-tree.h
 ==============================================================================
 --- vendor/gcc/dist/gcc/cp/cp-tree.h	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/cp/cp-tree.h	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -4079,6 +4079,7 @@ extern void yyerror				(const char *);
  extern void yyhook				(int);
  extern bool cxx_init				(void);
  extern void cxx_finish				(void);
 +extern bool in_main_input_context		(void);
  
  /* in method.c */
  extern void init_method				(void);
 @@ -4161,6 +4162,7 @@ extern tree build_non_dependent_args		(t
  extern bool reregister_specialization		(tree, tree, tree);
  extern tree fold_non_dependent_expr		(tree);
  extern bool explicit_class_specialization_p     (tree);
 +extern tree outermost_tinst_level		(void);
  
  /* in repo.c */
  extern void init_repo				(void);
 
 Modified: vendor/gcc/dist/gcc/cp/cxx-pretty-print.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/cp/cxx-pretty-print.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/cp/cxx-pretty-print.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -204,6 +204,10 @@ pp_cxx_unqualified_id (cxx_pretty_printe
        pp_cxx_unqualified_id (pp, TEMPLATE_PARM_DECL (t));
        break;
  
 +    case UNBOUND_CLASS_TEMPLATE:
 +      pp_cxx_unqualified_id (pp, TYPE_NAME (t));
 +      break;
 +
      default:
        pp_unsupported_tree (pp, t);
        break;
 
 Modified: vendor/gcc/dist/gcc/cp/decl.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/cp/decl.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/cp/decl.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -11273,7 +11273,7 @@ finish_function (int flags)
    gcc_assert (stmts_are_full_exprs_p ());
  
    /* Set up the named return value optimization, if we can.  Candidate
 -     variables are selected in check_return_value.  */
 +     variables are selected in check_return_expr.  */
    if (current_function_return_value)
      {
        tree r = current_function_return_value;
 
 Modified: vendor/gcc/dist/gcc/cp/decl2.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/cp/decl2.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/cp/decl2.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -1860,9 +1860,12 @@ constrain_class_visibility (tree type)
  	int subvis = type_visibility (ftype);
  
  	if (subvis == VISIBILITY_ANON)
 -	  warning (0, "\
 +	  {
 +	    if (!in_main_input_context ())
 +	      warning (0, "\
  %qT has a field %qD whose type uses the anonymous namespace",
  		   type, t);
 +	  }
  	else if (IS_AGGR_TYPE (ftype)
  		 && vis < VISIBILITY_HIDDEN
  		 && subvis >= VISIBILITY_HIDDEN)
 @@ -1877,9 +1880,12 @@ constrain_class_visibility (tree type)
        int subvis = type_visibility (TREE_TYPE (t));
  
        if (subvis == VISIBILITY_ANON)
 -	warning (0, "\
 +        {
 +	  if (!in_main_input_context())
 +	    warning (0, "\
  %qT has a base %qT whose type uses the anonymous namespace",
  		 type, TREE_TYPE (t));
 +	}
        else if (vis < VISIBILITY_HIDDEN
  	       && subvis >= VISIBILITY_HIDDEN)
  	warning (OPT_Wattributes, "\
 
 Modified: vendor/gcc/dist/gcc/cp/error.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/cp/error.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/cp/error.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -901,6 +901,10 @@ dump_decl (tree t, int flags)
  	pp_type_id (cxx_pp, t);
        break;
  
 +    case UNBOUND_CLASS_TEMPLATE:
 +      dump_type (t, flags);
 +      break;
 +
      default:
        pp_unsupported_tree (cxx_pp, t);
        /* Fall through to error.  */
 @@ -1301,10 +1305,14 @@ static tree
  resolve_virtual_fun_from_obj_type_ref (tree ref)
  {
    tree obj_type = TREE_TYPE (OBJ_TYPE_REF_OBJECT (ref));
 -  int index = tree_low_cst (OBJ_TYPE_REF_TOKEN (ref), 1);
 +  HOST_WIDE_INT index = tree_low_cst (OBJ_TYPE_REF_TOKEN (ref), 1);
    tree fun = BINFO_VIRTUALS (TYPE_BINFO (TREE_TYPE (obj_type)));
 -    while (index--)
 +  while (index)
 +    {
        fun = TREE_CHAIN (fun);
 +      index -= (TARGET_VTABLE_USES_DESCRIPTORS
 +		? TARGET_VTABLE_USES_DESCRIPTORS : 1);
 +    }
  
    return BV_FN (fun);
  }
 @@ -1420,13 +1428,13 @@ dump_expr (tree t, int flags)
  	    if (TREE_CODE (ob) == ADDR_EXPR)
  	      {
  		dump_expr (TREE_OPERAND (ob, 0), flags | TFF_EXPR_IN_PARENS);
 -		pp_dot (cxx_pp);
 +		pp_cxx_dot (cxx_pp);
  	      }
  	    else if (TREE_CODE (ob) != PARM_DECL
  		     || strcmp (IDENTIFIER_POINTER (DECL_NAME (ob)), "this"))
  	      {
  		dump_expr (ob, flags | TFF_EXPR_IN_PARENS);
 -		pp_arrow (cxx_pp);
 +		pp_cxx_arrow (cxx_pp);
  	      }
  	    args = TREE_CHAIN (args);
  	  }
 
 Modified: vendor/gcc/dist/gcc/cp/lex.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/cp/lex.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/cp/lex.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -827,3 +827,18 @@ make_aggr_type (enum tree_code code)
  
    return t;
  }
 +
 +/* Returns true if we are currently in the main source file, or in a
 +   template instantiation started from the main source file.  */
 +
 +bool
 +in_main_input_context (void)
 +{
 +  tree tl = outermost_tinst_level();
 +
 +  if (tl)
 +    return strcmp (main_input_filename,
 +		   LOCATION_FILE (TINST_LOCATION (tl))) == 0;
 +  else
 +    return strcmp (main_input_filename, input_filename) == 0;
 +}
 
 Modified: vendor/gcc/dist/gcc/cp/name-lookup.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/cp/name-lookup.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/cp/name-lookup.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -2924,7 +2924,7 @@ set_decl_namespace (tree decl, tree scop
  
    /* See whether this has been declared in the namespace.  */
    old = lookup_qualified_name (scope, DECL_NAME (decl), false, true);
 -  if (!old)
 +  if (old == error_mark_node)
      /* No old declaration at all.  */
      goto complain;
    if (!is_overloaded_fn (decl))
 @@ -3996,8 +3996,49 @@ lookup_name_real (tree name, int prefer_
  
  	if (binding)
  	  {
 -	    /* Only namespace-scope bindings can be hidden.  */
 -	    gcc_assert (!hidden_name_p (binding));
 +	    if (hidden_name_p (binding))
 +	      {
 +		/* A non namespace-scope binding can only be hidden if
 +		   we are in a local class, due to friend declarations.
 +		   In particular, consider:
 +
 +		   void f() {
 +		     struct A {
 +		       friend struct B;
 +		       void g() { B* b; } // error: B is hidden
 +		     }
 +		     struct B {};
 +		   }
 +
 +		   The standard says that "B" is a local class in "f"
 +		   (but not nested within "A") -- but that name lookup
 +		   for "B" does not find this declaration until it is
 +		   declared directly with "f".
 +
 +		   In particular:
 +
 +		   [class.friend]
 +
 +		   If a friend declaration appears in a local class and
 +		   the name specified is an unqualified name, a prior
 +		   declaration is looked up without considering scopes
 +		   that are outside the innermost enclosing non-class
 +		   scope. For a friend class declaration, if there is no
 +		   prior declaration, the class that is specified 
 +		   belongs to the innermost enclosing non-class scope,
 +		   but if it is subsequently referenced, its name is not
 +		   found by name lookup until a matching declaration is
 +		   provided in the innermost enclosing nonclass scope.
 +		*/
 +		gcc_assert (current_class_type &&
 +			    LOCAL_CLASS_P (current_class_type));
 +
 +		/* This binding comes from a friend declaration in the local
 +		   class. The standard (11.4.8) states that the lookup can
 +		   only succeed if there is a non-hidden declaration in the
 +		   current scope, which is not the case here.  */
 +		POP_TIMEVAR_AND_RETURN (TV_NAME_LOOKUP, NULL_TREE);
 +	      }
  	    val = binding;
  	    break;
  	  }
 
 Modified: vendor/gcc/dist/gcc/cp/pt.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/cp/pt.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/cp/pt.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -1971,7 +1971,7 @@ check_explicit_specialization (tree decl
  		 context.  */
  	      fns = lookup_qualified_name (CP_DECL_CONTEXT (decl), dname,
  					   false, true);
 -	      if (!fns || !is_overloaded_fn (fns))
 +	      if (fns == error_mark_node || !is_overloaded_fn (fns))
  		{
  		  error ("%qD is not a template function", dname);
  		  fns = error_mark_node;
 @@ -5288,6 +5288,15 @@ reopen_tinst_level (tree level)
    pop_tinst_level ();
  }
  
 +/* Returns the TINST_LEVEL which gives the original instantiation
 +   context.  */
 +
 +tree
 +outermost_tinst_level (void)
 +{
 +  return tree_last (current_tinst_level);
 +}
 +
  /* DECL is a friend FUNCTION_DECL or TEMPLATE_DECL.  ARGS is the
     vector of template arguments, as for tsubst.
  
 @@ -10453,6 +10462,8 @@ unify (tree tparms, tree targs, tree par
      case TEMPLATE_TEMPLATE_PARM:
      case BOUND_TEMPLATE_TEMPLATE_PARM:
        tparm = TREE_VALUE (TREE_VEC_ELT (tparms, 0));
 +      if (tparm == error_mark_node)
 +	return 1;
  
        if (TEMPLATE_TYPE_LEVEL (parm)
  	  != template_decl_level (tparm))
 
 Modified: vendor/gcc/dist/gcc/cp/semantics.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/cp/semantics.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/cp/semantics.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -1320,8 +1320,13 @@ finish_label_stmt (tree name)
  void
  finish_label_decl (tree name)
  {
 -  tree decl = declare_local_label (name);
 -  add_decl_expr (decl);
 +  if (!at_function_scope_p ())
 +    {
 +      error ("__label__ declarations are only allowed in function scopes");
 +      return;
 +    }
 +
 +  add_decl_expr (declare_local_label (name));
  }
  
  /* When DECL goes out of scope, make sure that CLEANUP is executed.  */
 
 Modified: vendor/gcc/dist/gcc/cp/typeck.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/cp/typeck.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/cp/typeck.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -6604,6 +6604,7 @@ check_return_expr (tree retval, bool *no
  	  && TREE_CODE (retval) == VAR_DECL
  	  && DECL_CONTEXT (retval) == current_function_decl
  	  && ! TREE_STATIC (retval)
 +	  && ! DECL_ANON_UNION_VAR_P (retval)
  	  && (DECL_ALIGN (retval)
  	      >= DECL_ALIGN (DECL_RESULT (current_function_decl)))
  	  && same_type_p ((TYPE_MAIN_VARIANT
 
 Modified: vendor/gcc/dist/gcc/doc/contrib.texi
 ==============================================================================
 --- vendor/gcc/dist/gcc/doc/contrib.texi	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/doc/contrib.texi	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -514,6 +514,10 @@ patches.
  Robert Lipe for OpenServer support, new testsuites, testing, etc.
  
  @item
 +Chen Liqin for various S+core related fixes/improvement, and for
 +maintaining the S+core port.
 +
 +@item
  Weiwen Liu for testing and various bug fixes.
  
  @item
 
 Modified: vendor/gcc/dist/gcc/dwarf2out.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/dwarf2out.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/dwarf2out.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -10065,6 +10065,43 @@ rtl_for_decl_init (tree init, tree type)
    else if (initializer_constant_valid_p (init, type)
  	   && ! walk_tree (&init, reference_to_unused, NULL, NULL))
      {
 +      /* Convert vector CONSTRUCTOR initializers to VECTOR_CST if
 +	 possible.  */
 +      if (TREE_CODE (type) == VECTOR_TYPE)
 +	switch (TREE_CODE (init))
 +	  {
 +	  case VECTOR_CST:
 +	    break;
 +	  case CONSTRUCTOR:
 +	    if (TREE_CONSTANT (init))
 +	      {
 +		VEC(constructor_elt,gc) *elts = CONSTRUCTOR_ELTS (init);
 +		bool constant_p = true;
 +		tree value;
 +		unsigned HOST_WIDE_INT ix;
 +
 +		/* Even when ctor is constant, it might contain non-*_CST
 +		   elements (e.g. { 1.0/0.0 - 1.0/0.0, 0.0 }) and those don't
 +		   belong into VECTOR_CST nodes.  */
 +		FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, value)
 +		  if (!CONSTANT_CLASS_P (value))
 +		    {
 +		      constant_p = false;
 +		      break;
 +		    }
 +
 +		if (constant_p)
 +		  {
 +		    init = build_vector_from_ctor (type, elts);
 +		    break;
 +		  }
 +	      }
 +	    /* FALLTHRU */
 +
 +	  default:
 +	    return NULL;
 +	  }
 +
        rtl = expand_expr (init, NULL_RTX, VOIDmode, EXPAND_INITIALIZER);
  
        /* If expand_expr returns a MEM, it wasn't immediate.  */
 @@ -13197,7 +13234,8 @@ gen_decl_die (tree decl, dw_die_ref cont
  	 was generated within the original definition of an inline function) we
  	 have to generate a special (abbreviated) DW_TAG_structure_type,
  	 DW_TAG_union_type, or DW_TAG_enumeration_type DIE here.  */
 -      if (TYPE_DECL_IS_STUB (decl) && decl_ultimate_origin (decl) != NULL_TREE)
 +      if (TYPE_DECL_IS_STUB (decl) && decl_ultimate_origin (decl) != NULL_TREE
 +	  && is_tagged_type (TREE_TYPE (decl)))
  	{
  	  gen_tagged_type_instantiation_die (TREE_TYPE (decl), context_die);
  	  break;
 
 Modified: vendor/gcc/dist/gcc/expr.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/expr.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/expr.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -5654,7 +5654,6 @@ get_inner_reference (tree exp, HOST_WIDE
    enum machine_mode mode = VOIDmode;
    tree offset = size_zero_node;
    tree bit_offset = bitsize_zero_node;
 -  tree tem;
  
    /* First get the mode, signedness, and size.  We do this from just the
       outermost expression.  */
 @@ -5690,6 +5689,8 @@ get_inner_reference (tree exp, HOST_WIDE
  	*pbitsize = tree_low_cst (size_tree, 1);
      }
  
 +  *pmode = mode;
 +
    /* Compute cumulative bit-offset for nested component-refs and array-refs,
       and find the ultimate containing object.  */
    while (1)
 @@ -5774,21 +5775,69 @@ get_inner_reference (tree exp, HOST_WIDE
   done:
  
    /* If OFFSET is constant, see if we can return the whole thing as a
 -     constant bit position.  Otherwise, split it up.  */
 -  if (host_integerp (offset, 0)
 -      && 0 != (tem = size_binop (MULT_EXPR,
 -				 fold_convert (bitsizetype, offset),
 -				 bitsize_unit_node))
 -      && 0 != (tem = size_binop (PLUS_EXPR, tem, bit_offset))
 -      && host_integerp (tem, 0))
 -    *pbitpos = tree_low_cst (tem, 0), *poffset = 0;
 -  else
 -    *pbitpos = tree_low_cst (bit_offset, 0), *poffset = offset;
 +     constant bit position.  Make sure to handle overflow during
 +     this conversion.  */
 +  if (host_integerp (offset, 0))
 +    {
 +      double_int tem = double_int_mul (tree_to_double_int (offset),
 +				       uhwi_to_double_int (BITS_PER_UNIT));
 +      tem = double_int_add (tem, tree_to_double_int (bit_offset));
 +      if (double_int_fits_in_shwi_p (tem))
 +	{
 +	  *pbitpos = double_int_to_shwi (tem);
 +	  *poffset = NULL_TREE;
 +	  return exp;
 +	}
 +    }
 +
 +  /* Otherwise, split it up.  */
 +  *pbitpos = tree_low_cst (bit_offset, 0);
 +  *poffset = offset;
  
 -  *pmode = mode;
    return exp;
  }
  
 +/* Given an expression EXP that may be a COMPONENT_REF or an ARRAY_REF,
 +   look for whether EXP or any nested component-refs within EXP is marked
 +   as PACKED.  */
 +
 +bool
 +contains_packed_reference (tree exp)
 +{
 +  bool packed_p = false;
 +
 +  while (1)
 +    {
 +      switch (TREE_CODE (exp))
 +	{
 +	case COMPONENT_REF:
 +	  {
 +	    tree field = TREE_OPERAND (exp, 1);
 +	    packed_p = DECL_PACKED (field) 
 +		       || TYPE_PACKED (TREE_TYPE (field))
 +		       || TYPE_PACKED (TREE_TYPE (exp));
 +	    if (packed_p)
 +	      goto done;
 +	  }
 +	  break;
 +
 +	case BIT_FIELD_REF:
 +	case ARRAY_REF:
 +	case ARRAY_RANGE_REF:
 +	case REALPART_EXPR:
 +	case IMAGPART_EXPR:
 +	case VIEW_CONVERT_EXPR:
 +	  break;
 +
 +	default:
 +	  goto done;
 +	}
 +      exp = TREE_OPERAND (exp, 0);
 +    }
 + done:
 +  return packed_p;
 +}
 +
  /* Return a tree of sizetype representing the size, in bytes, of the element
     of EXP, an ARRAY_REF.  */
  
 
 Modified: vendor/gcc/dist/gcc/fold-const.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/fold-const.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/fold-const.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -9387,6 +9387,7 @@ fold_binary (enum tree_code code, tree t
  
        /* ~X | X is -1.  */
        if (TREE_CODE (arg0) == BIT_NOT_EXPR
 +	  && INTEGRAL_TYPE_P (TREE_TYPE (arg1))
  	  && operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0))
  	{
  	  t1 = build_int_cst (type, -1);
 @@ -9396,6 +9397,7 @@ fold_binary (enum tree_code code, tree t
  
        /* X | ~X is -1.  */
        if (TREE_CODE (arg1) == BIT_NOT_EXPR
 +	  && INTEGRAL_TYPE_P (TREE_TYPE (arg0))
  	  && operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0))
  	{
  	  t1 = build_int_cst (type, -1);
 @@ -9503,6 +9505,7 @@ fold_binary (enum tree_code code, tree t
  
        /* ~X ^ X is -1.  */
        if (TREE_CODE (arg0) == BIT_NOT_EXPR
 +	  && INTEGRAL_TYPE_P (TREE_TYPE (arg1))
  	  && operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0))
  	{
  	  t1 = build_int_cst (type, -1);
 @@ -9512,6 +9515,7 @@ fold_binary (enum tree_code code, tree t
  
        /* X ^ ~X is -1.  */
        if (TREE_CODE (arg1) == BIT_NOT_EXPR
 +	  && INTEGRAL_TYPE_P (TREE_TYPE (arg0))
  	  && operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0))
  	{
  	  t1 = build_int_cst (type, -1);
 
 Modified: vendor/gcc/dist/gcc/gimplify.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/gimplify.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/gimplify.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -3532,8 +3532,16 @@ gimplify_modify_expr (tree *expr_p, tree
    gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
  	      || TREE_CODE (*expr_p) == INIT_EXPR);
  
 -  /* For zero sized types only gimplify the left hand side and right hand side
 -     as statements and throw away the assignment.  */
 +  /* See if any simplifications can be done based on what the RHS is.  */
 +  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
 +				  want_value);
 +  if (ret != GS_UNHANDLED)
 +    return ret;
 +
 +  /* For zero sized types only gimplify the left hand side and right hand
 +     side as statements and throw away the assignment.  Do this after
 +     gimplify_modify_expr_rhs so we handle TARGET_EXPRs of addressable
 +     types properly.  */
    if (zero_sized_type (TREE_TYPE (*from_p)))
      {
        gimplify_stmt (from_p);
 @@ -3544,12 +3552,6 @@ gimplify_modify_expr (tree *expr_p, tree
        return GS_ALL_DONE;
      }
  
 -  /* See if any simplifications can be done based on what the RHS is.  */
 -  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
 -				  want_value);
 -  if (ret != GS_UNHANDLED)
 -    return ret;
 -
    /* If the value being copied is of variable width, compute the length
       of the copy into a WITH_SIZE_EXPR.   Note that we need to do this
       before gimplifying any of the operands so that we can resolve any
 
 Modified: vendor/gcc/dist/gcc/reload1.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/reload1.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/reload1.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -5451,7 +5451,14 @@ choose_reload_regs (struct insn_chain *c
    for (j = 0; j < n_reloads; j++)
      {
        reload_order[j] = j;
 -      reload_spill_index[j] = -1;
 +      if (rld[j].reg_rtx != NULL_RTX)
 +	{
 +	  gcc_assert (REG_P (rld[j].reg_rtx)
 +		      && HARD_REGISTER_P (rld[j].reg_rtx));
 +	  reload_spill_index[j] = REGNO (rld[j].reg_rtx);
 +	}
 +      else
 +	reload_spill_index[j] = -1;
  
        if (rld[j].nregs > 1)
  	{
 
 Modified: vendor/gcc/dist/gcc/simplify-rtx.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/simplify-rtx.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/simplify-rtx.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -589,7 +589,8 @@ simplify_unary_operation_1 (enum rtx_cod
        /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
        /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
        if (GET_CODE (op) == LT
 -	  && XEXP (op, 1) == const0_rtx)
 +	  && XEXP (op, 1) == const0_rtx
 +	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
  	{
  	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
  	  int isize = GET_MODE_BITSIZE (inner);
 
 Modified: vendor/gcc/dist/gcc/target-def.h
 ==============================================================================
 --- vendor/gcc/dist/gcc/target-def.h	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/target-def.h	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -337,9 +337,12 @@ Foundation, 51 Franklin Street, Fifth Fl
     TARGET_SCHED_SET_SCHED_FLAGS}
  
  #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD 0
 +#define TARGET_VECTOR_ALIGNMENT_REACHABLE \
 +  default_builtin_vector_alignment_reachable
  
  #define TARGET_VECTORIZE                                                \
 -  {TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD}
 +  {TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD,				\
 +   TARGET_VECTOR_ALIGNMENT_REACHABLE}
  
  #define TARGET_DEFAULT_TARGET_FLAGS 0
  
 
 Modified: vendor/gcc/dist/gcc/target.h
 ==============================================================================
 --- vendor/gcc/dist/gcc/target.h	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/target.h	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -375,6 +375,10 @@ struct gcc_target
         by the vectorizer, and return the decl of the target builtin
         function.  */
      tree (* builtin_mask_for_load) (void);
 +
 +    /* Return true if vector alignment is reachable (by peeling N
 +      interations) for the given type.  */
 +     bool (* vector_alignment_reachable) (tree, bool);
    } vectorize;
  
    /* The initial value of target_flags.  */
 
 Modified: vendor/gcc/dist/gcc/targhooks.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/targhooks.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/targhooks.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -604,4 +604,20 @@ default_reloc_rw_mask (void)
    return flag_pic ? 3 : 0;
  }
  
 +bool
 +default_builtin_vector_alignment_reachable (tree type, bool is_packed)
 +{
 +  if (is_packed)
 +    return false;
 +
 +  /* Assuming that types whose size is > pointer-size are not guaranteed to be
 +     naturally aligned.  */
 +  if (tree_int_cst_compare (TYPE_SIZE (type), bitsize_int (POINTER_SIZE)) > 0)
 +    return false;
 +
 +  /* Assuming that types whose size is <= pointer-size
 +     are naturally aligned.  */
 +  return true;
 +}
 +
  #include "gt-targhooks.h"
 
 Modified: vendor/gcc/dist/gcc/targhooks.h
 ==============================================================================
 --- vendor/gcc/dist/gcc/targhooks.h	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/targhooks.h	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -57,6 +57,8 @@ extern const char * default_invalid_with
  
  extern bool default_narrow_bitfield (void);
  
 +extern bool default_builtin_vector_alignment_reachable (tree, bool);
 +
  /* These are here, and not in hooks.[ch], because not all users of
     hooks.h include tm.h, and thus we don't have CUMULATIVE_ARGS.  */
  
 
 Modified: vendor/gcc/dist/gcc/tree-if-conv.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/tree-if-conv.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/tree-if-conv.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -743,7 +743,7 @@ find_phi_replacement_condition (struct l
        if (TREE_CODE (*cond) == TRUTH_NOT_EXPR)
  	/* We can be smart here and choose inverted
  	   condition without switching bbs.  */
 -	  *cond = invert_truthvalue (*cond);
 +	*cond = invert_truthvalue (*cond);
        else
  	/* Select non loop header bb.  */
  	first_edge = second_edge;
 @@ -762,9 +762,11 @@ find_phi_replacement_condition (struct l
  
    /* Create temp. for the condition. Vectorizer prefers to have gimple
       value as condition. Various targets use different means to communicate
 -     condition in vector compare operation. Using gimple value allows compiler
 -     to emit vector compare and select RTL without exposing compare's result.  */
 -  *cond = force_gimple_operand (*cond, &new_stmts, false, NULL_TREE);
 +     condition in vector compare operation. Using gimple value allows
 +     compiler to emit vector compare and select RTL without exposing
 +     compare's result.  */
 +  *cond = force_gimple_operand (unshare_expr (*cond), &new_stmts,
 +				false, NULL_TREE);
    if (new_stmts)
      bsi_insert_before (bsi, new_stmts, BSI_SAME_STMT);
    if (!is_gimple_reg (*cond) && !is_gimple_condexpr (*cond))
 
 Modified: vendor/gcc/dist/gcc/tree-ssa-structalias.c
 ==============================================================================
 --- vendor/gcc/dist/gcc/tree-ssa-structalias.c	Thu Mar 10 11:23:43 2011	(r219450)
 +++ vendor/gcc/dist/gcc/tree-ssa-structalias.c	Thu Mar 10 13:59:17 2011	(r219451)
 @@ -4350,6 +4350,75 @@ intra_create_variable_infos (void)
    process_constraint (new_constraint (lhs, rhs));
  }
  
 +/* Structure used to put solution bitmaps in a hashtable so they can
 +   be shared among variables with the same points-to set.  */
 +
 +typedef struct shared_bitmap_info
 +{
 +  bitmap pt_vars;
 +  hashval_t hashcode;
 +} *shared_bitmap_info_t;
 +
 +static htab_t shared_bitmap_table;
 +
 +/* Hash function for a shared_bitmap_info_t */
 +
 +static hashval_t
 +shared_bitmap_hash (const void *p)
 +{
 +  const shared_bitmap_info_t bi = (shared_bitmap_info_t) p;
 +  return bi->hashcode;
 +}
 +
 +/* Equality function for two shared_bitmap_info_t's. */
 +
 +static int
 +shared_bitmap_eq (const void *p1, const void *p2)
 +{
 +  const shared_bitmap_info_t sbi1 = (shared_bitmap_info_t) p1;
 +  const shared_bitmap_info_t sbi2 = (shared_bitmap_info_t) p2;
 +  return bitmap_equal_p (sbi1->pt_vars, sbi2->pt_vars);
 +}
 +
 +/* Lookup a bitmap in the shared bitmap hashtable, and return an already
 +   existing instance if there is one, NULL otherwise.  */
 +
 +static bitmap
 +shared_bitmap_lookup (bitmap pt_vars)
 +{
 +  void **slot;
 +  struct shared_bitmap_info sbi;
 +
 +  sbi.pt_vars = pt_vars;
 +  sbi.hashcode = bitmap_hash (pt_vars);
 +  
 +  slot = htab_find_slot_with_hash (shared_bitmap_table, &sbi,
 
 *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
 _______________________________________________
 svn-src-all@freebsd.org mailing list
 http://lists.freebsd.org/mailman/listinfo/svn-src-all
 To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"
 
Responsible-Changed-From-To: freebsd-bugs->mm 
Responsible-Changed-By: mm 
Responsible-Changed-When: Tue Mar 29 20:57:02 UTC 2011 
Responsible-Changed-Why:  
I'll take it. 

http://www.freebsd.org/cgi/query-pr.cgi?pr=153298 
State-Changed-From-To: open->patched 
State-Changed-By: mm 
State-Changed-When: Tue Mar 29 20:57:36 UTC 2011 
State-Changed-Why:  
MFC pending. 

http://www.freebsd.org/cgi/query-pr.cgi?pr=153298 

From: dfilter@FreeBSD.ORG (dfilter service)
To: bug-followup@FreeBSD.org
Cc:  
Subject: Re: gnu/153298: commit references a PR
Date: Tue, 29 Mar 2011 20:54:10 +0000 (UTC)

 Author: mm
 Date: Tue Mar 29 20:53:51 2011
 New Revision: 220150
 URL: http://svn.freebsd.org/changeset/base/220150
 
 Log:
   Upgrade of base gcc and libstdc++ to the last GPLv2-licensed revision
   (rev. 127959 of gcc-4_2-branch).
   
   Resolved GCC bugs:
   	c++: 17763, 29365, 30535, 30917, 31337, 31941, 32108, 32112, 32346,
   	     32898, 32992
   	debug: 32610, 32914
   	libstdc++: 33084, 33128
   	middle-end: 32563
   	rtl-optimization: 33148
   	tree-optimization: 25413, 32723
   	target: 32218
   
   Tested by:	pointyhat (miwi)
   Obtained from:	gcc (gcc-4_2-branch up to rev. 127959)
   PR:		gnu/153298, gnu/153959, gnu/154385
   MFC after:	1 month
 
 Modified:
   head/contrib/gcc/BASE-VER
   head/contrib/gcc/ChangeLog
   head/contrib/gcc/DATESTAMP
   head/contrib/gcc/DEV-PHASE
   head/contrib/gcc/config/mips/predicates.md
   head/contrib/gcc/config/rs6000/rs6000.c
   head/contrib/gcc/config/s390/s390.md
   head/contrib/gcc/cp/ChangeLog
   head/contrib/gcc/cp/call.c
   head/contrib/gcc/cp/cp-tree.h
   head/contrib/gcc/cp/cxx-pretty-print.c
   head/contrib/gcc/cp/decl.c
   head/contrib/gcc/cp/decl2.c
   head/contrib/gcc/cp/error.c
   head/contrib/gcc/cp/lex.c
   head/contrib/gcc/cp/name-lookup.c
   head/contrib/gcc/cp/pt.c
   head/contrib/gcc/cp/semantics.c
   head/contrib/gcc/cp/typeck.c
   head/contrib/gcc/doc/contrib.texi
   head/contrib/gcc/dwarf2out.c
   head/contrib/gcc/expr.c
   head/contrib/gcc/fold-const.c
   head/contrib/gcc/gimplify.c
   head/contrib/gcc/reload1.c
   head/contrib/gcc/simplify-rtx.c
   head/contrib/gcc/target-def.h
   head/contrib/gcc/target.h
   head/contrib/gcc/targhooks.c
   head/contrib/gcc/targhooks.h
   head/contrib/gcc/tree-if-conv.c
   head/contrib/gcc/tree-ssa-structalias.c
   head/contrib/gcc/tree-vect-analyze.c
   head/contrib/gcc/tree-vect-patterns.c
   head/contrib/gcc/tree.c
   head/contrib/gcc/tree.h
   head/contrib/libstdc++/ChangeLog
   head/contrib/libstdc++/include/std/std_valarray.h
   head/contrib/libstdc++/include/tr1/random
   head/sys/sys/param.h
 Directory Properties:
   head/contrib/gcc/   (props changed)
   head/contrib/libstdc++/   (props changed)
 
 Modified: head/contrib/gcc/BASE-VER
 ==============================================================================
 --- head/contrib/gcc/BASE-VER	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/BASE-VER	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -1 +1 @@
 -4.2.1
 +4.2.2
 
 Modified: head/contrib/gcc/ChangeLog
 ==============================================================================
 --- head/contrib/gcc/ChangeLog	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/ChangeLog	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -1,3 +1,121 @@
 +2007-08-31  Jakub Jelinek  <jakub@redhat.com>
 +
 +	PR rtl-optimization/33148
 +	* simplify-rtx.c (simplify_unary_operation_1): Only optimize
 +	(neg (lt X 0)) if X has scalar int mode.
 +
 +	PR debug/32914
 +	* dwarf2out.c (rtl_for_decl_init): If vector decl has CONSTRUCTOR
 +	initializer, use build_vector_from_ctor if possible to create
 +	VECTOR_CST out of it.  If vector initializer is not VECTOR_CST
 +	even after this, return NULL.
 +
 +2007-08-27  Jason Merrill  <jason@redhat.com>
 +
 +	PR c++/31337
 +	* gimplify.c (gimplify_modify_expr): Discard the assignment of 
 +	zero-sized types after calling gimplify_modify_expr_rhs.
 +
 +2007-08-24  Jakub Jelinek  <jakub@redhat.com>
 +
 +	PR debug/32610
 +	* dwarf2out.c (gen_decl_die): Don't call
 +	gen_tagged_type_instantiation_die if decl doesn't have tagged type.
 +
 +2007-08-24  Richard Guenther  <rguenther@suse.de>
 +
 +	* expr.c (get_inner_reference): Remove unused variable.
 +
 +2007-08-24  Richard Guenther  <rguenther@suse.de>
 +
 +	* expr.c (get_inner_reference): Do computation of bitoffset
 +	from offset in a way we can detect overflow reliably.
 +
 +2007-08-22  Richard Guenther  <rguenther@suse.de>
 +
 +	PR middle-end/32563
 +	* tree.c (host_integerp): Treat sizetype as signed as it is
 +	sign-extended.
 +
 +2007-08-20  Adam Nemet  <anemet@caviumnetworks.com>
 +
 +	* config/mips/predicates.md (const_call_insn_operand): Invoke
 +	SYMBOL_REF_LONG_CALL_P only on SYMBOL_REFs.
 +
 +2007-08-17  Chen liqin  <liqin@sunnorth.com.cn>
 +
 +        * config/score/score.md : Update pattern tablejump.
 +        * config/score/score.c : Update score_initialize_trampoline 
 +        function.
 +        * config/score/score.h (TRAMPOLINE_TEMPLATE): Added macro.
 +        (TRAMPOLINE_INSNS, TRAMPOLINE_SIZE) Update macro.
 +        * doc/contrib.texi: Add my entry.
 +
 +2007-08-02  Andreas Krebbel  <krebbel1@de.ibm.com>
 +
 +	* config/s390/s390.md ("*xordi3_cconly"): Change xr to xg.
 +
 +2007-08-01  Andreas Krebbel  <krebbel1@de.ibm.com>
 +
 +	* config/s390/s390.md (TF in GPR splitter): Change operand_subword
 +	parameter to TFmode.
 +
 +2007-07-30  Mark Mitchell  <mark@codesourcery.com>
 +
 +	* BASE-VER: Bump.
 +	* DEV-PHASE: Mark as prerelease.
 +
 +2007-07-25  Steve Ellcey  <sje@cup.hp.com>
 +
 +	PR target/32218
 +	* tree-vect-patterns.c (vect_pattern_recog_1): Check for valid type.
 +
 +2007-07-25  Dorit Nuzman  <dorit@il.ibm.com>
 +	    Devang Patel  <dpatel@apple.com>
 +
 +	PR tree-optimization/25413
 +	* targhooks.c (default_builtin_vector_alignment_reachable): New.
 +	* targhooks.h (default_builtin_vector_alignment_reachable): New.
 +	* tree.h (contains_packed_reference): New.
 +	* expr.c (contains_packed_reference): New.
 +	* tree-vect-analyze.c (vector_alignment_reachable_p): New.
 +	(vect_enhance_data_refs_alignment): Call
 +	vector_alignment_reachable_p.
 +	* target.h (vector_alignment_reachable): New builtin.
 +	* target-def.h (TARGET_VECTOR_ALIGNMENT_REACHABLE): New.
 +	* config/rs6000/rs6000.c (rs6000_vector_alignment_reachable): New.
 +	(TARGET_VECTOR_ALIGNMENT_REACHABLE): Define.
 +
 +2007-07-24  Richard Guenther  <rguenther@suse.de>
 +
 +	Backport from mainline:
 +	2007-07-16  Richard Guenther  <rguenther@suse.de>
 +		    Uros Bizjak  <ubizjak@gmail.com>
 +
 +	* tree-if-conv.c (find_phi_replacement_condition): Unshare "*cond"
 +	before forcing it to gimple operand.
 +
 +2007-07-24  Richard Guenther  <rguenther@suse.de>
 +
 +	PR tree-optimization/32723
 +	Backport from mainline:
 +	2007-03-09  Daniel Berlin  <dberlin@dberlin.org>
 +
 +        * tree-ssa-structalias.c (shared_bitmap_info_t): New structure.
 +        (shared_bitmap_table): New variable.
 +        (shared_bitmap_hash): New function.
 +        (shared_bitmap_eq): Ditto
 +        (shared_bitmap_lookup): Ditto.
 +        (shared_bitmap_add): Ditto.
 +        (find_what_p_points_to): Rewrite to use shared bitmap hashtable.
 +        (init_alias_vars): Init shared bitmap hashtable.
 +        (delete_points_to_sets): Delete shared bitmap hashtable.
 +
 +2007-07-23  Bernd Schmidt  <bernd.schmidt@analog.com>
 +
 +	* reload1.c (choose_reload_regs): Set reload_spill_index for regs
 +	chosen during find_reloads.
 +
  2007-07-19  Release Manager
  
  	* GCC 4.2.1 released.
 
 Modified: head/contrib/gcc/DATESTAMP
 ==============================================================================
 --- head/contrib/gcc/DATESTAMP	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/DATESTAMP	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -1 +1 @@
 -20070719
 +20070831
 
 Modified: head/contrib/gcc/DEV-PHASE
 ==============================================================================
 --- head/contrib/gcc/DEV-PHASE	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/DEV-PHASE	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -0,0 +1 @@
 +prerelease
 
 Modified: head/contrib/gcc/config/mips/predicates.md
 ==============================================================================
 --- head/contrib/gcc/config/mips/predicates.md	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/config/mips/predicates.md	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -116,7 +116,9 @@
        /* If -mlong-calls, force all calls to use register addressing.  Also,
  	 if this function has the long_call attribute, we must use register
  	 addressing.  */
 -      return !TARGET_LONG_CALLS && !SYMBOL_REF_LONG_CALL_P (op);
 +      return (!TARGET_LONG_CALLS
 +	      && !(GET_CODE (op) == SYMBOL_REF
 +		   && SYMBOL_REF_LONG_CALL_P (op)));
  
      case SYMBOL_GOT_GLOBAL:
        /* Without explicit relocs, there is no special syntax for
 
 Modified: head/contrib/gcc/config/rs6000/rs6000.c
 ==============================================================================
 --- head/contrib/gcc/config/rs6000/rs6000.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/config/rs6000/rs6000.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -664,6 +664,7 @@ static int rs6000_use_sched_lookahead (v
  static tree rs6000_builtin_mask_for_load (void);
  
  static void def_builtin (int, const char *, tree, int);
 +static bool rs6000_vector_alignment_reachable (tree, bool);
  static void rs6000_init_builtins (void);
  static rtx rs6000_expand_unop_builtin (enum insn_code, tree, rtx);
  static rtx rs6000_expand_binop_builtin (enum insn_code, tree, rtx);
 @@ -915,6 +916,9 @@ static const char alt_reg_names[][8] =
  #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
  #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
  
 +#undef TARGET_VECTOR_ALIGNMENT_REACHABLE
 +#define TARGET_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
 +
  #undef TARGET_INIT_BUILTINS
  #define TARGET_INIT_BUILTINS rs6000_init_builtins
  
 @@ -1584,6 +1588,37 @@ rs6000_builtin_mask_for_load (void)
      return 0;
  }
  
 +
 +/* Return true iff, data reference of TYPE can reach vector alignment (16)
 +   after applying N number of iterations.  This routine does not determine
 +   how may iterations are required to reach desired alignment.  */
 +
 +static bool
 +rs6000_vector_alignment_reachable (tree type ATTRIBUTE_UNUSED, bool is_packed)
 +{
 +  if (is_packed)
 +    return false;
 +
 +  if (TARGET_32BIT)
 +    {
 +      if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
 +        return true;
 +
 +      if (rs6000_alignment_flags ==  MASK_ALIGN_POWER)
 +        return true;
 +
 +      return false;
 +    }
 +  else
 +    {
 +      if (TARGET_MACHO)
 +        return false;
 +
 +      /* Assuming that all other types are naturally aligned. CHECKME!  */
 +      return true;
 +    }
 +}
 +
  /* Handle generic options of the form -mfoo=yes/no.
     NAME is the option name.
     VALUE is the option value.
 
 Modified: head/contrib/gcc/config/s390/s390.md
 ==============================================================================
 --- head/contrib/gcc/config/s390/s390.md	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/config/s390/s390.md	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -1500,7 +1500,7 @@
     && !s_operand (operands[1], VOIDmode)"
    [(set (match_dup 0) (match_dup 1))]
  {
 -  rtx addr = operand_subword (operands[0], 1, 0, DFmode);
 +  rtx addr = operand_subword (operands[0], 1, 0, TFmode);
    s390_load_address (addr, XEXP (operands[1], 0));
    operands[1] = replace_equiv_address (operands[1], addr);
  })
 @@ -5624,7 +5624,7 @@
    "s390_match_ccmode(insn, CCTmode) && TARGET_64BIT"
    "@
     xgr\t%0,%2
 -   xr\t%0,%2"
 +   xg\t%0,%2"
    [(set_attr "op_type"  "RRE,RXY")])
  
  (define_insn "*xordi3_extimm"
 
 Modified: head/contrib/gcc/cp/ChangeLog
 ==============================================================================
 --- head/contrib/gcc/cp/ChangeLog	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/cp/ChangeLog	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -1,3 +1,66 @@
 +2007-08-24  Jakub Jelinek  <jakub@redhat.com>
 +
 +	PR c++/31941
 +	* error.c (resolve_virtual_fun_from_obj_type_ref): Handle
 +	TARGET_VTABLE_USES_DESCRIPTORS targets properly.
 +
 +	PR c++/32898
 +	* name-lookup.c (set_decl_namespace): lookup_qualified_name failure
 +	is error_mark_node rather than NULL_TREE.
 +	* pt.c (check_explicit_specialization): Likewise.
 +
 +2007-08-22  Jason Merrill  <jason@redhat.com>
 +
 +	PR c++/29365
 +	* pt.c (outermost_tinst_level): New function.
 +	* lex.c (in_main_input_context): New function.
 +	* cp-tree.h: Declare it.
 +	* decl2.c (constrain_class_visibility): Use it to avoid warning
 +	about uses of the anonymous namespace in the main input file.
 +
 +2007-08-20  Jakub Jelinek  <jakub@redhat.com>
 +
 +	PR c++/32992
 +	* typeck.c (check_return_expr): Don't NRV optimize vars in
 +	anonymous unions.
 +	* decl.c (finish_function): Comment fix.
 +
 +2007-08-18  Paolo Carlini  <pcarlini@suse.de>
 +
 +	PR c++/32112
 +	* error.c (dump_decl): Deal with UNBOUND_CLASS_TEMPLATE.
 +	* cxx-pretty-print.c (pp_cxx_unqualified_id): Likewise.
 +
 +2007-08-10  Paolo Carlini  <pcarlini@suse.de>
 +
 +	PR c++/17763
 +	* error.c (dump_expr): Consistently use the *_cxx_*
 +	variants of the pretty-print functions.
 +
 +2007-07-30  Paolo Carlini  <pcarlini@suse.de>
 +
 +	PR c++/32108
 +	* semantics.c (finish_label_stmt): Reject the __label__
 +	extension outside function scopes.
 +
 +2007-07-28  Simon Martin  <simartin@users.sourceforge.net>
 +	    Mark Mitchell  <mark@codesourcery.com>
 +
 +	PR c++/30917
 +	* name-lookup.c (lookup_name_real): Non namespace-scope bindings can be
 +	hidden due to friend declarations in local classes.
 +
 +2007-07-27  Mark Mitchell  <mark@codesourcery.com>
 +
 +	PR c++/32346
 +	* call.c (convert_for_arg_passing): Only widen bitfields to their
 +	declared types if necessary.
 +
 +2007-07-24  Paolo Carlini  <pcarlini@suse.de>
 +
 +	PR c++/30535
 +	* pt.c (unify): Never pass error_mark_node to template_decl_level.
 +
  2007-07-19  Release Manager
  
  	* GCC 4.2.1 released.
 
 Modified: head/contrib/gcc/cp/call.c
 ==============================================================================
 --- head/contrib/gcc/cp/call.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/cp/call.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -4674,7 +4674,27 @@ type_passed_as (tree type)
  tree
  convert_for_arg_passing (tree type, tree val)
  {
 -  val = convert_bitfield_to_declared_type (val);
 +  tree bitfield_type;
 +
 +  /* If VAL is a bitfield, then -- since it has already been converted
 +     to TYPE -- it cannot have a precision greater than TYPE.  
 +
 +     If it has a smaller precision, we must widen it here.  For
 +     example, passing "int f:3;" to a function expecting an "int" will
 +     not result in any conversion before this point.
 +
 +     If the precision is the same we must not risk widening.  For
 +     example, the COMPONENT_REF for a 32-bit "long long" bitfield will
 +     often have type "int", even though the C++ type for the field is
 +     "long long".  If the value is being passed to a function
 +     expecting an "int", then no conversions will be required.  But,
 +     if we call convert_bitfield_to_declared_type, the bitfield will
 +     be converted to "long long".  */
 +  bitfield_type = is_bitfield_expr_with_lowered_type (val);
 +  if (bitfield_type 
 +      && TYPE_PRECISION (TREE_TYPE (val)) < TYPE_PRECISION (type))
 +    val = convert_to_integer (TYPE_MAIN_VARIANT (bitfield_type), val);
 +
    if (val == error_mark_node)
      ;
    /* Pass classes with copy ctors by invisible reference.  */
 
 Modified: head/contrib/gcc/cp/cp-tree.h
 ==============================================================================
 --- head/contrib/gcc/cp/cp-tree.h	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/cp/cp-tree.h	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -4079,6 +4079,7 @@ extern void yyerror				(const char *);
  extern void yyhook				(int);
  extern bool cxx_init				(void);
  extern void cxx_finish				(void);
 +extern bool in_main_input_context		(void);
  
  /* in method.c */
  extern void init_method				(void);
 @@ -4161,6 +4162,7 @@ extern tree build_non_dependent_args		(t
  extern bool reregister_specialization		(tree, tree, tree);
  extern tree fold_non_dependent_expr		(tree);
  extern bool explicit_class_specialization_p     (tree);
 +extern tree outermost_tinst_level		(void);
  
  /* in repo.c */
  extern void init_repo				(void);
 
 Modified: head/contrib/gcc/cp/cxx-pretty-print.c
 ==============================================================================
 --- head/contrib/gcc/cp/cxx-pretty-print.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/cp/cxx-pretty-print.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -204,6 +204,10 @@ pp_cxx_unqualified_id (cxx_pretty_printe
        pp_cxx_unqualified_id (pp, TEMPLATE_PARM_DECL (t));
        break;
  
 +    case UNBOUND_CLASS_TEMPLATE:
 +      pp_cxx_unqualified_id (pp, TYPE_NAME (t));
 +      break;
 +
      default:
        pp_unsupported_tree (pp, t);
        break;
 
 Modified: head/contrib/gcc/cp/decl.c
 ==============================================================================
 --- head/contrib/gcc/cp/decl.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/cp/decl.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -11273,7 +11273,7 @@ finish_function (int flags)
    gcc_assert (stmts_are_full_exprs_p ());
  
    /* Set up the named return value optimization, if we can.  Candidate
 -     variables are selected in check_return_value.  */
 +     variables are selected in check_return_expr.  */
    if (current_function_return_value)
      {
        tree r = current_function_return_value;
 
 Modified: head/contrib/gcc/cp/decl2.c
 ==============================================================================
 --- head/contrib/gcc/cp/decl2.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/cp/decl2.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -1860,9 +1860,12 @@ constrain_class_visibility (tree type)
  	int subvis = type_visibility (ftype);
  
  	if (subvis == VISIBILITY_ANON)
 -	  warning (0, "\
 +	  {
 +	    if (!in_main_input_context ())
 +	      warning (0, "\
  %qT has a field %qD whose type uses the anonymous namespace",
  		   type, t);
 +	  }
  	else if (IS_AGGR_TYPE (ftype)
  		 && vis < VISIBILITY_HIDDEN
  		 && subvis >= VISIBILITY_HIDDEN)
 @@ -1877,9 +1880,12 @@ constrain_class_visibility (tree type)
        int subvis = type_visibility (TREE_TYPE (t));
  
        if (subvis == VISIBILITY_ANON)
 -	warning (0, "\
 +        {
 +	  if (!in_main_input_context())
 +	    warning (0, "\
  %qT has a base %qT whose type uses the anonymous namespace",
  		 type, TREE_TYPE (t));
 +	}
        else if (vis < VISIBILITY_HIDDEN
  	       && subvis >= VISIBILITY_HIDDEN)
  	warning (OPT_Wattributes, "\
 
 Modified: head/contrib/gcc/cp/error.c
 ==============================================================================
 --- head/contrib/gcc/cp/error.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/cp/error.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -901,6 +901,10 @@ dump_decl (tree t, int flags)
  	pp_type_id (cxx_pp, t);
        break;
  
 +    case UNBOUND_CLASS_TEMPLATE:
 +      dump_type (t, flags);
 +      break;
 +
      default:
        pp_unsupported_tree (cxx_pp, t);
        /* Fall through to error.  */
 @@ -1301,10 +1305,14 @@ static tree
  resolve_virtual_fun_from_obj_type_ref (tree ref)
  {
    tree obj_type = TREE_TYPE (OBJ_TYPE_REF_OBJECT (ref));
 -  int index = tree_low_cst (OBJ_TYPE_REF_TOKEN (ref), 1);
 +  HOST_WIDE_INT index = tree_low_cst (OBJ_TYPE_REF_TOKEN (ref), 1);
    tree fun = BINFO_VIRTUALS (TYPE_BINFO (TREE_TYPE (obj_type)));
 -    while (index--)
 +  while (index)
 +    {
        fun = TREE_CHAIN (fun);
 +      index -= (TARGET_VTABLE_USES_DESCRIPTORS
 +		? TARGET_VTABLE_USES_DESCRIPTORS : 1);
 +    }
  
    return BV_FN (fun);
  }
 @@ -1420,13 +1428,13 @@ dump_expr (tree t, int flags)
  	    if (TREE_CODE (ob) == ADDR_EXPR)
  	      {
  		dump_expr (TREE_OPERAND (ob, 0), flags | TFF_EXPR_IN_PARENS);
 -		pp_dot (cxx_pp);
 +		pp_cxx_dot (cxx_pp);
  	      }
  	    else if (TREE_CODE (ob) != PARM_DECL
  		     || strcmp (IDENTIFIER_POINTER (DECL_NAME (ob)), "this"))
  	      {
  		dump_expr (ob, flags | TFF_EXPR_IN_PARENS);
 -		pp_arrow (cxx_pp);
 +		pp_cxx_arrow (cxx_pp);
  	      }
  	    args = TREE_CHAIN (args);
  	  }
 
 Modified: head/contrib/gcc/cp/lex.c
 ==============================================================================
 --- head/contrib/gcc/cp/lex.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/cp/lex.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -827,3 +827,18 @@ make_aggr_type (enum tree_code code)
  
    return t;
  }
 +
 +/* Returns true if we are currently in the main source file, or in a
 +   template instantiation started from the main source file.  */
 +
 +bool
 +in_main_input_context (void)
 +{
 +  tree tl = outermost_tinst_level();
 +
 +  if (tl)
 +    return strcmp (main_input_filename,
 +		   LOCATION_FILE (TINST_LOCATION (tl))) == 0;
 +  else
 +    return strcmp (main_input_filename, input_filename) == 0;
 +}
 
 Modified: head/contrib/gcc/cp/name-lookup.c
 ==============================================================================
 --- head/contrib/gcc/cp/name-lookup.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/cp/name-lookup.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -2924,7 +2924,7 @@ set_decl_namespace (tree decl, tree scop
  
    /* See whether this has been declared in the namespace.  */
    old = lookup_qualified_name (scope, DECL_NAME (decl), false, true);
 -  if (!old)
 +  if (old == error_mark_node)
      /* No old declaration at all.  */
      goto complain;
    if (!is_overloaded_fn (decl))
 @@ -3996,8 +3996,49 @@ lookup_name_real (tree name, int prefer_
  
  	if (binding)
  	  {
 -	    /* Only namespace-scope bindings can be hidden.  */
 -	    gcc_assert (!hidden_name_p (binding));
 +	    if (hidden_name_p (binding))
 +	      {
 +		/* A non namespace-scope binding can only be hidden if
 +		   we are in a local class, due to friend declarations.
 +		   In particular, consider:
 +
 +		   void f() {
 +		     struct A {
 +		       friend struct B;
 +		       void g() { B* b; } // error: B is hidden
 +		     }
 +		     struct B {};
 +		   }
 +
 +		   The standard says that "B" is a local class in "f"
 +		   (but not nested within "A") -- but that name lookup
 +		   for "B" does not find this declaration until it is
 +		   declared directly with "f".
 +
 +		   In particular:
 +
 +		   [class.friend]
 +
 +		   If a friend declaration appears in a local class and
 +		   the name specified is an unqualified name, a prior
 +		   declaration is looked up without considering scopes
 +		   that are outside the innermost enclosing non-class
 +		   scope. For a friend class declaration, if there is no
 +		   prior declaration, the class that is specified 
 +		   belongs to the innermost enclosing non-class scope,
 +		   but if it is subsequently referenced, its name is not
 +		   found by name lookup until a matching declaration is
 +		   provided in the innermost enclosing nonclass scope.
 +		*/
 +		gcc_assert (current_class_type &&
 +			    LOCAL_CLASS_P (current_class_type));
 +
 +		/* This binding comes from a friend declaration in the local
 +		   class. The standard (11.4.8) states that the lookup can
 +		   only succeed if there is a non-hidden declaration in the
 +		   current scope, which is not the case here.  */
 +		POP_TIMEVAR_AND_RETURN (TV_NAME_LOOKUP, NULL_TREE);
 +	      }
  	    val = binding;
  	    break;
  	  }
 
 Modified: head/contrib/gcc/cp/pt.c
 ==============================================================================
 --- head/contrib/gcc/cp/pt.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/cp/pt.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -1971,7 +1971,7 @@ check_explicit_specialization (tree decl
  		 context.  */
  	      fns = lookup_qualified_name (CP_DECL_CONTEXT (decl), dname,
  					   false, true);
 -	      if (!fns || !is_overloaded_fn (fns))
 +	      if (fns == error_mark_node || !is_overloaded_fn (fns))
  		{
  		  error ("%qD is not a template function", dname);
  		  fns = error_mark_node;
 @@ -5288,6 +5288,15 @@ reopen_tinst_level (tree level)
    pop_tinst_level ();
  }
  
 +/* Returns the TINST_LEVEL which gives the original instantiation
 +   context.  */
 +
 +tree
 +outermost_tinst_level (void)
 +{
 +  return tree_last (current_tinst_level);
 +}
 +
  /* DECL is a friend FUNCTION_DECL or TEMPLATE_DECL.  ARGS is the
     vector of template arguments, as for tsubst.
  
 @@ -10453,6 +10462,8 @@ unify (tree tparms, tree targs, tree par
      case TEMPLATE_TEMPLATE_PARM:
      case BOUND_TEMPLATE_TEMPLATE_PARM:
        tparm = TREE_VALUE (TREE_VEC_ELT (tparms, 0));
 +      if (tparm == error_mark_node)
 +	return 1;
  
        if (TEMPLATE_TYPE_LEVEL (parm)
  	  != template_decl_level (tparm))
 
 Modified: head/contrib/gcc/cp/semantics.c
 ==============================================================================
 --- head/contrib/gcc/cp/semantics.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/cp/semantics.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -1320,8 +1320,13 @@ finish_label_stmt (tree name)
  void
  finish_label_decl (tree name)
  {
 -  tree decl = declare_local_label (name);
 -  add_decl_expr (decl);
 +  if (!at_function_scope_p ())
 +    {
 +      error ("__label__ declarations are only allowed in function scopes");
 +      return;
 +    }
 +
 +  add_decl_expr (declare_local_label (name));
  }
  
  /* When DECL goes out of scope, make sure that CLEANUP is executed.  */
 
 Modified: head/contrib/gcc/cp/typeck.c
 ==============================================================================
 --- head/contrib/gcc/cp/typeck.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/cp/typeck.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -6604,6 +6604,7 @@ check_return_expr (tree retval, bool *no
  	  && TREE_CODE (retval) == VAR_DECL
  	  && DECL_CONTEXT (retval) == current_function_decl
  	  && ! TREE_STATIC (retval)
 +	  && ! DECL_ANON_UNION_VAR_P (retval)
  	  && (DECL_ALIGN (retval)
  	      >= DECL_ALIGN (DECL_RESULT (current_function_decl)))
  	  && same_type_p ((TYPE_MAIN_VARIANT
 
 Modified: head/contrib/gcc/doc/contrib.texi
 ==============================================================================
 --- head/contrib/gcc/doc/contrib.texi	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/doc/contrib.texi	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -514,6 +514,10 @@ patches.
  Robert Lipe for OpenServer support, new testsuites, testing, etc.
  
  @item
 +Chen Liqin for various S+core related fixes/improvement, and for
 +maintaining the S+core port.
 +
 +@item
  Weiwen Liu for testing and various bug fixes.
  
  @item
 
 Modified: head/contrib/gcc/dwarf2out.c
 ==============================================================================
 --- head/contrib/gcc/dwarf2out.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/dwarf2out.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -10065,6 +10065,43 @@ rtl_for_decl_init (tree init, tree type)
    else if (initializer_constant_valid_p (init, type)
  	   && ! walk_tree (&init, reference_to_unused, NULL, NULL))
      {
 +      /* Convert vector CONSTRUCTOR initializers to VECTOR_CST if
 +	 possible.  */
 +      if (TREE_CODE (type) == VECTOR_TYPE)
 +	switch (TREE_CODE (init))
 +	  {
 +	  case VECTOR_CST:
 +	    break;
 +	  case CONSTRUCTOR:
 +	    if (TREE_CONSTANT (init))
 +	      {
 +		VEC(constructor_elt,gc) *elts = CONSTRUCTOR_ELTS (init);
 +		bool constant_p = true;
 +		tree value;
 +		unsigned HOST_WIDE_INT ix;
 +
 +		/* Even when ctor is constant, it might contain non-*_CST
 +		   elements (e.g. { 1.0/0.0 - 1.0/0.0, 0.0 }) and those don't
 +		   belong into VECTOR_CST nodes.  */
 +		FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, value)
 +		  if (!CONSTANT_CLASS_P (value))
 +		    {
 +		      constant_p = false;
 +		      break;
 +		    }
 +
 +		if (constant_p)
 +		  {
 +		    init = build_vector_from_ctor (type, elts);
 +		    break;
 +		  }
 +	      }
 +	    /* FALLTHRU */
 +
 +	  default:
 +	    return NULL;
 +	  }
 +
        rtl = expand_expr (init, NULL_RTX, VOIDmode, EXPAND_INITIALIZER);
  
        /* If expand_expr returns a MEM, it wasn't immediate.  */
 @@ -13197,7 +13234,8 @@ gen_decl_die (tree decl, dw_die_ref cont
  	 was generated within the original definition of an inline function) we
  	 have to generate a special (abbreviated) DW_TAG_structure_type,
  	 DW_TAG_union_type, or DW_TAG_enumeration_type DIE here.  */
 -      if (TYPE_DECL_IS_STUB (decl) && decl_ultimate_origin (decl) != NULL_TREE)
 +      if (TYPE_DECL_IS_STUB (decl) && decl_ultimate_origin (decl) != NULL_TREE
 +	  && is_tagged_type (TREE_TYPE (decl)))
  	{
  	  gen_tagged_type_instantiation_die (TREE_TYPE (decl), context_die);
  	  break;
 
 Modified: head/contrib/gcc/expr.c
 ==============================================================================
 --- head/contrib/gcc/expr.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/expr.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -5654,7 +5654,6 @@ get_inner_reference (tree exp, HOST_WIDE
    enum machine_mode mode = VOIDmode;
    tree offset = size_zero_node;
    tree bit_offset = bitsize_zero_node;
 -  tree tem;
  
    /* First get the mode, signedness, and size.  We do this from just the
       outermost expression.  */
 @@ -5690,6 +5689,8 @@ get_inner_reference (tree exp, HOST_WIDE
  	*pbitsize = tree_low_cst (size_tree, 1);
      }
  
 +  *pmode = mode;
 +
    /* Compute cumulative bit-offset for nested component-refs and array-refs,
       and find the ultimate containing object.  */
    while (1)
 @@ -5774,21 +5775,69 @@ get_inner_reference (tree exp, HOST_WIDE
   done:
  
    /* If OFFSET is constant, see if we can return the whole thing as a
 -     constant bit position.  Otherwise, split it up.  */
 -  if (host_integerp (offset, 0)
 -      && 0 != (tem = size_binop (MULT_EXPR,
 -				 fold_convert (bitsizetype, offset),
 -				 bitsize_unit_node))
 -      && 0 != (tem = size_binop (PLUS_EXPR, tem, bit_offset))
 -      && host_integerp (tem, 0))
 -    *pbitpos = tree_low_cst (tem, 0), *poffset = 0;
 -  else
 -    *pbitpos = tree_low_cst (bit_offset, 0), *poffset = offset;
 +     constant bit position.  Make sure to handle overflow during
 +     this conversion.  */
 +  if (host_integerp (offset, 0))
 +    {
 +      double_int tem = double_int_mul (tree_to_double_int (offset),
 +				       uhwi_to_double_int (BITS_PER_UNIT));
 +      tem = double_int_add (tem, tree_to_double_int (bit_offset));
 +      if (double_int_fits_in_shwi_p (tem))
 +	{
 +	  *pbitpos = double_int_to_shwi (tem);
 +	  *poffset = NULL_TREE;
 +	  return exp;
 +	}
 +    }
 +
 +  /* Otherwise, split it up.  */
 +  *pbitpos = tree_low_cst (bit_offset, 0);
 +  *poffset = offset;
  
 -  *pmode = mode;
    return exp;
  }
  
 +/* Given an expression EXP that may be a COMPONENT_REF or an ARRAY_REF,
 +   look for whether EXP or any nested component-refs within EXP is marked
 +   as PACKED.  */
 +
 +bool
 +contains_packed_reference (tree exp)
 +{
 +  bool packed_p = false;
 +
 +  while (1)
 +    {
 +      switch (TREE_CODE (exp))
 +	{
 +	case COMPONENT_REF:
 +	  {
 +	    tree field = TREE_OPERAND (exp, 1);
 +	    packed_p = DECL_PACKED (field) 
 +		       || TYPE_PACKED (TREE_TYPE (field))
 +		       || TYPE_PACKED (TREE_TYPE (exp));
 +	    if (packed_p)
 +	      goto done;
 +	  }
 +	  break;
 +
 +	case BIT_FIELD_REF:
 +	case ARRAY_REF:
 +	case ARRAY_RANGE_REF:
 +	case REALPART_EXPR:
 +	case IMAGPART_EXPR:
 +	case VIEW_CONVERT_EXPR:
 +	  break;
 +
 +	default:
 +	  goto done;
 +	}
 +      exp = TREE_OPERAND (exp, 0);
 +    }
 + done:
 +  return packed_p;
 +}
 +
  /* Return a tree of sizetype representing the size, in bytes, of the element
     of EXP, an ARRAY_REF.  */
  
 
 Modified: head/contrib/gcc/fold-const.c
 ==============================================================================
 --- head/contrib/gcc/fold-const.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/fold-const.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -9387,6 +9387,7 @@ fold_binary (enum tree_code code, tree t
  
        /* ~X | X is -1.  */
        if (TREE_CODE (arg0) == BIT_NOT_EXPR
 +	  && INTEGRAL_TYPE_P (TREE_TYPE (arg1))
  	  && operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0))
  	{
  	  t1 = build_int_cst (type, -1);
 @@ -9396,6 +9397,7 @@ fold_binary (enum tree_code code, tree t
  
        /* X | ~X is -1.  */
        if (TREE_CODE (arg1) == BIT_NOT_EXPR
 +	  && INTEGRAL_TYPE_P (TREE_TYPE (arg0))
  	  && operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0))
  	{
  	  t1 = build_int_cst (type, -1);
 @@ -9503,6 +9505,7 @@ fold_binary (enum tree_code code, tree t
  
        /* ~X ^ X is -1.  */
        if (TREE_CODE (arg0) == BIT_NOT_EXPR
 +	  && INTEGRAL_TYPE_P (TREE_TYPE (arg1))
  	  && operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0))
  	{
  	  t1 = build_int_cst (type, -1);
 @@ -9512,6 +9515,7 @@ fold_binary (enum tree_code code, tree t
  
        /* X ^ ~X is -1.  */
        if (TREE_CODE (arg1) == BIT_NOT_EXPR
 +	  && INTEGRAL_TYPE_P (TREE_TYPE (arg0))
  	  && operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0))
  	{
  	  t1 = build_int_cst (type, -1);
 
 Modified: head/contrib/gcc/gimplify.c
 ==============================================================================
 --- head/contrib/gcc/gimplify.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/gimplify.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -3532,8 +3532,16 @@ gimplify_modify_expr (tree *expr_p, tree
    gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
  	      || TREE_CODE (*expr_p) == INIT_EXPR);
  
 -  /* For zero sized types only gimplify the left hand side and right hand side
 -     as statements and throw away the assignment.  */
 +  /* See if any simplifications can be done based on what the RHS is.  */
 +  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
 +				  want_value);
 +  if (ret != GS_UNHANDLED)
 +    return ret;
 +
 +  /* For zero sized types only gimplify the left hand side and right hand
 +     side as statements and throw away the assignment.  Do this after
 +     gimplify_modify_expr_rhs so we handle TARGET_EXPRs of addressable
 +     types properly.  */
    if (zero_sized_type (TREE_TYPE (*from_p)))
      {
        gimplify_stmt (from_p);
 @@ -3544,12 +3552,6 @@ gimplify_modify_expr (tree *expr_p, tree
        return GS_ALL_DONE;
      }
  
 -  /* See if any simplifications can be done based on what the RHS is.  */
 -  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
 -				  want_value);
 -  if (ret != GS_UNHANDLED)
 -    return ret;
 -
    /* If the value being copied is of variable width, compute the length
       of the copy into a WITH_SIZE_EXPR.   Note that we need to do this
       before gimplifying any of the operands so that we can resolve any
 
 Modified: head/contrib/gcc/reload1.c
 ==============================================================================
 --- head/contrib/gcc/reload1.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/reload1.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -5451,7 +5451,14 @@ choose_reload_regs (struct insn_chain *c
    for (j = 0; j < n_reloads; j++)
      {
        reload_order[j] = j;
 -      reload_spill_index[j] = -1;
 +      if (rld[j].reg_rtx != NULL_RTX)
 +	{
 +	  gcc_assert (REG_P (rld[j].reg_rtx)
 +		      && HARD_REGISTER_P (rld[j].reg_rtx));
 +	  reload_spill_index[j] = REGNO (rld[j].reg_rtx);
 +	}
 +      else
 +	reload_spill_index[j] = -1;
  
        if (rld[j].nregs > 1)
  	{
 
 Modified: head/contrib/gcc/simplify-rtx.c
 ==============================================================================
 --- head/contrib/gcc/simplify-rtx.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/simplify-rtx.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -589,7 +589,8 @@ simplify_unary_operation_1 (enum rtx_cod
        /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
        /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
        if (GET_CODE (op) == LT
 -	  && XEXP (op, 1) == const0_rtx)
 +	  && XEXP (op, 1) == const0_rtx
 +	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
  	{
  	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
  	  int isize = GET_MODE_BITSIZE (inner);
 
 Modified: head/contrib/gcc/target-def.h
 ==============================================================================
 --- head/contrib/gcc/target-def.h	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/target-def.h	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -337,9 +337,12 @@ Foundation, 51 Franklin Street, Fifth Fl
     TARGET_SCHED_SET_SCHED_FLAGS}
  
  #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD 0
 +#define TARGET_VECTOR_ALIGNMENT_REACHABLE \
 +  default_builtin_vector_alignment_reachable
  
  #define TARGET_VECTORIZE                                                \
 -  {TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD}
 +  {TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD,				\
 +   TARGET_VECTOR_ALIGNMENT_REACHABLE}
  
  #define TARGET_DEFAULT_TARGET_FLAGS 0
  
 
 Modified: head/contrib/gcc/target.h
 ==============================================================================
 --- head/contrib/gcc/target.h	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/target.h	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -375,6 +375,10 @@ struct gcc_target
         by the vectorizer, and return the decl of the target builtin
         function.  */
      tree (* builtin_mask_for_load) (void);
 +
 +    /* Return true if vector alignment is reachable (by peeling N
 +      interations) for the given type.  */
 +     bool (* vector_alignment_reachable) (tree, bool);
    } vectorize;
  
    /* The initial value of target_flags.  */
 
 Modified: head/contrib/gcc/targhooks.c
 ==============================================================================
 --- head/contrib/gcc/targhooks.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/targhooks.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -604,4 +604,20 @@ default_reloc_rw_mask (void)
    return flag_pic ? 3 : 0;
  }
  
 +bool
 +default_builtin_vector_alignment_reachable (tree type, bool is_packed)
 +{
 +  if (is_packed)
 +    return false;
 +
 +  /* Assuming that types whose size is > pointer-size are not guaranteed to be
 +     naturally aligned.  */
 +  if (tree_int_cst_compare (TYPE_SIZE (type), bitsize_int (POINTER_SIZE)) > 0)
 +    return false;
 +
 +  /* Assuming that types whose size is <= pointer-size
 +     are naturally aligned.  */
 +  return true;
 +}
 +
  #include "gt-targhooks.h"
 
 Modified: head/contrib/gcc/targhooks.h
 ==============================================================================
 --- head/contrib/gcc/targhooks.h	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/targhooks.h	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -57,6 +57,8 @@ extern const char * default_invalid_with
  
  extern bool default_narrow_bitfield (void);
  
 +extern bool default_builtin_vector_alignment_reachable (tree, bool);
 +
  /* These are here, and not in hooks.[ch], because not all users of
     hooks.h include tm.h, and thus we don't have CUMULATIVE_ARGS.  */
  
 
 Modified: head/contrib/gcc/tree-if-conv.c
 ==============================================================================
 --- head/contrib/gcc/tree-if-conv.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/tree-if-conv.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -743,7 +743,7 @@ find_phi_replacement_condition (struct l
        if (TREE_CODE (*cond) == TRUTH_NOT_EXPR)
  	/* We can be smart here and choose inverted
  	   condition without switching bbs.  */
 -	  *cond = invert_truthvalue (*cond);
 +	*cond = invert_truthvalue (*cond);
        else
  	/* Select non loop header bb.  */
  	first_edge = second_edge;
 @@ -762,9 +762,11 @@ find_phi_replacement_condition (struct l
  
    /* Create temp. for the condition. Vectorizer prefers to have gimple
       value as condition. Various targets use different means to communicate
 -     condition in vector compare operation. Using gimple value allows compiler
 -     to emit vector compare and select RTL without exposing compare's result.  */
 -  *cond = force_gimple_operand (*cond, &new_stmts, false, NULL_TREE);
 +     condition in vector compare operation. Using gimple value allows
 +     compiler to emit vector compare and select RTL without exposing
 +     compare's result.  */
 +  *cond = force_gimple_operand (unshare_expr (*cond), &new_stmts,
 +				false, NULL_TREE);
    if (new_stmts)
      bsi_insert_before (bsi, new_stmts, BSI_SAME_STMT);
    if (!is_gimple_reg (*cond) && !is_gimple_condexpr (*cond))
 
 Modified: head/contrib/gcc/tree-ssa-structalias.c
 ==============================================================================
 --- head/contrib/gcc/tree-ssa-structalias.c	Tue Mar 29 20:23:56 2011	(r220149)
 +++ head/contrib/gcc/tree-ssa-structalias.c	Tue Mar 29 20:53:51 2011	(r220150)
 @@ -4350,6 +4350,75 @@ intra_create_variable_infos (void)
    process_constraint (new_constraint (lhs, rhs));
  }
  
 +/* Structure used to put solution bitmaps in a hashtable so they can
 +   be shared among variables with the same points-to set.  */
 +
 +typedef struct shared_bitmap_info
 +{
 +  bitmap pt_vars;
 +  hashval_t hashcode;
 +} *shared_bitmap_info_t;
 +
 +static htab_t shared_bitmap_table;
 +
 +/* Hash function for a shared_bitmap_info_t */
 +
 +static hashval_t
 +shared_bitmap_hash (const void *p)
 +{
 +  const shared_bitmap_info_t bi = (shared_bitmap_info_t) p;
 +  return bi->hashcode;
 +}
 +
 +/* Equality function for two shared_bitmap_info_t's. */
 +
 +static int
 +shared_bitmap_eq (const void *p1, const void *p2)
 +{
 +  const shared_bitmap_info_t sbi1 = (shared_bitmap_info_t) p1;
 +  const shared_bitmap_info_t sbi2 = (shared_bitmap_info_t) p2;
 +  return bitmap_equal_p (sbi1->pt_vars, sbi2->pt_vars);
 +}
 +
 +/* Lookup a bitmap in the shared bitmap hashtable, and return an already
 +   existing instance if there is one, NULL otherwise.  */
 +
 +static bitmap
 +shared_bitmap_lookup (bitmap pt_vars)
 +{
 +  void **slot;
 +  struct shared_bitmap_info sbi;
 +
 +  sbi.pt_vars = pt_vars;
 +  sbi.hashcode = bitmap_hash (pt_vars);
 +  
 +  slot = htab_find_slot_with_hash (shared_bitmap_table, &sbi,
 
 *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
 _______________________________________________
 svn-src-all@freebsd.org mailing list
 http://lists.freebsd.org/mailman/listinfo/svn-src-all
 To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"
 

From: dfilter@FreeBSD.ORG (dfilter service)
To: bug-followup@FreeBSD.org
Cc:  
Subject: Re: gnu/153298: commit references a PR
Date: Sat, 30 Apr 2011 22:07:30 +0000 (UTC)

 Author: mm
 Date: Sat Apr 30 22:07:04 2011
 New Revision: 221274
 URL: http://svn.freebsd.org/changeset/base/221274
 
 Log:
   MFC 219374, 219376, 219639, 219640, 219697, 219711, 220150:
   
   MFC r219374:
   Backport Intel Core 2 and AMD Geode CPU types from gcc-4.3 (GPLv2)
   These options are supported in this shape in all newer GCC versions.
   
   Source:	gcc-4_3-branch (rev. 118090, 118973, 120846; GPLv2)
   
   MFC r219376:
   Add AMD Geode CPU type to bsd.cpu.mk and examples/etc/make.conf
   For CPUTYPE=core2 use -march=core2
   
   MFC r219639:
   Backport SSSE3 instruction set support to base gcc.
   Enabled by default for -march=core2
   
   Source:	gcc-4_3-branch (rev. 117958, 121687, 121726, 123639; GPLv2)
   
   MFC r219640:
   Add ssse3 capability for CPUTYPE=core2 to MACHINE_CPU in bsd.cpu.mk
   
   MFC r219697:
   Fix -march/-mtune=native autodetection for Intel Core 2 CPUs
   
   Source:	gcc-4_3-branch (partial rev. 119454; GPLv2)
   
   MFC r219711:
   Backport missing tunings for -march=core2:
   - enable extra 80387 mathematical constants (ext_80387_constants)
   - enable compare and exchange 16 bytes (cmpxchg16b)
   
   Verified against llvm-gcc (and apple gcc)
   Source:	gcc-4_3-branch (ref. svn revs. 119260, 121140; GPLv2)
   
   MFC r220150:
   Upgrade of base gcc and libstdc++ to the last GPLv2-licensed revision
   (rev. 127959 of gcc-4_2-branch).
   
   Resolved GCC bugs:
   	c++: 17763, 29365, 30535, 30917, 31337, 31941, 32108, 32112, 32346,
   	     32898, 32992
   	debug: 32610, 32914
   	libstdc++: 33084, 33128
   	middle-end: 32563
   	rtl-optimization: 33148
   	tree-optimization: 25413, 32723
   	target: 32218
   
   Source:	gcc-4_2-branch (up to rev. 127959)
   
   Obtained from:	gcc (var. revs of gcc-4_2-branch and gcc-4_3-branch; GPLv2)
   PR:		gnu/153298, gnu/153959, gnu/154385, gnu/155308
 
 Added:
   stable/8/contrib/gcc/config/i386/geode.md
      - copied unchanged from r219374, head/contrib/gcc/config/i386/geode.md
   stable/8/contrib/gcc/config/i386/tmmintrin.h
      - copied unchanged from r219639, head/contrib/gcc/config/i386/tmmintrin.h
 Modified:
   stable/8/contrib/gcc/BASE-VER
   stable/8/contrib/gcc/ChangeLog
   stable/8/contrib/gcc/DATESTAMP
   stable/8/contrib/gcc/DEV-PHASE
   stable/8/contrib/gcc/config.gcc
   stable/8/contrib/gcc/config/i386/driver-i386.c
   stable/8/contrib/gcc/config/i386/i386.c
   stable/8/contrib/gcc/config/i386/i386.h
   stable/8/contrib/gcc/config/i386/i386.md
   stable/8/contrib/gcc/config/i386/i386.opt
   stable/8/contrib/gcc/config/i386/sse.md
   stable/8/contrib/gcc/config/i386/xmmintrin.h
   stable/8/contrib/gcc/config/mips/predicates.md
   stable/8/contrib/gcc/config/rs6000/rs6000.c
   stable/8/contrib/gcc/config/s390/s390.md
   stable/8/contrib/gcc/cp/ChangeLog
   stable/8/contrib/gcc/cp/call.c
   stable/8/contrib/gcc/cp/cp-tree.h
   stable/8/contrib/gcc/cp/cxx-pretty-print.c
   stable/8/contrib/gcc/cp/decl.c
   stable/8/contrib/gcc/cp/decl2.c
   stable/8/contrib/gcc/cp/error.c
   stable/8/contrib/gcc/cp/lex.c
   stable/8/contrib/gcc/cp/name-lookup.c
   stable/8/contrib/gcc/cp/pt.c
   stable/8/contrib/gcc/cp/semantics.c
   stable/8/contrib/gcc/cp/typeck.c
   stable/8/contrib/gcc/doc/contrib.texi
   stable/8/contrib/gcc/doc/extend.texi
   stable/8/contrib/gcc/doc/gcc.1
   stable/8/contrib/gcc/doc/invoke.texi
   stable/8/contrib/gcc/dwarf2out.c
   stable/8/contrib/gcc/expr.c
   stable/8/contrib/gcc/fold-const.c
   stable/8/contrib/gcc/gimplify.c
   stable/8/contrib/gcc/reload1.c
   stable/8/contrib/gcc/simplify-rtx.c
   stable/8/contrib/gcc/target-def.h
   stable/8/contrib/gcc/target.h
   stable/8/contrib/gcc/targhooks.c
   stable/8/contrib/gcc/targhooks.h
   stable/8/contrib/gcc/tree-if-conv.c
   stable/8/contrib/gcc/tree-ssa-structalias.c
   stable/8/contrib/gcc/tree-vect-analyze.c
   stable/8/contrib/gcc/tree-vect-patterns.c
   stable/8/contrib/gcc/tree.c
   stable/8/contrib/gcc/tree.h
   stable/8/contrib/libstdc++/ChangeLog
   stable/8/contrib/libstdc++/include/std/std_valarray.h
   stable/8/contrib/libstdc++/include/tr1/random
   stable/8/share/examples/etc/make.conf
   stable/8/share/mk/bsd.cpu.mk
 Directory Properties:
   stable/8/contrib/gcc/   (props changed)
   stable/8/contrib/libstdc++/   (props changed)
   stable/8/share/examples/   (props changed)
   stable/8/share/examples/cvsup/   (props changed)
   stable/8/share/mk/   (props changed)
 
 Modified: stable/8/contrib/gcc/BASE-VER
 ==============================================================================
 --- stable/8/contrib/gcc/BASE-VER	Sat Apr 30 21:55:08 2011	(r221273)
 +++ stable/8/contrib/gcc/BASE-VER	Sat Apr 30 22:07:04 2011	(r221274)
 @@ -1 +1 @@
 -4.2.1
 +4.2.2
 
 Modified: stable/8/contrib/gcc/ChangeLog
 ==============================================================================
 --- stable/8/contrib/gcc/ChangeLog	Sat Apr 30 21:55:08 2011	(r221273)
 +++ stable/8/contrib/gcc/ChangeLog	Sat Apr 30 22:07:04 2011	(r221274)
 @@ -1,3 +1,121 @@
 +2007-08-31  Jakub Jelinek  <jakub@redhat.com>
 +
 +	PR rtl-optimization/33148
 +	* simplify-rtx.c (simplify_unary_operation_1): Only optimize
 +	(neg (lt X 0)) if X has scalar int mode.
 +
 +	PR debug/32914
 +	* dwarf2out.c (rtl_for_decl_init): If vector decl has CONSTRUCTOR
 +	initializer, use build_vector_from_ctor if possible to create
 +	VECTOR_CST out of it.  If vector initializer is not VECTOR_CST
 +	even after this, return NULL.
 +
 +2007-08-27  Jason Merrill  <jason@redhat.com>
 +
 +	PR c++/31337
 +	* gimplify.c (gimplify_modify_expr): Discard the assignment of 
 +	zero-sized types after calling gimplify_modify_expr_rhs.
 +
 +2007-08-24  Jakub Jelinek  <jakub@redhat.com>
 +
 +	PR debug/32610
 +	* dwarf2out.c (gen_decl_die): Don't call
 +	gen_tagged_type_instantiation_die if decl doesn't have tagged type.
 +
 +2007-08-24  Richard Guenther  <rguenther@suse.de>
 +
 +	* expr.c (get_inner_reference): Remove unused variable.
 +
 +2007-08-24  Richard Guenther  <rguenther@suse.de>
 +
 +	* expr.c (get_inner_reference): Do computation of bitoffset
 +	from offset in a way we can detect overflow reliably.
 +
 +2007-08-22  Richard Guenther  <rguenther@suse.de>
 +
 +	PR middle-end/32563
 +	* tree.c (host_integerp): Treat sizetype as signed as it is
 +	sign-extended.
 +
 +2007-08-20  Adam Nemet  <anemet@caviumnetworks.com>
 +
 +	* config/mips/predicates.md (const_call_insn_operand): Invoke
 +	SYMBOL_REF_LONG_CALL_P only on SYMBOL_REFs.
 +
 +2007-08-17  Chen liqin  <liqin@sunnorth.com.cn>
 +
 +        * config/score/score.md : Update pattern tablejump.
 +        * config/score/score.c : Update score_initialize_trampoline 
 +        function.
 +        * config/score/score.h (TRAMPOLINE_TEMPLATE): Added macro.
 +        (TRAMPOLINE_INSNS, TRAMPOLINE_SIZE) Update macro.
 +        * doc/contrib.texi: Add my entry.
 +
 +2007-08-02  Andreas Krebbel  <krebbel1@de.ibm.com>
 +
 +	* config/s390/s390.md ("*xordi3_cconly"): Change xr to xg.
 +
 +2007-08-01  Andreas Krebbel  <krebbel1@de.ibm.com>
 +
 +	* config/s390/s390.md (TF in GPR splitter): Change operand_subword
 +	parameter to TFmode.
 +
 +2007-07-30  Mark Mitchell  <mark@codesourcery.com>
 +
 +	* BASE-VER: Bump.
 +	* DEV-PHASE: Mark as prerelease.
 +
 +2007-07-25  Steve Ellcey  <sje@cup.hp.com>
 +
 +	PR target/32218
 +	* tree-vect-patterns.c (vect_pattern_recog_1): Check for valid type.
 +
 +2007-07-25  Dorit Nuzman  <dorit@il.ibm.com>
 +	    Devang Patel  <dpatel@apple.com>
 +
 +	PR tree-optimization/25413
 +	* targhooks.c (default_builtin_vector_alignment_reachable): New.
 +	* targhooks.h (default_builtin_vector_alignment_reachable): New.
 +	* tree.h (contains_packed_reference): New.
 +	* expr.c (contains_packed_reference): New.
 +	* tree-vect-analyze.c (vector_alignment_reachable_p): New.
 +	(vect_enhance_data_refs_alignment): Call
 +	vector_alignment_reachable_p.
 +	* target.h (vector_alignment_reachable): New builtin.
 +	* target-def.h (TARGET_VECTOR_ALIGNMENT_REACHABLE): New.
 +	* config/rs6000/rs6000.c (rs6000_vector_alignment_reachable): New.
 +	(TARGET_VECTOR_ALIGNMENT_REACHABLE): Define.
 +
 +2007-07-24  Richard Guenther  <rguenther@suse.de>
 +
 +	Backport from mainline:
 +	2007-07-16  Richard Guenther  <rguenther@suse.de>
 +		    Uros Bizjak  <ubizjak@gmail.com>
 +
 +	* tree-if-conv.c (find_phi_replacement_condition): Unshare "*cond"
 +	before forcing it to gimple operand.
 +
 +2007-07-24  Richard Guenther  <rguenther@suse.de>
 +
 +	PR tree-optimization/32723
 +	Backport from mainline:
 +	2007-03-09  Daniel Berlin  <dberlin@dberlin.org>
 +
 +        * tree-ssa-structalias.c (shared_bitmap_info_t): New structure.
 +        (shared_bitmap_table): New variable.
 +        (shared_bitmap_hash): New function.
 +        (shared_bitmap_eq): Ditto
 +        (shared_bitmap_lookup): Ditto.
 +        (shared_bitmap_add): Ditto.
 +        (find_what_p_points_to): Rewrite to use shared bitmap hashtable.
 +        (init_alias_vars): Init shared bitmap hashtable.
 +        (delete_points_to_sets): Delete shared bitmap hashtable.
 +
 +2007-07-23  Bernd Schmidt  <bernd.schmidt@analog.com>
 +
 +	* reload1.c (choose_reload_regs): Set reload_spill_index for regs
 +	chosen during find_reloads.
 +
  2007-07-19  Release Manager
  
  	* GCC 4.2.1 released.
 
 Modified: stable/8/contrib/gcc/DATESTAMP
 ==============================================================================
 --- stable/8/contrib/gcc/DATESTAMP	Sat Apr 30 21:55:08 2011	(r221273)
 +++ stable/8/contrib/gcc/DATESTAMP	Sat Apr 30 22:07:04 2011	(r221274)
 @@ -1 +1 @@
 -20070719
 +20070831
 
 Modified: stable/8/contrib/gcc/DEV-PHASE
 ==============================================================================
 --- stable/8/contrib/gcc/DEV-PHASE	Sat Apr 30 21:55:08 2011	(r221273)
 +++ stable/8/contrib/gcc/DEV-PHASE	Sat Apr 30 22:07:04 2011	(r221274)
 @@ -0,0 +1 @@
 +prerelease
 
 Modified: stable/8/contrib/gcc/config.gcc
 ==============================================================================
 --- stable/8/contrib/gcc/config.gcc	Sat Apr 30 21:55:08 2011	(r221273)
 +++ stable/8/contrib/gcc/config.gcc	Sat Apr 30 22:07:04 2011	(r221274)
 @@ -268,11 +268,13 @@ xscale-*-*)
  	;;
  i[34567]86-*-*)
  	cpu_type=i386
 -	extra_headers="mmintrin.h mm3dnow.h xmmintrin.h emmintrin.h pmmintrin.h"
 +	extra_headers="mmintrin.h mm3dnow.h xmmintrin.h emmintrin.h
 +		       pmmintrin.h tmmintrin.h"
  	;;
  x86_64-*-*)
  	cpu_type=i386
 -	extra_headers="mmintrin.h mm3dnow.h xmmintrin.h emmintrin.h pmmintrin.h"
 +	extra_headers="mmintrin.h mm3dnow.h xmmintrin.h emmintrin.h
 +		       pmmintrin.h tmmintrin.h"
  	need_64bit_hwint=yes
  	;;
  ia64-*-*)
 @@ -1207,14 +1209,14 @@ i[34567]86-*-solaris2*)
  		# FIXME: -m64 for i[34567]86-*-* should be allowed just
  		# like -m32 for x86_64-*-*.
  		case X"${with_cpu}" in
 -		Xgeneric|Xnocona|Xx86-64|Xk8|Xopteron|Xathlon64|Xathlon-fx)
 +		Xgeneric|Xcore2|Xnocona|Xx86-64|Xk8|Xopteron|Xathlon64|Xathlon-fx)
  			;;
  		X)
  			with_cpu=generic
  			;;
  		*)
  			echo "Unsupported CPU used in --with-cpu=$with_cpu, supported values:" 1>&2
 -			echo "generic nocona x86-64 k8 opteron athlon64 athlon-fx" 1>&2
 +			echo "generic core2 nocona x86-64 k8 opteron athlon64 athlon-fx" 1>&2
  			exit 1
  			;;
  		esac
 @@ -2537,6 +2539,9 @@ if test x$with_cpu = x ; then
          nocona-*)
            with_cpu=nocona
            ;;
 +	core2-*)
 +	  with_cpu=core2
 +	  ;;
          pentium_m-*)
            with_cpu=pentium-m
            ;;
 @@ -2556,6 +2561,9 @@ if test x$with_cpu = x ; then
          nocona-*)
            with_cpu=nocona
            ;;
 +	core2-*)
 +	  with_cpu=core2
 +	  ;;
          *)
            with_cpu=generic
            ;;
 @@ -2787,7 +2795,7 @@ case "${target}" in
  				esac
  				# OK
  				;;
 -			"" | k8 | opteron | athlon64 | athlon-fx | nocona | generic)
 +			"" | k8 | opteron | athlon64 | athlon-fx | nocona | core2 | generic)
  				# OK
  				;;
  			*)
 
 Modified: stable/8/contrib/gcc/config/i386/driver-i386.c
 ==============================================================================
 --- stable/8/contrib/gcc/config/i386/driver-i386.c	Sat Apr 30 21:55:08 2011	(r221273)
 +++ stable/8/contrib/gcc/config/i386/driver-i386.c	Sat Apr 30 22:07:04 2011	(r221274)
 @@ -39,6 +39,7 @@ const char *host_detect_local_cpu (int a
  #define bit_SSE2 (1 << 26)
  
  #define bit_SSE3 (1 << 0)
 +#define bit_SSSE3 (1 << 9)
  #define bit_CMPXCHG16B (1 << 13)
  
  #define bit_3DNOW (1 << 31)
 @@ -66,7 +67,7 @@ const char *host_detect_local_cpu (int a
    unsigned int vendor;
    unsigned int ext_level;
    unsigned char has_mmx = 0, has_3dnow = 0, has_3dnowp = 0, has_sse = 0;
 -  unsigned char has_sse2 = 0, has_sse3 = 0, has_cmov = 0;
 +  unsigned char has_sse2 = 0, has_sse3 = 0, has_ssse3 = 0, has_cmov = 0;
    unsigned char has_longmode = 0, has_cmpxchg8b = 0;
    unsigned char is_amd = 0;
    unsigned int family = 0;
 @@ -107,6 +108,7 @@ const char *host_detect_local_cpu (int a
    has_sse = !!(edx & bit_SSE);
    has_sse2 = !!(edx & bit_SSE2);
    has_sse3 = !!(ecx & bit_SSE3);
 +  has_ssse3 = !!(ecx & bit_SSSE3);
    /* We don't care for extended family.  */
    family = (eax >> 8) & ~(1 << 4);
  
 @@ -148,7 +150,9 @@ const char *host_detect_local_cpu (int a
  	  /* We have no idea.  Use something reasonable.  */
  	  if (arch)
  	    {
 -	      if (has_sse3)
 +	      if (has_ssse3)
 +		cpu = "core2";
 +	      else if (has_sse3)
  		{
  		  if (has_longmode)
  		    cpu = "nocona";
 @@ -230,6 +234,9 @@ const char *host_detect_local_cpu (int a
  	  cpu = "generic";
  	}
        break;
 +    case PROCESSOR_GEODE:
 +      cpu = "geode";
 +      break;
      case PROCESSOR_K6:
        if (has_3dnow)
          cpu = "k6-3";
 
 Copied: stable/8/contrib/gcc/config/i386/geode.md (from r219374, head/contrib/gcc/config/i386/geode.md)
 ==============================================================================
 --- /dev/null	00:00:00 1970	(empty, because file is newly added)
 +++ stable/8/contrib/gcc/config/i386/geode.md	Sat Apr 30 22:07:04 2011	(r221274, copy of r219374, head/contrib/gcc/config/i386/geode.md)
 @@ -0,0 +1,153 @@
 +;; Geode Scheduling
 +;; Copyright (C) 2006
 +;; Free Software Foundation, Inc.
 +;;
 +;; This file is part of GCC.
 +;;
 +;; GCC is free software; you can redistribute it and/or modify
 +;; it under the terms of the GNU General Public License as published by
 +;; the Free Software Foundation; either version 2, or (at your option)
 +;; any later version.
 +;;
 +;; GCC is distributed in the hope that it will be useful,
 +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
 +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 +;; GNU General Public License for more details.
 +;;
 +;; You should have received a copy of the GNU General Public License
 +;; along with GCC; see the file COPYING.  If not, write to
 +;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
 +;; Boston, MA 02110-1301, USA.
 +;;
 +;; The Geode architecture is one insn issue processor.
 +;;
 +;; This description is based on data from the following documents:
 +;;
 +;;    "AMD Geode GX Processor Data Book"
 +;;    Advanced Micro Devices, Inc., Aug 2005.
 +;;
 +;;    "AMD Geode LX Processor Data Book"
 +;;    Advanced Micro Devices, Inc., Jan 2006.
 +;;
 +;;
 +;; CPU execution units of the Geode:
 +;;
 +;; issue	describes the issue pipeline.
 +;; alu		describes the Integer unit
 +;; fpu		describes the FP unit
 +;;
 +;; The fp unit is out of order execution unit with register renaming.
 +;; There is also memory management unit and execution pipeline for
 +;; load/store operations.  We ignore it and difference between insns
 +;; using memory and registers.
 +
 +(define_automaton "geode")
 +
 +(define_cpu_unit "geode_issue,geode_alu,geode_fpu" "geode")
 +
 +(define_insn_reservation "alu" 1
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "alu,alu1,negnot,icmp,lea,test,imov,imovx,icmov,incdec,setcc"))
 +			 "geode_issue,geode_alu")
 +
 +(define_insn_reservation "shift" 2
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "ishift,ishift1,rotate,rotate1,cld"))
 +			 "geode_issue,geode_alu*2")
 +
 +(define_insn_reservation "imul" 7
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "imul"))
 +			 "geode_issue,geode_alu*7")
 +
 +(define_insn_reservation "idiv" 40
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "idiv"))
 +			 "geode_issue,geode_alu*40")
 +
 +;; The branch unit.
 +(define_insn_reservation "call" 2
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "call,callv"))
 +			 "geode_issue,geode_alu*2")
 +
 +(define_insn_reservation "geode_branch" 1
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "ibr"))
 +			 "geode_issue,geode_alu")
 +
 +(define_insn_reservation "geode_pop_push" 1
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "pop,push"))
 +			 "geode_issue,geode_alu")
 +
 +(define_insn_reservation "geode_leave" 2
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "leave"))
 +			 "geode_issue,geode_alu*2")
 +
 +(define_insn_reservation "geode_load_str" 4
 +			 (and (eq_attr "cpu" "geode")
 +			      (and (eq_attr "type" "str")
 +				   (eq_attr "memory" "load,both")))
 +			 "geode_issue,geode_alu*4")
 +
 +(define_insn_reservation "geode_store_str" 2
 +			 (and (eq_attr "cpu" "geode")
 +			      (and (eq_attr "type" "str")
 +				   (eq_attr "memory" "store")))
 +			 "geode_issue,geode_alu*2")
 +
 +;; Be optimistic
 +(define_insn_reservation "geode_unknown" 1
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "multi,other"))
 +			 "geode_issue,geode_alu")
 +
 +;; FPU
 +
 +(define_insn_reservation "geode_fop" 6
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "fop,fcmp"))
 +			 "geode_issue,geode_fpu*6")
 +
 +(define_insn_reservation "geode_fsimple" 1
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "fmov,fcmov,fsgn,fxch"))
 +			 "geode_issue,geode_fpu")
 +
 +(define_insn_reservation "geode_fist" 4
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "fistp,fisttp"))
 +			 "geode_issue,geode_fpu*4")
 +
 +(define_insn_reservation "geode_fmul" 10
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "fmul"))
 +			 "geode_issue,geode_fpu*10")
 +
 +(define_insn_reservation "geode_fdiv" 47
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "fdiv"))
 +			 "geode_issue,geode_fpu*47")
 +
 +;; We use minimal latency (fsin) here
 +(define_insn_reservation "geode_fpspc" 54
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "fpspc"))
 +			 "geode_issue,geode_fpu*54")
 +
 +(define_insn_reservation "geode_frndint" 12
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "frndint"))
 +			 "geode_issue,geode_fpu*12")
 +
 +(define_insn_reservation "geode_mmxmov" 1
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "mmxmov"))
 +			 "geode_issue,geode_fpu")
 +
 +(define_insn_reservation "geode_mmx" 2
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "mmx,mmxadd,mmxmul,mmxcmp,mmxcvt,mmxshft"))
 +			 "geode_issue,geode_fpu*2")
 
 Modified: stable/8/contrib/gcc/config/i386/i386.c
 ==============================================================================
 --- stable/8/contrib/gcc/config/i386/i386.c	Sat Apr 30 21:55:08 2011	(r221273)
 +++ stable/8/contrib/gcc/config/i386/i386.c	Sat Apr 30 22:07:04 2011	(r221274)
 @@ -336,6 +336,60 @@ struct processor_costs pentiumpro_cost =
  };
  
  static const
 +struct processor_costs geode_cost = {
 +  COSTS_N_INSNS (1),			/* cost of an add instruction */
 +  COSTS_N_INSNS (1),			/* cost of a lea instruction */
 +  COSTS_N_INSNS (2),			/* variable shift costs */
 +  COSTS_N_INSNS (1),			/* constant shift costs */
 +  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
 +   COSTS_N_INSNS (4),			/*                               HI */
 +   COSTS_N_INSNS (7),			/*                               SI */
 +   COSTS_N_INSNS (7),			/*                               DI */
 +   COSTS_N_INSNS (7)},			/*                               other */
 +  0,					/* cost of multiply per each bit set */
 +  {COSTS_N_INSNS (15),			/* cost of a divide/mod for QI */
 +   COSTS_N_INSNS (23),			/*                          HI */
 +   COSTS_N_INSNS (39),			/*                          SI */
 +   COSTS_N_INSNS (39),			/*                          DI */
 +   COSTS_N_INSNS (39)},			/*                          other */
 +  COSTS_N_INSNS (1),			/* cost of movsx */
 +  COSTS_N_INSNS (1),			/* cost of movzx */
 +  8,					/* "large" insn */
 +  4,					/* MOVE_RATIO */
 +  1,					/* cost for loading QImode using movzbl */
 +  {1, 1, 1},				/* cost of loading integer registers
 +					   in QImode, HImode and SImode.
 +					   Relative to reg-reg move (2).  */
 +  {1, 1, 1},				/* cost of storing integer registers */
 +  1,					/* cost of reg,reg fld/fst */
 +  {1, 1, 1},				/* cost of loading fp registers
 +					   in SFmode, DFmode and XFmode */
 +  {4, 6, 6},				/* cost of storing fp registers
 +					   in SFmode, DFmode and XFmode */
 +
 +  1,					/* cost of moving MMX register */
 +  {1, 1},				/* cost of loading MMX registers
 +					   in SImode and DImode */
 +  {1, 1},				/* cost of storing MMX registers
 +					   in SImode and DImode */
 +  1,					/* cost of moving SSE register */
 +  {1, 1, 1},				/* cost of loading SSE registers
 +					   in SImode, DImode and TImode */
 +  {1, 1, 1},				/* cost of storing SSE registers
 +					   in SImode, DImode and TImode */
 +  1,					/* MMX or SSE register to integer */
 +  32,					/* size of prefetch block */
 +  1,					/* number of parallel prefetches */
 +  1,					/* Branch cost */
 +  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
 +  COSTS_N_INSNS (11),			/* cost of FMUL instruction.  */
 +  COSTS_N_INSNS (47),			/* cost of FDIV instruction.  */
 +  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
 +  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
 +  COSTS_N_INSNS (54),			/* cost of FSQRT instruction.  */
 +};
 +
 +static const
  struct processor_costs k6_cost = {
    COSTS_N_INSNS (1),			/* cost of an add instruction */
    COSTS_N_INSNS (2),			/* cost of a lea instruction */
 @@ -600,6 +654,58 @@ struct processor_costs nocona_cost = {
    COSTS_N_INSNS (44),			/* cost of FSQRT instruction.  */
  };
  
 +static const
 +struct processor_costs core2_cost = {
 +  COSTS_N_INSNS (1),			/* cost of an add instruction */
 +  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
 +  COSTS_N_INSNS (1),			/* variable shift costs */
 +  COSTS_N_INSNS (1),			/* constant shift costs */
 +  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
 +   COSTS_N_INSNS (3),			/*                               HI */
 +   COSTS_N_INSNS (3),			/*                               SI */
 +   COSTS_N_INSNS (3),			/*                               DI */
 +   COSTS_N_INSNS (3)},			/*                               other */
 +  0,					/* cost of multiply per each bit set */
 +  {COSTS_N_INSNS (22),			/* cost of a divide/mod for QI */
 +   COSTS_N_INSNS (22),			/*                          HI */
 +   COSTS_N_INSNS (22),			/*                          SI */
 +   COSTS_N_INSNS (22),			/*                          DI */
 +   COSTS_N_INSNS (22)},			/*                          other */
 +  COSTS_N_INSNS (1),			/* cost of movsx */
 +  COSTS_N_INSNS (1),			/* cost of movzx */
 +  8,					/* "large" insn */
 +  16,					/* MOVE_RATIO */
 +  2,					/* cost for loading QImode using movzbl */
 +  {6, 6, 6},				/* cost of loading integer registers
 +					   in QImode, HImode and SImode.
 +					   Relative to reg-reg move (2).  */
 +  {4, 4, 4},				/* cost of storing integer registers */
 +  2,					/* cost of reg,reg fld/fst */
 +  {6, 6, 6},				/* cost of loading fp registers
 +					   in SFmode, DFmode and XFmode */
 +  {4, 4, 4},				/* cost of loading integer registers */
 +  2,					/* cost of moving MMX register */
 +  {6, 6},				/* cost of loading MMX registers
 +					   in SImode and DImode */
 +  {4, 4},				/* cost of storing MMX registers
 +					   in SImode and DImode */
 +  2,					/* cost of moving SSE register */
 +  {6, 6, 6},				/* cost of loading SSE registers
 +					   in SImode, DImode and TImode */
 +  {4, 4, 4},				/* cost of storing SSE registers
 +					   in SImode, DImode and TImode */
 +  2,					/* MMX or SSE register to integer */
 +  128,					/* size of prefetch block */
 +  8,					/* number of parallel prefetches */
 +  3,					/* Branch cost */
 +  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
 +  COSTS_N_INSNS (5),			/* cost of FMUL instruction.  */
 +  COSTS_N_INSNS (32),			/* cost of FDIV instruction.  */
 +  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
 +  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
 +  COSTS_N_INSNS (58),			/* cost of FSQRT instruction.  */
 +};
 +
  /* Generic64 should produce code tuned for Nocona and K8.  */
  static const
  struct processor_costs generic64_cost = {
 @@ -721,38 +827,41 @@ const struct processor_costs *ix86_cost 
  #define m_486 (1<<PROCESSOR_I486)
  #define m_PENT (1<<PROCESSOR_PENTIUM)
  #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
 +#define m_GEODE  (1<<PROCESSOR_GEODE)
 +#define m_K6_GEODE  (m_K6 | m_GEODE)
  #define m_K6  (1<<PROCESSOR_K6)
  #define m_ATHLON  (1<<PROCESSOR_ATHLON)
  #define m_PENT4  (1<<PROCESSOR_PENTIUM4)
  #define m_K8  (1<<PROCESSOR_K8)
  #define m_ATHLON_K8  (m_K8 | m_ATHLON)
  #define m_NOCONA  (1<<PROCESSOR_NOCONA)
 +#define m_CORE2  (1<<PROCESSOR_CORE2)
  #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
  #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
  #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
  
  /* Generic instruction choice should be common subset of supported CPUs
 -   (PPro/PENT4/NOCONA/Athlon/K8).  */
 +   (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
  
  /* Leave is not affecting Nocona SPEC2000 results negatively, so enabling for
     Generic64 seems like good code size tradeoff.  We can't enable it for 32bit
     generic because it is not working well with PPro base chips.  */
 -const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8 | m_GENERIC64;
 -const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
 +const int x86_use_leave = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_CORE2 | m_GENERIC64;
 +const int x86_push_memory = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
  const int x86_zero_extend_with_and = m_486 | m_PENT;
 -const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_GENERIC /* m_386 | m_K6 */;
 +const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */;
  const int x86_double_with_add = ~m_386;
  const int x86_use_bit_test = m_386;
 -const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_GENERIC;
 -const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
 +const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_CORE2 | m_GENERIC;
 +const int x86_cmove = m_PPRO | m_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
  const int x86_3dnow_a = m_ATHLON_K8;
 -const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
 +const int x86_deep_branch = m_PPRO | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
  /* Branch hints were put in P4 based on simulation result. But
     after P4 was made, no performance benefit was observed with
     branch hints. It also increases the code size. As the result,
     icc never generates branch hints.  */
  const int x86_branch_hints = 0;
 -const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA | m_GENERIC32; /*m_GENERIC | m_ATHLON_K8 ? */
 +const int x86_use_sahf = m_PPRO | m_K6_GEODE | m_PENT4 | m_NOCONA | m_GENERIC32; /*m_GENERIC | m_ATHLON_K8 ? */
  /* We probably ought to watch for partial register stalls on Generic32
     compilation setting as well.  However in current implementation the
     partial register stalls are not eliminated very well - they can
 @@ -762,15 +871,15 @@ const int x86_use_sahf = m_PPRO | m_K6 |
     with partial reg. dependencies used by Athlon/P4 based chips, it is better
     to leave it off for generic32 for now.  */
  const int x86_partial_reg_stall = m_PPRO;
 -const int x86_partial_flag_reg_stall = m_GENERIC;
 -const int x86_use_himode_fiop = m_386 | m_486 | m_K6;
 -const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_GENERIC);
 +const int x86_partial_flag_reg_stall =  m_CORE2 | m_GENERIC;
 +const int x86_use_himode_fiop = m_386 | m_486 | m_K6_GEODE;
 +const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_CORE2 | m_GENERIC);
  const int x86_use_mov0 = m_K6;
 -const int x86_use_cltd = ~(m_PENT | m_K6 | m_GENERIC);
 +const int x86_use_cltd = ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC);
  const int x86_read_modify_write = ~m_PENT;
  const int x86_read_modify = ~(m_PENT | m_PPRO);
  const int x86_split_long_moves = m_PPRO;
 -const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_GENERIC; /* m_PENT4 ? */
 +const int x86_promote_QImode = m_K6_GEODE | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_CORE2 | m_GENERIC; /* m_PENT4 ? */
  const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
  const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
  const int x86_qimode_math = ~(0);
 @@ -780,18 +889,18 @@ const int x86_promote_qi_regs = 0;
     if our scheme for avoiding partial stalls was more effective.  */
  const int x86_himode_math = ~(m_PPRO);
  const int x86_promote_hi_regs = m_PPRO;
 -const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_GENERIC;
 -const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_GENERIC;
 -const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA | m_GENERIC;
 -const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA | m_GENERIC;
 -const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC);
 -const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
 -const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
 -const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC;
 -const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO | m_GENERIC;
 -const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO | m_GENERIC;
 +const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 +const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 +const int x86_add_esp_4 = m_ATHLON_K8 | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 +const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6_GEODE | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 +const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_GEODE);
 +const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 +const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 +const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
 +const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
 +const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
  const int x86_shift1 = ~m_486;
 -const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
 +const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
  /* In Generic model we have an conflict here in between PPro/Pentium4 based chips
     that thread 128bit SSE registers as single units versus K8 based chips that
     divide SSE registers to two 64bit halves.
 @@ -801,7 +910,7 @@ const int x86_arch_always_fancy_math_387
     this option on P4 brings over 20% SPECfp regression, while enabling it on
     K8 brings roughly 2.4% regression that can be partly masked by careful scheduling
     of moves.  */
 -const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC;
 +const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
  /* Set for machines where the type and dependencies are resolved on SSE
     register parts instead of whole registers, so we may maintain just
     lower part of scalar values in proper format leaving the upper part
 @@ -810,28 +919,28 @@ const int x86_sse_split_regs = m_ATHLON_
  const int x86_sse_typeless_stores = m_ATHLON_K8;
  const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
  const int x86_use_ffreep = m_ATHLON_K8;
 -const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
 -const int x86_use_incdec = ~(m_PENT4 | m_NOCONA | m_GENERIC);
 +const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6_GEODE | m_CORE2;
 +const int x86_use_incdec = ~(m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC);
  
  /* ??? Allowing interunit moves makes it all too easy for the compiler to put
     integer data in xmm registers.  Which results in pretty abysmal code.  */
  const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;
  
 -const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC32;
 +const int x86_ext_80387_constants = m_K6_GEODE | m_ATHLON | m_PENT4 | m_NOCONA | m_CORE2 | m_PPRO | m_GENERIC32;
  /* Some CPU cores are not able to predict more than 4 branch instructions in
     the 16 byte window.  */
 -const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
 -const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT | m_GENERIC;
 +const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 +const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC;
  const int x86_use_bt = m_ATHLON_K8;
  /* Compare and exchange was added for 80486.  */
  const int x86_cmpxchg = ~m_386;
  /* Compare and exchange 8 bytes was added for pentium.  */
  const int x86_cmpxchg8b = ~(m_386 | m_486);
  /* Compare and exchange 16 bytes was added for nocona.  */
 -const int x86_cmpxchg16b = m_NOCONA;
 +const int x86_cmpxchg16b = m_NOCONA | m_CORE2;
  /* Exchange and add was added for 80486.  */
  const int x86_xadd = ~m_386;
 -const int x86_pad_returns = m_ATHLON_K8 | m_GENERIC;
 +const int x86_pad_returns = m_ATHLON_K8 | m_CORE2 | m_GENERIC;
  
  /* In case the average insn count for single function invocation is
     lower than this constant, emit fast (but longer) prologue and
 @@ -1402,16 +1511,24 @@ ix86_handle_option (size_t code, const c
      case OPT_msse:
        if (!value)
  	{
 -	  target_flags &= ~(MASK_SSE2 | MASK_SSE3);
 -	  target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
 +	  target_flags &= ~(MASK_SSE2 | MASK_SSE3 | MASK_SSSE3);
 +	  target_flags_explicit |= MASK_SSE2 | MASK_SSE3 | MASK_SSSE3;
  	}
        return true;
  
      case OPT_msse2:
        if (!value)
  	{
 -	  target_flags &= ~MASK_SSE3;
 -	  target_flags_explicit |= MASK_SSE3;
 +	  target_flags &= ~(MASK_SSE3 | MASK_SSSE3);
 +	  target_flags_explicit |= MASK_SSE3 | MASK_SSSE3;
 +	}
 +      return true;
 +
 +    case OPT_msse3:
 +      if (!value)
 +	{
 +	  target_flags &= ~MASK_SSSE3;
 +	  target_flags_explicit |= MASK_SSSE3;
  	}
        return true;
  
 @@ -1455,11 +1572,13 @@ override_options (void)
        {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
        {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
        {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
 +      {&geode_cost, 0, 0, 0, 0, 0, 0, 0},
        {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
        {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
        {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
        {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
        {&nocona_cost, 0, 0, 0, 0, 0, 0, 0},
 +      {&core2_cost, 0, 0, 16, 7, 16, 7, 16},
        {&generic32_cost, 0, 0, 16, 7, 16, 7, 16},
        {&generic64_cost, 0, 0, 16, 7, 16, 7, 16}
      };
 @@ -1478,7 +1597,8 @@ override_options (void)
  	  PTA_PREFETCH_SSE = 16,
  	  PTA_3DNOW = 32,
  	  PTA_3DNOW_A = 64,
 -	  PTA_64BIT = 128
 +	  PTA_64BIT = 128,
 +	  PTA_SSSE3 = 256
  	} flags;
      }
    const processor_alias_table[] =
 @@ -1506,6 +1626,11 @@ override_options (void)
  				        | PTA_MMX | PTA_PREFETCH_SSE},
        {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
  				        | PTA_MMX | PTA_PREFETCH_SSE},
 +      {"core2", PROCESSOR_CORE2, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_SSSE3
 +                                        | PTA_64BIT | PTA_MMX
 +                                        | PTA_PREFETCH_SSE},
 +      {"geode", PROCESSOR_GEODE, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
 +				   | PTA_3DNOW_A},
        {"k6", PROCESSOR_K6, PTA_MMX},
        {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
        {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
 @@ -1695,6 +1820,9 @@ override_options (void)
  	if (processor_alias_table[i].flags & PTA_SSE3
  	    && !(target_flags_explicit & MASK_SSE3))
  	  target_flags |= MASK_SSE3;
 +	if (processor_alias_table[i].flags & PTA_SSSE3
 +	    && !(target_flags_explicit & MASK_SSSE3))
 +	  target_flags |= MASK_SSSE3;
  	if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
  	  x86_prefetch_sse = true;
  	if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
 @@ -1871,6 +1999,10 @@ override_options (void)
    if (!TARGET_80387)
      target_flags |= MASK_NO_FANCY_MATH_387;
  
 +  /* Turn on SSE3 builtins for -mssse3.  */
 +  if (TARGET_SSSE3)
 +    target_flags |= MASK_SSE3;
 +
    /* Turn on SSE2 builtins for -msse3.  */
    if (TARGET_SSE3)
      target_flags |= MASK_SSE2;
 @@ -13706,6 +13838,9 @@ ix86_issue_rate (void)
      case PROCESSOR_GENERIC64:
        return 3;
  
 +    case PROCESSOR_CORE2:
 +      return 4;
 +
      default:
        return 1;
      }
 @@ -14574,6 +14709,41 @@ enum ix86_builtins
    IX86_BUILTIN_MONITOR,
    IX86_BUILTIN_MWAIT,
  
 +  /* SSSE3.  */
 +  IX86_BUILTIN_PHADDW,
 +  IX86_BUILTIN_PHADDD,
 +  IX86_BUILTIN_PHADDSW,
 +  IX86_BUILTIN_PHSUBW,
 +  IX86_BUILTIN_PHSUBD,
 +  IX86_BUILTIN_PHSUBSW,
 +  IX86_BUILTIN_PMADDUBSW,
 +  IX86_BUILTIN_PMULHRSW,
 +  IX86_BUILTIN_PSHUFB,
 +  IX86_BUILTIN_PSIGNB,
 +  IX86_BUILTIN_PSIGNW,
 +  IX86_BUILTIN_PSIGND,
 +  IX86_BUILTIN_PALIGNR,
 +  IX86_BUILTIN_PABSB,
 +  IX86_BUILTIN_PABSW,
 +  IX86_BUILTIN_PABSD,
 +
 +  IX86_BUILTIN_PHADDW128,
 +  IX86_BUILTIN_PHADDD128,
 +  IX86_BUILTIN_PHADDSW128,
 +  IX86_BUILTIN_PHSUBW128,
 +  IX86_BUILTIN_PHSUBD128,
 +  IX86_BUILTIN_PHSUBSW128,
 +  IX86_BUILTIN_PMADDUBSW128,
 +  IX86_BUILTIN_PMULHRSW128,
 +  IX86_BUILTIN_PSHUFB128,
 +  IX86_BUILTIN_PSIGNB128,
 +  IX86_BUILTIN_PSIGNW128,
 +  IX86_BUILTIN_PSIGND128,
 +  IX86_BUILTIN_PALIGNR128,
 +  IX86_BUILTIN_PABSB128,
 +  IX86_BUILTIN_PABSW128,
 +  IX86_BUILTIN_PABSD128,
 +
    IX86_BUILTIN_VEC_INIT_V2SI,
    IX86_BUILTIN_VEC_INIT_V4HI,
    IX86_BUILTIN_VEC_INIT_V8QI,
 @@ -14915,7 +15085,33 @@ static const struct builtin_description 
    { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
    { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
    { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
 -  { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
 +  { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 },
 +
 +  /* SSSE3 */
 +  { MASK_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv8hi3, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv4hi3, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, 0, 0 }
  };
  
  static const struct builtin_description bdesc_1arg[] =
 @@ -14962,6 +15158,14 @@ static const struct builtin_description 
    /* SSE3 */
    { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
    { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
 +
 +  /* SSSE3 */
 +  { MASK_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, 0, 0 },
  };
  
  static void
 @@ -15096,6 +15300,16 @@ ix86_init_mmx_sse_builtins (void)
    /* Normal vector unops.  */
    tree v4sf_ftype_v4sf
      = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
 +  tree v16qi_ftype_v16qi
 +    = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
 +  tree v8hi_ftype_v8hi
 +    = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
 +  tree v4si_ftype_v4si
 +    = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
 +  tree v8qi_ftype_v8qi
 +    = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
 +  tree v4hi_ftype_v4hi
 +    = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
  
    /* Normal vector binops.  */
    tree v4sf_ftype_v4sf_v4sf
 @@ -15115,6 +15329,12 @@ ix86_init_mmx_sse_builtins (void)
  				long_long_unsigned_type_node,
  				long_long_unsigned_type_node, NULL_TREE);
  
 +  tree di_ftype_di_di_int
 +    = build_function_type_list (long_long_unsigned_type_node,
 +				long_long_unsigned_type_node,
 +				long_long_unsigned_type_node,
 +				integer_type_node, NULL_TREE);
 +
    tree v2si_ftype_v2sf
      = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
    tree v2sf_ftype_v2si
 @@ -15216,6 +15436,9 @@ ix86_init_mmx_sse_builtins (void)
    tree v2di_ftype_v2di_int
      = build_function_type_list (V2DI_type_node,
  				V2DI_type_node, integer_type_node, NULL_TREE);
 +  tree v2di_ftype_v2di_v2di_int
 +    = build_function_type_list (V2DI_type_node, V2DI_type_node,
 +				V2DI_type_node, integer_type_node, NULL_TREE);
    tree v4si_ftype_v4si_int
      = build_function_type_list (V4SI_type_node,
  				V4SI_type_node, integer_type_node, NULL_TREE);
 @@ -15332,6 +15555,50 @@ ix86_init_mmx_sse_builtins (void)
        def_builtin (d->mask, d->name, type, d->code);
      }
  
 +  /* Add all builtins that are more or less simple operations on 1 operand.  */
 +  for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
 +    {
 +      enum machine_mode mode;
 +      tree type;
 +
 +      if (d->name == 0)
 +	continue;
 +      mode = insn_data[d->icode].operand[1].mode;
 +
 +      switch (mode)
 +	{
 +	case V16QImode:
 +	  type = v16qi_ftype_v16qi;
 +	  break;
 +	case V8HImode:
 +	  type = v8hi_ftype_v8hi;
 +	  break;
 +	case V4SImode:
 +	  type = v4si_ftype_v4si;
 +	  break;
 +	case V2DFmode:
 +	  type = v2df_ftype_v2df;
 +	  break;
 +	case V4SFmode:
 +	  type = v4sf_ftype_v4sf;
 +	  break;
 +	case V8QImode:
 +	  type = v8qi_ftype_v8qi;
 +	  break;
 +	case V4HImode:
 +	  type = v4hi_ftype_v4hi;
 +	  break;
 +	case V2SImode:
 +	  type = v2si_ftype_v2si;
 +	  break;
 +
 +	default:
 +	  abort ();
 +	}
 +
 +      def_builtin (d->mask, d->name, type, d->code);
 +    }
 +
    /* Add the remaining MMX insns with somewhat more complicated types.  */
    def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
    def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
 @@ -15531,6 +15798,12 @@ ix86_init_mmx_sse_builtins (void)
    def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
  	       v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
  
 +  /* SSSE3.  */
 +  def_builtin (MASK_SSSE3, "__builtin_ia32_palignr128",
 +	       v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PALIGNR128);
 +  def_builtin (MASK_SSSE3, "__builtin_ia32_palignr", di_ftype_di_di_int,
 +	       IX86_BUILTIN_PALIGNR);
 +
    /* Access to the vec_init patterns.  */
    ftype = build_function_type_list (V2SI_type_node, integer_type_node,
  				    integer_type_node, NULL_TREE);
 @@ -16029,7 +16302,7 @@ ix86_expand_builtin (tree exp, rtx targe
    tree arglist = TREE_OPERAND (exp, 1);
    tree arg0, arg1, arg2;
    rtx op0, op1, op2, pat;
 -  enum machine_mode tmode, mode0, mode1, mode2;
 +  enum machine_mode tmode, mode0, mode1, mode2, mode3;
    unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  
    switch (fcode)
 @@ -16499,6 +16772,52 @@ ix86_expand_builtin (tree exp, rtx targe
        return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
  				       target, 1);
  
 +    case IX86_BUILTIN_PALIGNR:
 +    case IX86_BUILTIN_PALIGNR128:
 +      if (fcode == IX86_BUILTIN_PALIGNR)
 +	{
 +	  icode = CODE_FOR_ssse3_palignrdi;
 +	  mode = DImode;
 +	}
 +      else
 +	{
 +	  icode = CODE_FOR_ssse3_palignrti;
 +	  mode = V2DImode;
 +	}
 +      arg0 = TREE_VALUE (arglist);
 +      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
 +      arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
 +      op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
 +      op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
 +      op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
 +      tmode = insn_data[icode].operand[0].mode;
 +      mode1 = insn_data[icode].operand[1].mode;
 +      mode2 = insn_data[icode].operand[2].mode;
 
 *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
 _______________________________________________
 svn-src-all@freebsd.org mailing list
 http://lists.freebsd.org/mailman/listinfo/svn-src-all
 To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"
 
State-Changed-From-To: patched->closed 
State-Changed-By: mm 
State-Changed-When: Sat Apr 30 22:21:03 UTC 2011 
State-Changed-Why:  
Resolved. Thanks! 

http://www.freebsd.org/cgi/query-pr.cgi?pr=153298 

From: dfilter@FreeBSD.ORG (dfilter service)
To: bug-followup@FreeBSD.org
Cc:  
Subject: Re: gnu/153298: commit references a PR
Date: Mon,  2 May 2011 08:32:07 +0000 (UTC)

 Author: mm
 Date: Mon May  2 08:31:53 2011
 New Revision: 221317
 URL: http://svn.freebsd.org/changeset/base/221317
 
 Log:
   MFC 218895, 218896, 219374, 219376, 219639, 219640, 219697, 219711, 220150:
   
   MFC r218895:
   Backport svn r124339 from gcc 4.3 and add opteron-sse3, athlon64-sse3
   and k8-sse3 cpu-types for -march=/-mtune= gcc options.
   These new cpu-types include the SSE3 instruction set that is supported
   by all newer AMD Athlon 64 and Opteron processors.
   All three cpu-types are supported by clang and all gcc versions
   starting with 4.3 SVN rev 124339 (at that time GPLv2 licensed).
   
   Source: gcc-4_3-branch (rev. 124339; GPLv2)
   
   MFC r218896:
   Add opteron-sse3, athlon64-sse3 and k8-sse3 cpu types to bsd.cpu.mk.
   - add "sse3" to MACHINE_CPU for the new cpu types
   - for i386, default to CPUTYPE=prescott for the new cpu types
   
   MFC r219374:
   Backport Intel Core 2 and AMD Geode CPU types from gcc-4.3 (GPLv2)
   These options are supported in this shape in all newer GCC versions.
   
   Source:	gcc-4_3-branch (rev. 118090, 118973, 120846; GPLv2)
   
   MFC r219376:
   Add AMD Geode CPU type to bsd.cpu.mk and examples/etc/make.conf
   For CPUTYPE=core2 use -march=core2
   
   MFC r219639:
   Backport SSSE3 instruction set support to base gcc.
   Enabled by default for -march=core2
   
   Source:	gcc-4_3-branch (rev. 117958, 121687, 121726, 123639; GPLv2)
   
   MFC r219640:
   Add ssse3 capability for CPUTYPE=core2 to MACHINE_CPU in bsd.cpu.mk
   
   MFC r219697:
   Fix -march/-mtune=native autodetection for Intel Core 2 CPUs
   
   Source:	gcc-4_3-branch (partial rev. 119454; GPLv2)
   
   MFC r219711:
   Backport missing tunings for -march=core2:
   - enable extra 80387 mathematical constants (ext_80387_constants)
   - enable compare and exchange 16 bytes (cmpxchg16b)
   
   Verified against llvm-gcc (and apple gcc)
   Source:	gcc-4_3-branch (ref. svn revs. 119260, 121140; GPLv2)
   
   MFC r220150:
   Upgrade of base gcc and libstdc++ to the last GPLv2-licensed revision
   (rev. 127959 of gcc-4_2-branch).
   
   Resolved GCC bugs:
   	c++: 17763, 29365, 30535, 30917, 31337, 31941, 32108, 32112, 32346,
   	     32898, 32992
   	debug: 32610, 32914
   	libstdc++: 33084, 33128
   	middle-end: 32563
   	rtl-optimization: 33148
   	tree-optimization: 25413, 32723
   	target: 32218
   
   Source:	gcc-4_2-branch (up to rev. 127959)
   
   Obtained from:	gcc (var. revs of gcc-4_2-branch and gcc-4_3-branch; GPLv2)
   PR:		gnu/153298, gnu/153959, gnu/154385, gnu/155308, gnu/154906
 
 Added:
   stable/7/contrib/gcc/config/i386/geode.md
      - copied unchanged from r219374, head/contrib/gcc/config/i386/geode.md
   stable/7/contrib/gcc/config/i386/tmmintrin.h
      - copied unchanged from r219639, head/contrib/gcc/config/i386/tmmintrin.h
 Modified:
   stable/7/contrib/gcc/BASE-VER
   stable/7/contrib/gcc/ChangeLog
   stable/7/contrib/gcc/DATESTAMP
   stable/7/contrib/gcc/DEV-PHASE
   stable/7/contrib/gcc/config.gcc
   stable/7/contrib/gcc/config/i386/driver-i386.c
   stable/7/contrib/gcc/config/i386/i386.c
   stable/7/contrib/gcc/config/i386/i386.h
   stable/7/contrib/gcc/config/i386/i386.md
   stable/7/contrib/gcc/config/i386/i386.opt
   stable/7/contrib/gcc/config/i386/sse.md
   stable/7/contrib/gcc/config/i386/xmmintrin.h
   stable/7/contrib/gcc/config/mips/predicates.md
   stable/7/contrib/gcc/config/rs6000/rs6000.c
   stable/7/contrib/gcc/config/s390/s390.md
   stable/7/contrib/gcc/cp/ChangeLog
   stable/7/contrib/gcc/cp/call.c
   stable/7/contrib/gcc/cp/cp-tree.h
   stable/7/contrib/gcc/cp/cxx-pretty-print.c
   stable/7/contrib/gcc/cp/decl.c
   stable/7/contrib/gcc/cp/decl2.c
   stable/7/contrib/gcc/cp/error.c
   stable/7/contrib/gcc/cp/lex.c
   stable/7/contrib/gcc/cp/name-lookup.c
   stable/7/contrib/gcc/cp/pt.c
   stable/7/contrib/gcc/cp/semantics.c
   stable/7/contrib/gcc/cp/typeck.c
   stable/7/contrib/gcc/doc/contrib.texi
   stable/7/contrib/gcc/doc/extend.texi
   stable/7/contrib/gcc/doc/gcc.1
   stable/7/contrib/gcc/doc/invoke.texi
   stable/7/contrib/gcc/dwarf2out.c
   stable/7/contrib/gcc/expr.c
   stable/7/contrib/gcc/fold-const.c
   stable/7/contrib/gcc/gimplify.c
   stable/7/contrib/gcc/reload1.c
   stable/7/contrib/gcc/simplify-rtx.c
   stable/7/contrib/gcc/target-def.h
   stable/7/contrib/gcc/target.h
   stable/7/contrib/gcc/targhooks.c
   stable/7/contrib/gcc/targhooks.h
   stable/7/contrib/gcc/tree-if-conv.c
   stable/7/contrib/gcc/tree-ssa-structalias.c
   stable/7/contrib/gcc/tree-vect-analyze.c
   stable/7/contrib/gcc/tree-vect-patterns.c
   stable/7/contrib/gcc/tree.c
   stable/7/contrib/gcc/tree.h
   stable/7/contrib/libstdc++/ChangeLog
   stable/7/contrib/libstdc++/include/std/std_valarray.h
   stable/7/contrib/libstdc++/include/tr1/random
   stable/7/share/examples/etc/make.conf
   stable/7/share/mk/bsd.cpu.mk
 Directory Properties:
   stable/7/contrib/gcc/   (props changed)
   stable/7/contrib/libstdc++/   (props changed)
   stable/7/share/examples/   (props changed)
   stable/7/share/mk/   (props changed)
 
 Modified: stable/7/contrib/gcc/BASE-VER
 ==============================================================================
 --- stable/7/contrib/gcc/BASE-VER	Mon May  2 06:59:09 2011	(r221316)
 +++ stable/7/contrib/gcc/BASE-VER	Mon May  2 08:31:53 2011	(r221317)
 @@ -1 +1 @@
 -4.2.1
 +4.2.2
 
 Modified: stable/7/contrib/gcc/ChangeLog
 ==============================================================================
 --- stable/7/contrib/gcc/ChangeLog	Mon May  2 06:59:09 2011	(r221316)
 +++ stable/7/contrib/gcc/ChangeLog	Mon May  2 08:31:53 2011	(r221317)
 @@ -1,3 +1,121 @@
 +2007-08-31  Jakub Jelinek  <jakub@redhat.com>
 +
 +	PR rtl-optimization/33148
 +	* simplify-rtx.c (simplify_unary_operation_1): Only optimize
 +	(neg (lt X 0)) if X has scalar int mode.
 +
 +	PR debug/32914
 +	* dwarf2out.c (rtl_for_decl_init): If vector decl has CONSTRUCTOR
 +	initializer, use build_vector_from_ctor if possible to create
 +	VECTOR_CST out of it.  If vector initializer is not VECTOR_CST
 +	even after this, return NULL.
 +
 +2007-08-27  Jason Merrill  <jason@redhat.com>
 +
 +	PR c++/31337
 +	* gimplify.c (gimplify_modify_expr): Discard the assignment of 
 +	zero-sized types after calling gimplify_modify_expr_rhs.
 +
 +2007-08-24  Jakub Jelinek  <jakub@redhat.com>
 +
 +	PR debug/32610
 +	* dwarf2out.c (gen_decl_die): Don't call
 +	gen_tagged_type_instantiation_die if decl doesn't have tagged type.
 +
 +2007-08-24  Richard Guenther  <rguenther@suse.de>
 +
 +	* expr.c (get_inner_reference): Remove unused variable.
 +
 +2007-08-24  Richard Guenther  <rguenther@suse.de>
 +
 +	* expr.c (get_inner_reference): Do computation of bitoffset
 +	from offset in a way we can detect overflow reliably.
 +
 +2007-08-22  Richard Guenther  <rguenther@suse.de>
 +
 +	PR middle-end/32563
 +	* tree.c (host_integerp): Treat sizetype as signed as it is
 +	sign-extended.
 +
 +2007-08-20  Adam Nemet  <anemet@caviumnetworks.com>
 +
 +	* config/mips/predicates.md (const_call_insn_operand): Invoke
 +	SYMBOL_REF_LONG_CALL_P only on SYMBOL_REFs.
 +
 +2007-08-17  Chen liqin  <liqin@sunnorth.com.cn>
 +
 +        * config/score/score.md : Update pattern tablejump.
 +        * config/score/score.c : Update score_initialize_trampoline 
 +        function.
 +        * config/score/score.h (TRAMPOLINE_TEMPLATE): Added macro.
 +        (TRAMPOLINE_INSNS, TRAMPOLINE_SIZE) Update macro.
 +        * doc/contrib.texi: Add my entry.
 +
 +2007-08-02  Andreas Krebbel  <krebbel1@de.ibm.com>
 +
 +	* config/s390/s390.md ("*xordi3_cconly"): Change xr to xg.
 +
 +2007-08-01  Andreas Krebbel  <krebbel1@de.ibm.com>
 +
 +	* config/s390/s390.md (TF in GPR splitter): Change operand_subword
 +	parameter to TFmode.
 +
 +2007-07-30  Mark Mitchell  <mark@codesourcery.com>
 +
 +	* BASE-VER: Bump.
 +	* DEV-PHASE: Mark as prerelease.
 +
 +2007-07-25  Steve Ellcey  <sje@cup.hp.com>
 +
 +	PR target/32218
 +	* tree-vect-patterns.c (vect_pattern_recog_1): Check for valid type.
 +
 +2007-07-25  Dorit Nuzman  <dorit@il.ibm.com>
 +	    Devang Patel  <dpatel@apple.com>
 +
 +	PR tree-optimization/25413
 +	* targhooks.c (default_builtin_vector_alignment_reachable): New.
 +	* targhooks.h (default_builtin_vector_alignment_reachable): New.
 +	* tree.h (contains_packed_reference): New.
 +	* expr.c (contains_packed_reference): New.
 +	* tree-vect-analyze.c (vector_alignment_reachable_p): New.
 +	(vect_enhance_data_refs_alignment): Call
 +	vector_alignment_reachable_p.
 +	* target.h (vector_alignment_reachable): New builtin.
 +	* target-def.h (TARGET_VECTOR_ALIGNMENT_REACHABLE): New.
 +	* config/rs6000/rs6000.c (rs6000_vector_alignment_reachable): New.
 +	(TARGET_VECTOR_ALIGNMENT_REACHABLE): Define.
 +
 +2007-07-24  Richard Guenther  <rguenther@suse.de>
 +
 +	Backport from mainline:
 +	2007-07-16  Richard Guenther  <rguenther@suse.de>
 +		    Uros Bizjak  <ubizjak@gmail.com>
 +
 +	* tree-if-conv.c (find_phi_replacement_condition): Unshare "*cond"
 +	before forcing it to gimple operand.
 +
 +2007-07-24  Richard Guenther  <rguenther@suse.de>
 +
 +	PR tree-optimization/32723
 +	Backport from mainline:
 +	2007-03-09  Daniel Berlin  <dberlin@dberlin.org>
 +
 +        * tree-ssa-structalias.c (shared_bitmap_info_t): New structure.
 +        (shared_bitmap_table): New variable.
 +        (shared_bitmap_hash): New function.
 +        (shared_bitmap_eq): Ditto
 +        (shared_bitmap_lookup): Ditto.
 +        (shared_bitmap_add): Ditto.
 +        (find_what_p_points_to): Rewrite to use shared bitmap hashtable.
 +        (init_alias_vars): Init shared bitmap hashtable.
 +        (delete_points_to_sets): Delete shared bitmap hashtable.
 +
 +2007-07-23  Bernd Schmidt  <bernd.schmidt@analog.com>
 +
 +	* reload1.c (choose_reload_regs): Set reload_spill_index for regs
 +	chosen during find_reloads.
 +
  2007-07-19  Release Manager
  
  	* GCC 4.2.1 released.
 
 Modified: stable/7/contrib/gcc/DATESTAMP
 ==============================================================================
 --- stable/7/contrib/gcc/DATESTAMP	Mon May  2 06:59:09 2011	(r221316)
 +++ stable/7/contrib/gcc/DATESTAMP	Mon May  2 08:31:53 2011	(r221317)
 @@ -1 +1 @@
 -20070719
 +20070831
 
 Modified: stable/7/contrib/gcc/DEV-PHASE
 ==============================================================================
 --- stable/7/contrib/gcc/DEV-PHASE	Mon May  2 06:59:09 2011	(r221316)
 +++ stable/7/contrib/gcc/DEV-PHASE	Mon May  2 08:31:53 2011	(r221317)
 @@ -0,0 +1 @@
 +prerelease
 
 Modified: stable/7/contrib/gcc/config.gcc
 ==============================================================================
 --- stable/7/contrib/gcc/config.gcc	Mon May  2 06:59:09 2011	(r221316)
 +++ stable/7/contrib/gcc/config.gcc	Mon May  2 08:31:53 2011	(r221317)
 @@ -268,11 +268,13 @@ xscale-*-*)
  	;;
  i[34567]86-*-*)
  	cpu_type=i386
 -	extra_headers="mmintrin.h mm3dnow.h xmmintrin.h emmintrin.h pmmintrin.h"
 +	extra_headers="mmintrin.h mm3dnow.h xmmintrin.h emmintrin.h
 +		       pmmintrin.h tmmintrin.h"
  	;;
  x86_64-*-*)
  	cpu_type=i386
 -	extra_headers="mmintrin.h mm3dnow.h xmmintrin.h emmintrin.h pmmintrin.h"
 +	extra_headers="mmintrin.h mm3dnow.h xmmintrin.h emmintrin.h
 +		       pmmintrin.h tmmintrin.h"
  	need_64bit_hwint=yes
  	;;
  ia64-*-*)
 @@ -1207,14 +1209,14 @@ i[34567]86-*-solaris2*)
  		# FIXME: -m64 for i[34567]86-*-* should be allowed just
  		# like -m32 for x86_64-*-*.
  		case X"${with_cpu}" in
 -		Xgeneric|Xnocona|Xx86-64|Xk8|Xopteron|Xathlon64|Xathlon-fx)
 +		Xgeneric|Xcore2|Xnocona|Xx86-64|Xk8|Xopteron|Xathlon64|Xathlon-fx)
  			;;
  		X)
  			with_cpu=generic
  			;;
  		*)
  			echo "Unsupported CPU used in --with-cpu=$with_cpu, supported values:" 1>&2
 -			echo "generic nocona x86-64 k8 opteron athlon64 athlon-fx" 1>&2
 +			echo "generic core2 nocona x86-64 k8 opteron athlon64 athlon-fx" 1>&2
  			exit 1
  			;;
  		esac
 @@ -2537,6 +2539,9 @@ if test x$with_cpu = x ; then
          nocona-*)
            with_cpu=nocona
            ;;
 +	core2-*)
 +	  with_cpu=core2
 +	  ;;
          pentium_m-*)
            with_cpu=pentium-m
            ;;
 @@ -2556,6 +2561,9 @@ if test x$with_cpu = x ; then
          nocona-*)
            with_cpu=nocona
            ;;
 +	core2-*)
 +	  with_cpu=core2
 +	  ;;
          *)
            with_cpu=generic
            ;;
 @@ -2787,7 +2795,7 @@ case "${target}" in
  				esac
  				# OK
  				;;
 -			"" | k8 | opteron | athlon64 | athlon-fx | nocona | generic)
 +			"" | k8 | opteron | athlon64 | athlon-fx | nocona | core2 | generic)
  				# OK
  				;;
  			*)
 
 Modified: stable/7/contrib/gcc/config/i386/driver-i386.c
 ==============================================================================
 --- stable/7/contrib/gcc/config/i386/driver-i386.c	Mon May  2 06:59:09 2011	(r221316)
 +++ stable/7/contrib/gcc/config/i386/driver-i386.c	Mon May  2 08:31:53 2011	(r221317)
 @@ -39,6 +39,7 @@ const char *host_detect_local_cpu (int a
  #define bit_SSE2 (1 << 26)
  
  #define bit_SSE3 (1 << 0)
 +#define bit_SSSE3 (1 << 9)
  #define bit_CMPXCHG16B (1 << 13)
  
  #define bit_3DNOW (1 << 31)
 @@ -66,7 +67,7 @@ const char *host_detect_local_cpu (int a
    unsigned int vendor;
    unsigned int ext_level;
    unsigned char has_mmx = 0, has_3dnow = 0, has_3dnowp = 0, has_sse = 0;
 -  unsigned char has_sse2 = 0, has_sse3 = 0, has_cmov = 0;
 +  unsigned char has_sse2 = 0, has_sse3 = 0, has_ssse3 = 0, has_cmov = 0;
    unsigned char has_longmode = 0, has_cmpxchg8b = 0;
    unsigned char is_amd = 0;
    unsigned int family = 0;
 @@ -107,6 +108,7 @@ const char *host_detect_local_cpu (int a
    has_sse = !!(edx & bit_SSE);
    has_sse2 = !!(edx & bit_SSE2);
    has_sse3 = !!(ecx & bit_SSE3);
 +  has_ssse3 = !!(ecx & bit_SSSE3);
    /* We don't care for extended family.  */
    family = (eax >> 8) & ~(1 << 4);
  
 @@ -148,7 +150,9 @@ const char *host_detect_local_cpu (int a
  	  /* We have no idea.  Use something reasonable.  */
  	  if (arch)
  	    {
 -	      if (has_sse3)
 +	      if (has_ssse3)
 +		cpu = "core2";
 +	      else if (has_sse3)
  		{
  		  if (has_longmode)
  		    cpu = "nocona";
 @@ -230,6 +234,9 @@ const char *host_detect_local_cpu (int a
  	  cpu = "generic";
  	}
        break;
 +    case PROCESSOR_GEODE:
 +      cpu = "geode";
 +      break;
      case PROCESSOR_K6:
        if (has_3dnow)
          cpu = "k6-3";
 
 Copied: stable/7/contrib/gcc/config/i386/geode.md (from r219374, head/contrib/gcc/config/i386/geode.md)
 ==============================================================================
 --- /dev/null	00:00:00 1970	(empty, because file is newly added)
 +++ stable/7/contrib/gcc/config/i386/geode.md	Mon May  2 08:31:53 2011	(r221317, copy of r219374, head/contrib/gcc/config/i386/geode.md)
 @@ -0,0 +1,153 @@
 +;; Geode Scheduling
 +;; Copyright (C) 2006
 +;; Free Software Foundation, Inc.
 +;;
 +;; This file is part of GCC.
 +;;
 +;; GCC is free software; you can redistribute it and/or modify
 +;; it under the terms of the GNU General Public License as published by
 +;; the Free Software Foundation; either version 2, or (at your option)
 +;; any later version.
 +;;
 +;; GCC is distributed in the hope that it will be useful,
 +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
 +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 +;; GNU General Public License for more details.
 +;;
 +;; You should have received a copy of the GNU General Public License
 +;; along with GCC; see the file COPYING.  If not, write to
 +;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
 +;; Boston, MA 02110-1301, USA.
 +;;
 +;; The Geode architecture is one insn issue processor.
 +;;
 +;; This description is based on data from the following documents:
 +;;
 +;;    "AMD Geode GX Processor Data Book"
 +;;    Advanced Micro Devices, Inc., Aug 2005.
 +;;
 +;;    "AMD Geode LX Processor Data Book"
 +;;    Advanced Micro Devices, Inc., Jan 2006.
 +;;
 +;;
 +;; CPU execution units of the Geode:
 +;;
 +;; issue	describes the issue pipeline.
 +;; alu		describes the Integer unit
 +;; fpu		describes the FP unit
 +;;
 +;; The fp unit is out of order execution unit with register renaming.
 +;; There is also memory management unit and execution pipeline for
 +;; load/store operations.  We ignore it and difference between insns
 +;; using memory and registers.
 +
 +(define_automaton "geode")
 +
 +(define_cpu_unit "geode_issue,geode_alu,geode_fpu" "geode")
 +
 +(define_insn_reservation "alu" 1
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "alu,alu1,negnot,icmp,lea,test,imov,imovx,icmov,incdec,setcc"))
 +			 "geode_issue,geode_alu")
 +
 +(define_insn_reservation "shift" 2
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "ishift,ishift1,rotate,rotate1,cld"))
 +			 "geode_issue,geode_alu*2")
 +
 +(define_insn_reservation "imul" 7
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "imul"))
 +			 "geode_issue,geode_alu*7")
 +
 +(define_insn_reservation "idiv" 40
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "idiv"))
 +			 "geode_issue,geode_alu*40")
 +
 +;; The branch unit.
 +(define_insn_reservation "call" 2
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "call,callv"))
 +			 "geode_issue,geode_alu*2")
 +
 +(define_insn_reservation "geode_branch" 1
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "ibr"))
 +			 "geode_issue,geode_alu")
 +
 +(define_insn_reservation "geode_pop_push" 1
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "pop,push"))
 +			 "geode_issue,geode_alu")
 +
 +(define_insn_reservation "geode_leave" 2
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "leave"))
 +			 "geode_issue,geode_alu*2")
 +
 +(define_insn_reservation "geode_load_str" 4
 +			 (and (eq_attr "cpu" "geode")
 +			      (and (eq_attr "type" "str")
 +				   (eq_attr "memory" "load,both")))
 +			 "geode_issue,geode_alu*4")
 +
 +(define_insn_reservation "geode_store_str" 2
 +			 (and (eq_attr "cpu" "geode")
 +			      (and (eq_attr "type" "str")
 +				   (eq_attr "memory" "store")))
 +			 "geode_issue,geode_alu*2")
 +
 +;; Be optimistic
 +(define_insn_reservation "geode_unknown" 1
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "multi,other"))
 +			 "geode_issue,geode_alu")
 +
 +;; FPU
 +
 +(define_insn_reservation "geode_fop" 6
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "fop,fcmp"))
 +			 "geode_issue,geode_fpu*6")
 +
 +(define_insn_reservation "geode_fsimple" 1
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "fmov,fcmov,fsgn,fxch"))
 +			 "geode_issue,geode_fpu")
 +
 +(define_insn_reservation "geode_fist" 4
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "fistp,fisttp"))
 +			 "geode_issue,geode_fpu*4")
 +
 +(define_insn_reservation "geode_fmul" 10
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "fmul"))
 +			 "geode_issue,geode_fpu*10")
 +
 +(define_insn_reservation "geode_fdiv" 47
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "fdiv"))
 +			 "geode_issue,geode_fpu*47")
 +
 +;; We use minimal latency (fsin) here
 +(define_insn_reservation "geode_fpspc" 54
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "fpspc"))
 +			 "geode_issue,geode_fpu*54")
 +
 +(define_insn_reservation "geode_frndint" 12
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "frndint"))
 +			 "geode_issue,geode_fpu*12")
 +
 +(define_insn_reservation "geode_mmxmov" 1
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "mmxmov"))
 +			 "geode_issue,geode_fpu")
 +
 +(define_insn_reservation "geode_mmx" 2
 +			 (and (eq_attr "cpu" "geode")
 +			      (eq_attr "type" "mmx,mmxadd,mmxmul,mmxcmp,mmxcvt,mmxshft"))
 +			 "geode_issue,geode_fpu*2")
 
 Modified: stable/7/contrib/gcc/config/i386/i386.c
 ==============================================================================
 --- stable/7/contrib/gcc/config/i386/i386.c	Mon May  2 06:59:09 2011	(r221316)
 +++ stable/7/contrib/gcc/config/i386/i386.c	Mon May  2 08:31:53 2011	(r221317)
 @@ -336,6 +336,60 @@ struct processor_costs pentiumpro_cost =
  };
  
  static const
 +struct processor_costs geode_cost = {
 +  COSTS_N_INSNS (1),			/* cost of an add instruction */
 +  COSTS_N_INSNS (1),			/* cost of a lea instruction */
 +  COSTS_N_INSNS (2),			/* variable shift costs */
 +  COSTS_N_INSNS (1),			/* constant shift costs */
 +  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
 +   COSTS_N_INSNS (4),			/*                               HI */
 +   COSTS_N_INSNS (7),			/*                               SI */
 +   COSTS_N_INSNS (7),			/*                               DI */
 +   COSTS_N_INSNS (7)},			/*                               other */
 +  0,					/* cost of multiply per each bit set */
 +  {COSTS_N_INSNS (15),			/* cost of a divide/mod for QI */
 +   COSTS_N_INSNS (23),			/*                          HI */
 +   COSTS_N_INSNS (39),			/*                          SI */
 +   COSTS_N_INSNS (39),			/*                          DI */
 +   COSTS_N_INSNS (39)},			/*                          other */
 +  COSTS_N_INSNS (1),			/* cost of movsx */
 +  COSTS_N_INSNS (1),			/* cost of movzx */
 +  8,					/* "large" insn */
 +  4,					/* MOVE_RATIO */
 +  1,					/* cost for loading QImode using movzbl */
 +  {1, 1, 1},				/* cost of loading integer registers
 +					   in QImode, HImode and SImode.
 +					   Relative to reg-reg move (2).  */
 +  {1, 1, 1},				/* cost of storing integer registers */
 +  1,					/* cost of reg,reg fld/fst */
 +  {1, 1, 1},				/* cost of loading fp registers
 +					   in SFmode, DFmode and XFmode */
 +  {4, 6, 6},				/* cost of storing fp registers
 +					   in SFmode, DFmode and XFmode */
 +
 +  1,					/* cost of moving MMX register */
 +  {1, 1},				/* cost of loading MMX registers
 +					   in SImode and DImode */
 +  {1, 1},				/* cost of storing MMX registers
 +					   in SImode and DImode */
 +  1,					/* cost of moving SSE register */
 +  {1, 1, 1},				/* cost of loading SSE registers
 +					   in SImode, DImode and TImode */
 +  {1, 1, 1},				/* cost of storing SSE registers
 +					   in SImode, DImode and TImode */
 +  1,					/* MMX or SSE register to integer */
 +  32,					/* size of prefetch block */
 +  1,					/* number of parallel prefetches */
 +  1,					/* Branch cost */
 +  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
 +  COSTS_N_INSNS (11),			/* cost of FMUL instruction.  */
 +  COSTS_N_INSNS (47),			/* cost of FDIV instruction.  */
 +  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
 +  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
 +  COSTS_N_INSNS (54),			/* cost of FSQRT instruction.  */
 +};
 +
 +static const
  struct processor_costs k6_cost = {
    COSTS_N_INSNS (1),			/* cost of an add instruction */
    COSTS_N_INSNS (2),			/* cost of a lea instruction */
 @@ -600,6 +654,58 @@ struct processor_costs nocona_cost = {
    COSTS_N_INSNS (44),			/* cost of FSQRT instruction.  */
  };
  
 +static const
 +struct processor_costs core2_cost = {
 +  COSTS_N_INSNS (1),			/* cost of an add instruction */
 +  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
 +  COSTS_N_INSNS (1),			/* variable shift costs */
 +  COSTS_N_INSNS (1),			/* constant shift costs */
 +  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
 +   COSTS_N_INSNS (3),			/*                               HI */
 +   COSTS_N_INSNS (3),			/*                               SI */
 +   COSTS_N_INSNS (3),			/*                               DI */
 +   COSTS_N_INSNS (3)},			/*                               other */
 +  0,					/* cost of multiply per each bit set */
 +  {COSTS_N_INSNS (22),			/* cost of a divide/mod for QI */
 +   COSTS_N_INSNS (22),			/*                          HI */
 +   COSTS_N_INSNS (22),			/*                          SI */
 +   COSTS_N_INSNS (22),			/*                          DI */
 +   COSTS_N_INSNS (22)},			/*                          other */
 +  COSTS_N_INSNS (1),			/* cost of movsx */
 +  COSTS_N_INSNS (1),			/* cost of movzx */
 +  8,					/* "large" insn */
 +  16,					/* MOVE_RATIO */
 +  2,					/* cost for loading QImode using movzbl */
 +  {6, 6, 6},				/* cost of loading integer registers
 +					   in QImode, HImode and SImode.
 +					   Relative to reg-reg move (2).  */
 +  {4, 4, 4},				/* cost of storing integer registers */
 +  2,					/* cost of reg,reg fld/fst */
 +  {6, 6, 6},				/* cost of loading fp registers
 +					   in SFmode, DFmode and XFmode */
 +  {4, 4, 4},				/* cost of loading integer registers */
 +  2,					/* cost of moving MMX register */
 +  {6, 6},				/* cost of loading MMX registers
 +					   in SImode and DImode */
 +  {4, 4},				/* cost of storing MMX registers
 +					   in SImode and DImode */
 +  2,					/* cost of moving SSE register */
 +  {6, 6, 6},				/* cost of loading SSE registers
 +					   in SImode, DImode and TImode */
 +  {4, 4, 4},				/* cost of storing SSE registers
 +					   in SImode, DImode and TImode */
 +  2,					/* MMX or SSE register to integer */
 +  128,					/* size of prefetch block */
 +  8,					/* number of parallel prefetches */
 +  3,					/* Branch cost */
 +  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
 +  COSTS_N_INSNS (5),			/* cost of FMUL instruction.  */
 +  COSTS_N_INSNS (32),			/* cost of FDIV instruction.  */
 +  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
 +  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
 +  COSTS_N_INSNS (58),			/* cost of FSQRT instruction.  */
 +};
 +
  /* Generic64 should produce code tuned for Nocona and K8.  */
  static const
  struct processor_costs generic64_cost = {
 @@ -721,38 +827,41 @@ const struct processor_costs *ix86_cost 
  #define m_486 (1<<PROCESSOR_I486)
  #define m_PENT (1<<PROCESSOR_PENTIUM)
  #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
 +#define m_GEODE  (1<<PROCESSOR_GEODE)
 +#define m_K6_GEODE  (m_K6 | m_GEODE)
  #define m_K6  (1<<PROCESSOR_K6)
  #define m_ATHLON  (1<<PROCESSOR_ATHLON)
  #define m_PENT4  (1<<PROCESSOR_PENTIUM4)
  #define m_K8  (1<<PROCESSOR_K8)
  #define m_ATHLON_K8  (m_K8 | m_ATHLON)
  #define m_NOCONA  (1<<PROCESSOR_NOCONA)
 +#define m_CORE2  (1<<PROCESSOR_CORE2)
  #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
  #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
  #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
  
  /* Generic instruction choice should be common subset of supported CPUs
 -   (PPro/PENT4/NOCONA/Athlon/K8).  */
 +   (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
  
  /* Leave is not affecting Nocona SPEC2000 results negatively, so enabling for
     Generic64 seems like good code size tradeoff.  We can't enable it for 32bit
     generic because it is not working well with PPro base chips.  */
 -const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8 | m_GENERIC64;
 -const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
 +const int x86_use_leave = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_CORE2 | m_GENERIC64;
 +const int x86_push_memory = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
  const int x86_zero_extend_with_and = m_486 | m_PENT;
 -const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_GENERIC /* m_386 | m_K6 */;
 +const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */;
  const int x86_double_with_add = ~m_386;
  const int x86_use_bit_test = m_386;
 -const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_GENERIC;
 -const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
 +const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_CORE2 | m_GENERIC;
 +const int x86_cmove = m_PPRO | m_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
  const int x86_3dnow_a = m_ATHLON_K8;
 -const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
 +const int x86_deep_branch = m_PPRO | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
  /* Branch hints were put in P4 based on simulation result. But
     after P4 was made, no performance benefit was observed with
     branch hints. It also increases the code size. As the result,
     icc never generates branch hints.  */
  const int x86_branch_hints = 0;
 -const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA | m_GENERIC32; /*m_GENERIC | m_ATHLON_K8 ? */
 +const int x86_use_sahf = m_PPRO | m_K6_GEODE | m_PENT4 | m_NOCONA | m_GENERIC32; /*m_GENERIC | m_ATHLON_K8 ? */
  /* We probably ought to watch for partial register stalls on Generic32
     compilation setting as well.  However in current implementation the
     partial register stalls are not eliminated very well - they can
 @@ -762,15 +871,15 @@ const int x86_use_sahf = m_PPRO | m_K6 |
     with partial reg. dependencies used by Athlon/P4 based chips, it is better
     to leave it off for generic32 for now.  */
  const int x86_partial_reg_stall = m_PPRO;
 -const int x86_partial_flag_reg_stall = m_GENERIC;
 -const int x86_use_himode_fiop = m_386 | m_486 | m_K6;
 -const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_GENERIC);
 +const int x86_partial_flag_reg_stall =  m_CORE2 | m_GENERIC;
 +const int x86_use_himode_fiop = m_386 | m_486 | m_K6_GEODE;
 +const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_CORE2 | m_GENERIC);
  const int x86_use_mov0 = m_K6;
 -const int x86_use_cltd = ~(m_PENT | m_K6 | m_GENERIC);
 +const int x86_use_cltd = ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC);
  const int x86_read_modify_write = ~m_PENT;
  const int x86_read_modify = ~(m_PENT | m_PPRO);
  const int x86_split_long_moves = m_PPRO;
 -const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_GENERIC; /* m_PENT4 ? */
 +const int x86_promote_QImode = m_K6_GEODE | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_CORE2 | m_GENERIC; /* m_PENT4 ? */
  const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
  const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
  const int x86_qimode_math = ~(0);
 @@ -780,18 +889,18 @@ const int x86_promote_qi_regs = 0;
     if our scheme for avoiding partial stalls was more effective.  */
  const int x86_himode_math = ~(m_PPRO);
  const int x86_promote_hi_regs = m_PPRO;
 -const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_GENERIC;
 -const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_GENERIC;
 -const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA | m_GENERIC;
 -const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA | m_GENERIC;
 -const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC);
 -const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
 -const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
 -const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC;
 -const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO | m_GENERIC;
 -const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO | m_GENERIC;
 +const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 +const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 +const int x86_add_esp_4 = m_ATHLON_K8 | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 +const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6_GEODE | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 +const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_GEODE);
 +const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 +const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 +const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
 +const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
 +const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
  const int x86_shift1 = ~m_486;
 -const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
 +const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
  /* In Generic model we have an conflict here in between PPro/Pentium4 based chips
     that thread 128bit SSE registers as single units versus K8 based chips that
     divide SSE registers to two 64bit halves.
 @@ -801,7 +910,7 @@ const int x86_arch_always_fancy_math_387
     this option on P4 brings over 20% SPECfp regression, while enabling it on
     K8 brings roughly 2.4% regression that can be partly masked by careful scheduling
     of moves.  */
 -const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC;
 +const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
  /* Set for machines where the type and dependencies are resolved on SSE
     register parts instead of whole registers, so we may maintain just
     lower part of scalar values in proper format leaving the upper part
 @@ -810,28 +919,28 @@ const int x86_sse_split_regs = m_ATHLON_
  const int x86_sse_typeless_stores = m_ATHLON_K8;
  const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
  const int x86_use_ffreep = m_ATHLON_K8;
 -const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
 -const int x86_use_incdec = ~(m_PENT4 | m_NOCONA | m_GENERIC);
 +const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6_GEODE | m_CORE2;
 +const int x86_use_incdec = ~(m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC);
  
  /* ??? Allowing interunit moves makes it all too easy for the compiler to put
     integer data in xmm registers.  Which results in pretty abysmal code.  */
  const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;
  
 -const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC32;
 +const int x86_ext_80387_constants = m_K6_GEODE | m_ATHLON | m_PENT4 | m_NOCONA | m_CORE2 | m_PPRO | m_GENERIC32;
  /* Some CPU cores are not able to predict more than 4 branch instructions in
     the 16 byte window.  */
 -const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
 -const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT | m_GENERIC;
 +const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
 +const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC;
  const int x86_use_bt = m_ATHLON_K8;
  /* Compare and exchange was added for 80486.  */
  const int x86_cmpxchg = ~m_386;
  /* Compare and exchange 8 bytes was added for pentium.  */
  const int x86_cmpxchg8b = ~(m_386 | m_486);
  /* Compare and exchange 16 bytes was added for nocona.  */
 -const int x86_cmpxchg16b = m_NOCONA;
 +const int x86_cmpxchg16b = m_NOCONA | m_CORE2;
  /* Exchange and add was added for 80486.  */
  const int x86_xadd = ~m_386;
 -const int x86_pad_returns = m_ATHLON_K8 | m_GENERIC;
 +const int x86_pad_returns = m_ATHLON_K8 | m_CORE2 | m_GENERIC;
  
  /* In case the average insn count for single function invocation is
     lower than this constant, emit fast (but longer) prologue and
 @@ -1402,16 +1511,24 @@ ix86_handle_option (size_t code, const c
      case OPT_msse:
        if (!value)
  	{
 -	  target_flags &= ~(MASK_SSE2 | MASK_SSE3);
 -	  target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
 +	  target_flags &= ~(MASK_SSE2 | MASK_SSE3 | MASK_SSSE3);
 +	  target_flags_explicit |= MASK_SSE2 | MASK_SSE3 | MASK_SSSE3;
  	}
        return true;
  
      case OPT_msse2:
        if (!value)
  	{
 -	  target_flags &= ~MASK_SSE3;
 -	  target_flags_explicit |= MASK_SSE3;
 +	  target_flags &= ~(MASK_SSE3 | MASK_SSSE3);
 +	  target_flags_explicit |= MASK_SSE3 | MASK_SSSE3;
 +	}
 +      return true;
 +
 +    case OPT_msse3:
 +      if (!value)
 +	{
 +	  target_flags &= ~MASK_SSSE3;
 +	  target_flags_explicit |= MASK_SSSE3;
  	}
        return true;
  
 @@ -1455,11 +1572,13 @@ override_options (void)
        {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
        {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
        {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
 +      {&geode_cost, 0, 0, 0, 0, 0, 0, 0},
        {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
        {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
        {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
        {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
        {&nocona_cost, 0, 0, 0, 0, 0, 0, 0},
 +      {&core2_cost, 0, 0, 16, 7, 16, 7, 16},
        {&generic32_cost, 0, 0, 16, 7, 16, 7, 16},
        {&generic64_cost, 0, 0, 16, 7, 16, 7, 16}
      };
 @@ -1478,7 +1597,8 @@ override_options (void)
  	  PTA_PREFETCH_SSE = 16,
  	  PTA_3DNOW = 32,
  	  PTA_3DNOW_A = 64,
 -	  PTA_64BIT = 128
 +	  PTA_64BIT = 128,
 +	  PTA_SSSE3 = 256
  	} flags;
      }
    const processor_alias_table[] =
 @@ -1506,6 +1626,11 @@ override_options (void)
  				        | PTA_MMX | PTA_PREFETCH_SSE},
        {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
  				        | PTA_MMX | PTA_PREFETCH_SSE},
 +      {"core2", PROCESSOR_CORE2, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_SSSE3
 +                                        | PTA_64BIT | PTA_MMX
 +                                        | PTA_PREFETCH_SSE},
 +      {"geode", PROCESSOR_GEODE, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
 +				   | PTA_3DNOW_A},
        {"k6", PROCESSOR_K6, PTA_MMX},
        {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
        {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
 @@ -1523,10 +1648,19 @@ override_options (void)
  			       | PTA_SSE | PTA_SSE2 },
        {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
  				      | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
 +      {"k8-sse3", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
 +				      | PTA_3DNOW_A | PTA_SSE | PTA_SSE2
 +				      | PTA_SSE3 },
        {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
  				      | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
 +      {"opteron-sse3", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
 +				      | PTA_3DNOW_A | PTA_SSE | PTA_SSE2
 +				      | PTA_SSE3 },
        {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
  				      | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
 +      {"athlon64-sse3", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
 +				      | PTA_3DNOW_A | PTA_SSE | PTA_SSE2
 +				      | PTA_SSE3 },
        {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
  				      | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
        {"generic32", PROCESSOR_GENERIC32, 0 /* flags are only used for -march switch.  */ },
 @@ -1686,6 +1820,9 @@ override_options (void)
  	if (processor_alias_table[i].flags & PTA_SSE3
  	    && !(target_flags_explicit & MASK_SSE3))
  	  target_flags |= MASK_SSE3;
 +	if (processor_alias_table[i].flags & PTA_SSSE3
 +	    && !(target_flags_explicit & MASK_SSSE3))
 +	  target_flags |= MASK_SSSE3;
  	if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
  	  x86_prefetch_sse = true;
  	if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
 @@ -1862,6 +1999,10 @@ override_options (void)
    if (!TARGET_80387)
      target_flags |= MASK_NO_FANCY_MATH_387;
  
 +  /* Turn on SSE3 builtins for -mssse3.  */
 +  if (TARGET_SSSE3)
 +    target_flags |= MASK_SSE3;
 +
    /* Turn on SSE2 builtins for -msse3.  */
    if (TARGET_SSE3)
      target_flags |= MASK_SSE2;
 @@ -13697,6 +13838,9 @@ ix86_issue_rate (void)
      case PROCESSOR_GENERIC64:
        return 3;
  
 +    case PROCESSOR_CORE2:
 +      return 4;
 +
      default:
        return 1;
      }
 @@ -14565,6 +14709,41 @@ enum ix86_builtins
    IX86_BUILTIN_MONITOR,
    IX86_BUILTIN_MWAIT,
  
 +  /* SSSE3.  */
 +  IX86_BUILTIN_PHADDW,
 +  IX86_BUILTIN_PHADDD,
 +  IX86_BUILTIN_PHADDSW,
 +  IX86_BUILTIN_PHSUBW,
 +  IX86_BUILTIN_PHSUBD,
 +  IX86_BUILTIN_PHSUBSW,
 +  IX86_BUILTIN_PMADDUBSW,
 +  IX86_BUILTIN_PMULHRSW,
 +  IX86_BUILTIN_PSHUFB,
 +  IX86_BUILTIN_PSIGNB,
 +  IX86_BUILTIN_PSIGNW,
 +  IX86_BUILTIN_PSIGND,
 +  IX86_BUILTIN_PALIGNR,
 +  IX86_BUILTIN_PABSB,
 +  IX86_BUILTIN_PABSW,
 +  IX86_BUILTIN_PABSD,
 +
 +  IX86_BUILTIN_PHADDW128,
 +  IX86_BUILTIN_PHADDD128,
 +  IX86_BUILTIN_PHADDSW128,
 +  IX86_BUILTIN_PHSUBW128,
 +  IX86_BUILTIN_PHSUBD128,
 +  IX86_BUILTIN_PHSUBSW128,
 +  IX86_BUILTIN_PMADDUBSW128,
 +  IX86_BUILTIN_PMULHRSW128,
 +  IX86_BUILTIN_PSHUFB128,
 +  IX86_BUILTIN_PSIGNB128,
 +  IX86_BUILTIN_PSIGNW128,
 +  IX86_BUILTIN_PSIGND128,
 +  IX86_BUILTIN_PALIGNR128,
 +  IX86_BUILTIN_PABSB128,
 +  IX86_BUILTIN_PABSW128,
 +  IX86_BUILTIN_PABSD128,
 +
    IX86_BUILTIN_VEC_INIT_V2SI,
    IX86_BUILTIN_VEC_INIT_V4HI,
    IX86_BUILTIN_VEC_INIT_V8QI,
 @@ -14906,7 +15085,33 @@ static const struct builtin_description 
    { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
    { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
    { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
 -  { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
 +  { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 },
 +
 +  /* SSSE3 */
 +  { MASK_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv8hi3, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv4hi3, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, 0, 0 }
  };
  
  static const struct builtin_description bdesc_1arg[] =
 @@ -14953,6 +15158,14 @@ static const struct builtin_description 
    /* SSE3 */
    { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
    { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
 +
 +  /* SSSE3 */
 +  { MASK_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, 0, 0 },
 +  { MASK_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, 0, 0 },
  };
  
  static void
 @@ -15087,6 +15300,16 @@ ix86_init_mmx_sse_builtins (void)
    /* Normal vector unops.  */
    tree v4sf_ftype_v4sf
      = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
 +  tree v16qi_ftype_v16qi
 +    = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
 +  tree v8hi_ftype_v8hi
 +    = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
 +  tree v4si_ftype_v4si
 +    = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
 +  tree v8qi_ftype_v8qi
 +    = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
 +  tree v4hi_ftype_v4hi
 +    = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
  
    /* Normal vector binops.  */
    tree v4sf_ftype_v4sf_v4sf
 @@ -15106,6 +15329,12 @@ ix86_init_mmx_sse_builtins (void)
  				long_long_unsigned_type_node,
  				long_long_unsigned_type_node, NULL_TREE);
  
 +  tree di_ftype_di_di_int
 +    = build_function_type_list (long_long_unsigned_type_node,
 +				long_long_unsigned_type_node,
 +				long_long_unsigned_type_node,
 +				integer_type_node, NULL_TREE);
 +
    tree v2si_ftype_v2sf
      = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
    tree v2sf_ftype_v2si
 @@ -15207,6 +15436,9 @@ ix86_init_mmx_sse_builtins (void)
    tree v2di_ftype_v2di_int
      = build_function_type_list (V2DI_type_node,
  				V2DI_type_node, integer_type_node, NULL_TREE);
 +  tree v2di_ftype_v2di_v2di_int
 +    = build_function_type_list (V2DI_type_node, V2DI_type_node,
 +				V2DI_type_node, integer_type_node, NULL_TREE);
    tree v4si_ftype_v4si_int
      = build_function_type_list (V4SI_type_node,
  				V4SI_type_node, integer_type_node, NULL_TREE);
 @@ -15323,6 +15555,50 @@ ix86_init_mmx_sse_builtins (void)
        def_builtin (d->mask, d->name, type, d->code);
      }
  
 +  /* Add all builtins that are more or less simple operations on 1 operand.  */
 +  for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
 +    {
 +      enum machine_mode mode;
 +      tree type;
 +
 +      if (d->name == 0)
 +	continue;
 +      mode = insn_data[d->icode].operand[1].mode;
 +
 +      switch (mode)
 +	{
 +	case V16QImode:
 +	  type = v16qi_ftype_v16qi;
 +	  break;
 +	case V8HImode:
 +	  type = v8hi_ftype_v8hi;
 +	  break;
 +	case V4SImode:
 +	  type = v4si_ftype_v4si;
 +	  break;
 +	case V2DFmode:
 +	  type = v2df_ftype_v2df;
 +	  break;
 +	case V4SFmode:
 +	  type = v4sf_ftype_v4sf;
 +	  break;
 +	case V8QImode:
 +	  type = v8qi_ftype_v8qi;
 +	  break;
 +	case V4HImode:
 +	  type = v4hi_ftype_v4hi;
 +	  break;
 +	case V2SImode:
 +	  type = v2si_ftype_v2si;
 +	  break;
 +
 +	default:
 +	  abort ();
 +	}
 +
 +      def_builtin (d->mask, d->name, type, d->code);
 +    }
 +
    /* Add the remaining MMX insns with somewhat more complicated types.  */
    def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
    def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
 @@ -15522,6 +15798,12 @@ ix86_init_mmx_sse_builtins (void)
    def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
  	       v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
  
 +  /* SSSE3.  */
 +  def_builtin (MASK_SSSE3, "__builtin_ia32_palignr128",
 +	       v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PALIGNR128);
 +  def_builtin (MASK_SSSE3, "__builtin_ia32_palignr", di_ftype_di_di_int,
 +	       IX86_BUILTIN_PALIGNR);
 +
    /* Access to the vec_init patterns.  */
    ftype = build_function_type_list (V2SI_type_node, integer_type_node,
  				    integer_type_node, NULL_TREE);
 @@ -16020,7 +16302,7 @@ ix86_expand_builtin (tree exp, rtx targe
    tree arglist = TREE_OPERAND (exp, 1);
    tree arg0, arg1, arg2;
    rtx op0, op1, op2, pat;
 -  enum machine_mode tmode, mode0, mode1, mode2;
 +  enum machine_mode tmode, mode0, mode1, mode2, mode3;
    unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  
    switch (fcode)
 @@ -16490,6 +16772,52 @@ ix86_expand_builtin (tree exp, rtx targe
        return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
  				       target, 1);
  
 +    case IX86_BUILTIN_PALIGNR:
 
 *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
 _______________________________________________
 svn-src-all@freebsd.org mailing list
 http://lists.freebsd.org/mailman/listinfo/svn-src-all
 To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"
 
>Unformatted:
