Diffstat (limited to 'patches/gcc/3.4.6/600-arm-ldm-peephole.patch')
-rw-r--r--  patches/gcc/3.4.6/600-arm-ldm-peephole.patch  | 68
1 file changed, 68 insertions(+), 0 deletions(-)
diff --git a/patches/gcc/3.4.6/600-arm-ldm-peephole.patch b/patches/gcc/3.4.6/600-arm-ldm-peephole.patch
new file mode 100644
index 0000000..81a0ac8
--- /dev/null
+++ b/patches/gcc/3.4.6/600-arm-ldm-peephole.patch
@@ -0,0 +1,68 @@
+diff -durN gcc-3.4.6.orig/gcc/config/arm/arm.c gcc-3.4.6/gcc/config/arm/arm.c
+--- gcc-3.4.6.orig/gcc/config/arm/arm.c 2005-10-01 15:31:38.000000000 +0200
++++ gcc-3.4.6/gcc/config/arm/arm.c 2007-08-15 22:54:59.000000000 +0200
+@@ -4857,6 +4857,11 @@
+ *load_offset = unsorted_offsets[order[0]];
+ }
+
++ /* For XScale a two-word LDM is a performance loss, so only do this if
++ size is more important. See comments in arm_gen_load_multiple. */
++ if (nops == 2 && arm_tune_xscale && !optimize_size)
++ return 0;
++
+ if (unsorted_offsets[order[0]] == 0)
+ return 1; /* ldmia */
+
+@@ -5083,6 +5088,11 @@
+ *load_offset = unsorted_offsets[order[0]];
+ }
+
++ /* For XScale a two-word LDM is a performance loss, so only do this if
++ size is more important. See comments in arm_gen_load_multiple. */
++ if (nops == 2 && arm_tune_xscale && !optimize_size)
++ return 0;
++
+ if (unsorted_offsets[order[0]] == 0)
+ return 1; /* stmia */
+
+diff -durN gcc-3.4.6.orig/gcc/config/arm/arm.md gcc-3.4.6/gcc/config/arm/arm.md
+--- gcc-3.4.6.orig/gcc/config/arm/arm.md 2005-10-01 15:31:38.000000000 +0200
++++ gcc-3.4.6/gcc/config/arm/arm.md 2007-08-15 22:54:59.000000000 +0200
+@@ -8811,13 +8811,16 @@
+ (set_attr "length" "4,8,8")]
+ )
+
++; Try to convert LDR+LDR+arith into [add+]LDM+arith
++; On XScale, LDM is always slower than two LDRs, so only do this if
++; optimising for size.
+ (define_insn "*arith_adjacentmem"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operand:SI 2 "memory_operand" "m")
+ (match_operand:SI 3 "memory_operand" "m")]))
+ (clobber (match_scratch:SI 4 "=r"))]
+- "TARGET_ARM && adjacent_mem_locations (operands[2], operands[3])"
++ "TARGET_ARM && (!arm_tune_xscale || optimize_size) && adjacent_mem_locations (operands[2], operands[3])"
+ "*
+ {
+ rtx ldm[3];
+@@ -8852,6 +8855,8 @@
+ }
+ if (val1 && val2)
+ {
++ /* This would be a loss on a Harvard core, but adjacent_mem_locations()
++ will prevent it from happening. */
+ rtx ops[3];
+ ldm[0] = ops[0] = operands[4];
+ ops[1] = XEXP (XEXP (operands[2], 0), 0);
+diff -durN gcc-3.4.6.orig/gcc/genpeep.c gcc-3.4.6/gcc/genpeep.c
+--- gcc-3.4.6.orig/gcc/genpeep.c 2003-07-05 07:27:22.000000000 +0200
++++ gcc-3.4.6/gcc/genpeep.c 2007-08-15 22:54:59.000000000 +0200
+@@ -381,6 +381,7 @@
+ printf ("#include \"recog.h\"\n");
+ printf ("#include \"except.h\"\n\n");
+ printf ("#include \"function.h\"\n\n");
++ printf ("#include \"flags.h\"\n\n");
+
+ printf ("#ifdef HAVE_peephole\n");
+ printf ("extern rtx peep_operand[];\n\n");