File gcc-amdfam10-suse-2.patch of Package libgcj41
Index: gcc/config/i386/i386.c
===================================================================
--- gcc/config/i386/i386.c.orig	2009-11-20 13:42:41.000000000 +0100
+++ gcc/config/i386/i386.c	2009-11-20 13:42:43.000000000 +0100
@@ -480,6 +480,64 @@ struct processor_costs k8_cost = {
   COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
 };
 
+struct processor_costs amdfam10_cost = {
+  COSTS_N_INSNS (1),			/* cost of an add instruction */
+  COSTS_N_INSNS (2),			/* cost of a lea instruction */
+  COSTS_N_INSNS (1),			/* variable shift costs */
+  COSTS_N_INSNS (1),			/* constant shift costs */
+  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
+   COSTS_N_INSNS (4),			/*				 HI */
+   COSTS_N_INSNS (3),			/*				 SI */
+   COSTS_N_INSNS (4),			/*				 DI */
+   COSTS_N_INSNS (5)},			/*			      other */
+  0,					/* cost of multiply per each bit set */
+  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
+   COSTS_N_INSNS (35),			/*			    HI */
+   COSTS_N_INSNS (51),			/*			    SI */
+   COSTS_N_INSNS (83),			/*			    DI */
+   COSTS_N_INSNS (83)},			/*			 other */
+  COSTS_N_INSNS (1),			/* cost of movsx */
+  COSTS_N_INSNS (1),			/* cost of movzx */
+  8,					/* "large" insn */
+  9,					/* MOVE_RATIO */
+  4,					/* cost for loading QImode using movzbl */
+  {3, 4, 3},				/* cost of loading integer registers
+					   in QImode, HImode and SImode.
+					   Relative to reg-reg move (2).  */
+  {3, 4, 3},				/* cost of storing integer registers */
+  4,					/* cost of reg,reg fld/fst */
+  {4, 4, 12},				/* cost of loading fp registers (On K8 -332)
+					   in SFmode, DFmode and XFmode */
+  {6, 6, 8},				/* cost of storing fp registers (On K8 -223)
+					   in SFmode, DFmode and XFmode */
+  2,					/* cost of moving MMX register */
+  {3, 3},				/* cost of loading MMX registers
+					   in SImode and DImode */
+  {4, 4},				/* cost of storing MMX registers
+					   in SImode and DImode */
+  2,					/* cost of moving SSE register */
+  {4, 4, 3},				/* cost of loading SSE registers
+					   in SImode, DImode and TImode */
+  {4, 4, 5},				/* cost of storing SSE registers
+					   in SImode, DImode and TImode */
+  3,					/* MMX or SSE register to integer */
+					/* On K8
+					    MOVD reg64, xmmreg Double FSTORE 4
+					    MOVD reg32, xmmreg Double FSTORE 4
+					   On AMDFAM10
+					    MOVD reg64, xmmreg Double FADD 3
+								1/1  1/1
+					    MOVD reg32, xmmreg Double FADD 3
+								1/1  1/1 */
+  64,					/* size of prefetch block */
+  6,					/* number of parallel prefetches */
+  5,					/* Branch cost */
+  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
+  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
+  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
+  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
+  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
+  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
+};
+
 static const struct processor_costs pentium4_cost = {
   COSTS_N_INSNS (1),			/* cost of an add instruction */
@@ -708,6 +766,7 @@ const struct processor_costs *ix86_cost
 #define m_PENT4  (1<<PROCESSOR_PENTIUM4)
 #define m_K8  (1<<PROCESSOR_K8)
 #define m_ATHLON_K8  (m_K8 | m_ATHLON)
+#define m_AMDFAM10  (1<<PROCESSOR_AMDFAM10)
 #define m_NOCONA  (1<<PROCESSOR_NOCONA)
 #define m_GENERIC32  (1<<PROCESSOR_GENERIC32)
 #define m_GENERIC64  (1<<PROCESSOR_GENERIC64)
@@ -719,17 +778,19 @@ const struct processor_costs *ix86_cost
 /* Leave is not affecting Nocona SPEC2000 results negatively, so enabling
    for Generic64 seems like good code size tradeoff.  We can't enable it
    for 32bit generic because it is not working well with PPro base chips.  */
-const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8 | m_GENERIC64;
-const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
+const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8 | m_GENERIC64 | m_AMDFAM10;
+const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC | m_AMDFAM10;
 const int x86_zero_extend_with_and = m_486 | m_PENT;
-const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_GENERIC /* m_386 | m_K6 */;
+/*Enable to zero extend integer registers to avoid partial dependencies*/
+const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_GENERIC | m_AMDFAM10 /* m_386 | m_K6 */;
 const int x86_double_with_add = ~m_386;
 const int x86_use_bit_test = m_386;
-const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_GENERIC;
-const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
-const int x86_fisttp = m_NOCONA;
-const int x86_3dnow_a = m_ATHLON_K8;
-const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
+const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_GENERIC | m_AMDFAM10;
+const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_AMDFAM10;
+/*fisttp is an SSE3 instruction*/
+const int x86_fisttp = m_NOCONA | m_AMDFAM10;
+const int x86_3dnow_a = m_ATHLON_K8 | m_AMDFAM10;
+const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC | m_AMDFAM10;
 /* Branch hints were put in P4 based on simulation result. But
    after P4 was made, no performance benefit was observed with
    branch hints. It also increases the code size. As the result,
@@ -747,13 +808,13 @@ const int x86_use_sahf = m_PPRO | m_K6 |
 const int x86_partial_reg_stall = m_PPRO;
 const int x86_partial_flag_reg_stall = m_GENERIC;
 const int x86_use_himode_fiop = m_386 | m_486 | m_K6;
-const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_GENERIC);
+const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_GENERIC | m_AMDFAM10);
 const int x86_use_mov0 = m_K6;
 const int x86_use_cltd = ~(m_PENT | m_K6 | m_GENERIC);
 const int x86_read_modify_write = ~m_PENT;
 const int x86_read_modify = ~(m_PENT | m_PPRO);
 const int x86_split_long_moves = m_PPRO;
-const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_GENERIC; /* m_PENT4 ? */
+const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_GENERIC | m_AMDFAM10; /* m_PENT4 ? */
 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
 const int x86_qimode_math = ~(0);
@@ -763,18 +824,24 @@ const int x86_promote_qi_regs = 0;
    if our scheme for avoiding partial stalls was more effective.  */
 const int x86_himode_math = ~(m_PPRO);
 const int x86_promote_hi_regs = m_PPRO;
-const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_GENERIC;
-const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_GENERIC;
-const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA | m_GENERIC;
-const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA | m_GENERIC;
-const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC);
-const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
-const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
-const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC;
+/*Enable if add/sub rsp is preferred over 1 or 2 push/pop*/
+const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_GENERIC | m_AMDFAM10;
+const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_GENERIC | m_AMDFAM10;
+const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA | m_GENERIC | m_AMDFAM10;
+const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA | m_GENERIC | m_AMDFAM10;
+/*Enable if integer moves are preferred for DFmode copies*/
+const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC | m_AMDFAM10);
+const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC | m_AMDFAM10;
+const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC | m_AMDFAM10;
+/*If ACCUMULATE_OUTGOING_ARGS is enabled, the maximum amount of space required for outgoing arguments
+will be computed and placed into the variable `current_function_outgoing_args_size'. No space will be
+pushed onto the stack for each call; instead, the function prologue should increase the stack frame
+size by this amount. Setting both @code{PUSH_ARGS} and @code{ACCUMULATE_OUTGOING_ARGS} is not proper.*/
+const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC | m_AMDFAM10;
 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO | m_GENERIC;
 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO | m_GENERIC;
 const int x86_shift1 = ~m_486;
-const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
+const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC | m_AMDFAM10;
 /* In Generic model we have an confict here in between PPro/Pentium4 based chips
    that thread 128bit SSE registers as single units versus K8 based chips that
    divide SSE registers to two 64bit halves.
@@ -784,15 +851,15 @@ const int x86_arch_always_fancy_math_387
    this option on P4 brings over 20% SPECfp regression, while enabling it on
    K8 brings roughly 2.4% regression that can be partly masked by careful
    scheduling of moves.  */
-const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC;
+const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC | m_AMDFAM10;
 /* Set for machines where the type and dependencies are resolved on SSE
    register parts instead of whole registers, so we may maintain just
    lower part of scalar values in proper format leaving the upper part
    undefined.  */
 const int x86_sse_split_regs = m_ATHLON_K8;
-const int x86_sse_typeless_stores = m_ATHLON_K8;
+const int x86_sse_typeless_stores = m_ATHLON_K8 | m_AMDFAM10;
 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
-const int x86_use_ffreep = m_ATHLON_K8;
+const int x86_use_ffreep = m_ATHLON_K8 | m_AMDFAM10;
 const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
 const int x86_use_incdec = ~(m_PENT4 | m_NOCONA | m_GENERIC);
@@ -803,14 +870,14 @@ const int x86_inter_unit_moves = 0 /* ~(
 const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO | m_GENERIC32;
 /* Some CPU cores are not able to predict more than 4 branch instructions in
    the 16 byte window.  */
-const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
-const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT | m_GENERIC;
-const int x86_use_bt = m_ATHLON_K8;
+const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC | m_AMDFAM10;
+const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT | m_GENERIC | m_AMDFAM10;
+const int x86_use_bt = m_ATHLON_K8 | m_AMDFAM10;
 /* Compare and exchange was added for 80486.  */
 const int x86_cmpxchg = ~m_386;
 /* Exchange and add was added for 80486.  */
 const int x86_xadd = ~m_386;
-const int x86_pad_returns = m_ATHLON_K8 | m_GENERIC;
+const int x86_pad_returns = m_ATHLON_K8 | m_GENERIC | m_AMDFAM10;
 
 /* In case the average insn count for single function invocation is
    lower than this constant, emit fast (but longer) prologue and
@@ -1438,7 +1505,8 @@ override_options (void)
       {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
       {&nocona_cost, 0, 0, 0, 0, 0, 0, 0},
       {&generic32_cost, 0, 0, 16, 7, 16, 7, 16},
-      {&generic64_cost, 0, 0, 16, 7, 16, 7, 16}
+      {&generic64_cost, 0, 0, 16, 7, 16, 7, 16},
+      {&amdfam10_cost, 0, 0, 32, 7, 32, 7, 32}
     };
 
   static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
@@ -1512,7 +1580,7 @@ override_options (void)
	 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
       {"generic32", PROCESSOR_GENERIC32, 0 /* flags are only used for -march switch.  */ },
       {"generic64", PROCESSOR_GENERIC64, PTA_64BIT /* flags are only used for -march switch.  */ },
-      {"amdfam10", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
+      {"amdfam10", PROCESSOR_AMDFAM10, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
	 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2| PTA_SSE3 | PTA_POPCNT
	 | PTA_ABM | PTA_SSE4A},
     };
@@ -13378,6 +13446,7 @@ ix86_issue_rate (void)
     case PROCESSOR_PENTIUM4:
     case PROCESSOR_ATHLON:
     case PROCESSOR_K8:
+    case PROCESSOR_AMDFAM10:
     case PROCESSOR_NOCONA:
     case PROCESSOR_GENERIC32:
     case PROCESSOR_GENERIC64:
@@ -13573,6 +13642,7 @@ ix86_adjust_cost (rtx insn, rtx link, rt
 
     case PROCESSOR_ATHLON:
     case PROCESSOR_K8:
+    case PROCESSOR_AMDFAM10:
     case PROCESSOR_GENERIC32:
     case PROCESSOR_GENERIC64:
       memory = get_attr_memory (insn);
@@ -13594,7 +13664,7 @@ ix86_adjust_cost (rtx insn, rtx link, rt
	  if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
	    loadcost = 3;
	  else
-	    loadcost = TARGET_ATHLON ? 2 : 0;
+	    loadcost = (TARGET_ATHLON_K8 | TARGET_AMDFAM10) ? 2 : 0;
 
	  if (cost >= loadcost)
	    cost -= loadcost;
Index: gcc/config/i386/i386.h
===================================================================
--- gcc/config/i386/i386.h.orig	2009-11-20 13:42:41.000000000 +0100
+++ gcc/config/i386/i386.h	2009-11-20 13:42:43.000000000 +0100
@@ -139,6 +139,7 @@ extern const struct processor_costs *ix8
 #define TARGET_GENERIC32 (ix86_tune == PROCESSOR_GENERIC32)
 #define TARGET_GENERIC64 (ix86_tune == PROCESSOR_GENERIC64)
 #define TARGET_GENERIC (TARGET_GENERIC32 || TARGET_GENERIC64)
+#define TARGET_AMDFAM10 (ix86_tune == PROCESSOR_AMDFAM10)
 
 #define TUNEMASK (1 << ix86_tune)
 extern const int x86_use_leave, x86_push_memory, x86_zero_extend_with_and;
@@ -368,6 +369,8 @@ extern int x86_prefetch_sse;
	}							\
       else if (TARGET_K8)					\
	builtin_define ("__tune_k8__");				\
+      else if (TARGET_AMDFAM10)					\
+	builtin_define ("__tune_amdfam10__");			\
       else if (TARGET_PENTIUM4)				\
	builtin_define ("__tune_pentium4__");			\
       else if (TARGET_NOCONA)					\
@@ -439,6 +442,11 @@ extern int x86_prefetch_sse;
	  builtin_define ("__k8");				\
	  builtin_define ("__k8__");				\
	}							\
+      else if (ix86_arch == PROCESSOR_AMDFAM10)		\
+	{							\
+	  builtin_define ("__amdfam10");			\
+	  builtin_define ("__amdfam10__");			\
+	}							\
       else if (ix86_arch == PROCESSOR_PENTIUM4)		\
	{							\
	  builtin_define ("__pentium4");			\
@@ -470,13 +478,14 @@ extern int x86_prefetch_sse;
 #define TARGET_CPU_DEFAULT_prescott 15
 #define TARGET_CPU_DEFAULT_nocona 16
 #define TARGET_CPU_DEFAULT_generic 17
+#define TARGET_CPU_DEFAULT_amdfam10 18
 
 #define TARGET_CPU_DEFAULT_NAMES {"i386", "i486", "pentium", "pentium-mmx",\
				  "pentiumpro", "pentium2", "pentium3", \
				  "pentium4", "k6", "k6-2", "k6-3",\
				  "athlon", "athlon-4", "k8", \
				  "pentium-m", "prescott", "nocona", \
-				  "generic"}
+				  "generic", "amdfam10"}
 
 #ifndef CC1_SPEC
 #define CC1_SPEC "%(cc1_cpu) "
@@ -2139,6 +2148,7 @@ enum processor_type
   PROCESSOR_NOCONA,
   PROCESSOR_GENERIC32,
   PROCESSOR_GENERIC64,
+  PROCESSOR_AMDFAM10,
   PROCESSOR_max
 };
 
Index: gcc/config/i386/i386.md
===================================================================
--- gcc/config/i386/i386.md.orig	2009-11-20 13:42:41.000000000 +0100
+++ gcc/config/i386/i386.md	2009-11-20 13:42:43.000000000 +0100
@@ -201,7 +201,7 @@
 
 ;; Processor type.  This attribute must exactly match the processor_type
 ;; enumeration in i386.h.
-(define_attr "cpu" "i386,i486,pentium,pentiumpro,k6,athlon,pentium4,k8,nocona,generic32,generic64"
+(define_attr "cpu" "i386,i486,pentium,pentiumpro,k6,athlon,pentium4,k8,nocona,generic32,generic64,amdfam10"
   (const (symbol_ref "ix86_tune")))
 
 ;; A basic instruction type.  Refinements due to arguments to be
@@ -20287,7 +20287,7 @@
	(mult:DI (match_operand:DI 1 "memory_operand" "")
		 (match_operand:DI 2 "immediate_operand" "")))
    (clobber (reg:CC FLAGS_REG))])]
-  "(TARGET_K8 || TARGET_GENERIC64) && !optimize_size
+  "(TARGET_K8 || TARGET_GENERIC64 || TARGET_AMDFAM10) && !optimize_size
   && (GET_CODE (operands[2]) != CONST_INT
       || !CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'K'))"
   [(set (match_dup 3) (match_dup 1))
@@ -20301,7 +20301,7 @@
	(mult:SI (match_operand:SI 1 "memory_operand" "")
		 (match_operand:SI 2 "immediate_operand" "")))
    (clobber (reg:CC FLAGS_REG))])]
-  "(TARGET_K8 || TARGET_GENERIC64) && !optimize_size
+  "(TARGET_K8 || TARGET_GENERIC64 || TARGET_AMDFAM10) && !optimize_size
   && (GET_CODE (operands[2]) != CONST_INT
       || !CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'K'))"
   [(set (match_dup 3) (match_dup 1))
@@ -20316,7 +20316,7 @@
	  (mult:SI (match_operand:SI 1 "memory_operand" "")
		   (match_operand:SI 2 "immediate_operand" ""))))
    (clobber (reg:CC FLAGS_REG))])]
-  "(TARGET_K8 || TARGET_GENERIC64) && !optimize_size
+  "(TARGET_K8 || TARGET_GENERIC64 || TARGET_AMDFAM10) && !optimize_size
   && (GET_CODE (operands[2]) != CONST_INT
       || !CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'K'))"
   [(set (match_dup 3) (match_dup 1))
@@ -20334,7 +20334,7 @@
		   (match_operand:DI 2 "const_int_operand" "")))
	      (clobber (reg:CC FLAGS_REG))])
    (match_scratch:DI 3 "r")]
-  "(TARGET_K8 || TARGET_GENERIC64) && !optimize_size
+  "(TARGET_K8 || TARGET_GENERIC64 || TARGET_AMDFAM10) && !optimize_size
   && CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'K')"
   [(set (match_dup 3) (match_dup 2))
   (parallel [(set (match_dup 0) (mult:DI (match_dup 0) (match_dup 3)))
@@ -20350,7 +20350,7 @@
		   (match_operand:SI 2 "const_int_operand" "")))
	      (clobber (reg:CC FLAGS_REG))])
    (match_scratch:SI 3 "r")]
-  "(TARGET_K8 || TARGET_GENERIC64) && !optimize_size
+  "(TARGET_K8 || TARGET_GENERIC64 || TARGET_AMDFAM10) && !optimize_size
   && CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'K')"
   [(set (match_dup 3) (match_dup 2))
   (parallel [(set (match_dup 0) (mult:SI (match_dup 0) (match_dup 3)))
@@ -20366,7 +20366,7 @@
		   (match_operand:HI 2 "immediate_operand" "")))
	      (clobber (reg:CC FLAGS_REG))])
    (match_scratch:HI 3 "r")]
-  "(TARGET_K8 || TARGET_GENERIC64) && !optimize_size"
+  "(TARGET_K8 || TARGET_GENERIC64 || TARGET_AMDFAM10) && !optimize_size"
   [(set (match_dup 3) (match_dup 2))
    (parallel [(set (match_dup 0) (mult:HI (match_dup 0) (match_dup 3)))
	      (clobber (reg:CC FLAGS_REG))])]
Index: gcc/config/i386/sse.md
===================================================================
--- gcc/config/i386/sse.md.orig	2009-11-20 13:42:41.000000000 +0100
+++ gcc/config/i386/sse.md	2009-11-20 13:42:43.000000000 +0100
@@ -4488,6 +4488,7 @@
   [(set_attr "type" "sselog1")
    (set_attr "mode" "DI")])
 
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;;
 ;; AMD SSE4A instructions
Index: gcc/config.gcc
===================================================================
--- gcc/config.gcc.orig	2009-11-20 13:42:41.000000000 +0100
+++ gcc/config.gcc	2009-11-20 13:42:43.000000000 +0100
@@ -2396,6 +2396,9 @@ if test x$with_cpu = x ; then
       ;;
     i686-*-* | i786-*-*)
       case ${target_noncanonical} in
+	amdfam10-*)
+	  with_cpu=amdfam10
+	  ;;
	k8-*|opteron-*|athlon_64-*)
	  with_cpu=k8
	  ;;
@@ -2433,6 +2436,9 @@ if test x$with_cpu = x ; then
       ;;
     x86_64-*-*)
       case ${target_noncanonical} in
+	amdfam10-*)
+	  with_cpu=amdfam10
+	  ;;
	k8-*|opteron-*|athlon_64-*)
	  with_cpu=k8
	  ;;
@@ -2662,7 +2668,7 @@ case "${target}" in
			esac
			# OK
			;;
-		"" | k8 | opteron | athlon64 | athlon-fx | nocona | generic)
+		"" | k8 | opteron | athlon64 | athlon-fx | nocona | generic | amdfam10 )
			# OK
			;;
		*)
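
Most of the i386.c changes above follow a single pattern: each `x86_*` tuning flag is a bitmask with one bit per `processor_type`, so supporting a new core mostly means ORing the new `m_AMDFAM10` bit into every flag that should stay enabled when tuning for it; the compiler then tests each flag against the bit for the active `ix86_tune`. Below is a minimal standalone sketch of that mechanism. The two-entry enum and the `main` driver are illustrative simplifications, not GCC code; `TUNEMASK` mirrors the `(1 << ix86_tune)` definition visible in the i386.h hunk.

/* tuning_mask_sketch.c — simplified model of the x86_* flag scheme,
   assuming the conventions shown in the patch above. */
#include <stdio.h>

enum processor_type { PROCESSOR_K8, PROCESSOR_AMDFAM10, PROCESSOR_max };

#define m_K8       (1 << PROCESSOR_K8)
#define m_AMDFAM10 (1 << PROCESSOR_AMDFAM10)

/* ORing m_AMDFAM10 into a flag, as the patch does throughout i386.c,
   turns the corresponding optimization on for -mtune=amdfam10. */
static const int x86_use_bt = m_K8 | m_AMDFAM10;

int main(void)
{
  enum processor_type ix86_tune = PROCESSOR_AMDFAM10;
  int tunemask = 1 << ix86_tune;  /* corresponds to TUNEMASK in i386.h */

  printf("bt instruction enabled: %s\n",
         (x86_use_bt & tunemask) ? "yes" : "no");
  return 0;
}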
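The i386.h hunks also wire up preprocessor predefines: `-march=amdfam10` defines `__amdfam10`/`__amdfam10__`, and `-mtune=amdfam10` defines `__tune_amdfam10__`. A quick smoke test for a compiler built with this patch might look like the following (hypothetical file name and compiler invocation; `-dM -E` is the standard GCC way to dump predefined macros, e.g. `gcc -march=amdfam10 -dM -E - </dev/null | grep amdfam10`).

/* check_amdfam10.c — compile with a patched gcc: gcc -march=amdfam10 check_amdfam10.c */
#include <stdio.h>

int main(void)
{
#ifdef __amdfam10__
  puts("__amdfam10__ defined: compiling for AMD Family 10h");
#else
  puts("__amdfam10__ not defined");
#endif
  return 0;
}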