/* x86 fat binary initializers.

   Contributed to the GNU project by Kevin Ryde (original x86_32 code) and
   Torbjorn Granlund (port to x86_64)

   THE FUNCTIONS AND VARIABLES IN THIS FILE ARE FOR INTERNAL USE ONLY.
   THEY'RE ALMOST CERTAIN TO BE SUBJECT TO INCOMPATIBLE CHANGES OR DISAPPEAR
   COMPLETELY IN FUTURE GNU MP RELEASES.

Copyright 2003, 2004, 2009 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/. */

#include <stdio.h>    /* for printf */
#include <stdlib.h>   /* for getenv */
#include <string.h>

#include "gmp.h"
#include "gmp-impl.h"

/* Change this to "#define TRACE(x) x" for some traces.
*/
#define TRACE(x)


/* fat_entry.asm.  Executes the CPUID instruction: the 12 vendor/brand
   bytes for the requested leaf are written to DST, and the value of EAX
   is returned (used below as the family/model word for leaf 1).  */
long __gmpn_cpuid __GMP_PROTO ((char dst[12], int id));


/* Function-pointer types matching the DECL_* prototypes (presumably
   declared in gmp-impl.h), used for the casts in __gmpn_cpuvec_init.  */
typedef DECL_preinv_divrem_1 ((*preinv_divrem_1_t));
typedef DECL_preinv_mod_1 ((*preinv_mod_1_t));

/* The global dispatch vector.  Every entry initially points at a *_init
   stub; presumably each stub runs __gmpn_cpuvec_init and then redispatches
   through the freshly decided vector -- confirm against fat.h.  The
   trailing 0 leaves the `initialized' flag clear until the real routines
   are installed.  */
struct cpuvec_t __gmpn_cpuvec = {
  __MPN(add_n_init),
  __MPN(addmul_1_init),
  __MPN(copyd_init),
  __MPN(copyi_init),
  __MPN(divexact_1_init),
  __MPN(divexact_by3c_init),
  __MPN(divrem_1_init),
  __MPN(gcd_1_init),
  __MPN(lshift_init),
  __MPN(mod_1_init),
  __MPN(mod_34lsub1_init),
  __MPN(modexact_1c_odd_init),
  __MPN(mul_1_init),
  __MPN(mul_basecase_init),
  __MPN(preinv_divrem_1_init),
  __MPN(preinv_mod_1_init),
  __MPN(rshift_init),
  __MPN(sqr_basecase_init),
  __MPN(sub_n_init),
  __MPN(submul_1_init),
  0
};


/* The following setups start with generic x86, then overwrite with
   specifics for a chip, and higher versions of that chip.

   The arrangement of the setups here will normally be the same as the $path
   selections in configure.in for the respective chips.

   This code is reentrant and thread safe.  We always calculate the same
   decided_cpuvec, so if two copies of the code are running it doesn't
   matter which completes first, both write the same to __gmpn_cpuvec.

   We need to go via decided_cpuvec because if one thread has completed
   __gmpn_cpuvec then it may be making use of the threshold values in that
   vector.  If another thread is still running __gmpn_cpuvec_init then we
   don't want it to write different values to those fields since some of the
   asm routines only operate correctly up to their own defined threshold,
   not an arbitrary value.
*/

/* Decide the best set of mpn routines for the CPU we are running on and
   install them in __gmpn_cpuvec.  Presumably invoked lazily by the *_init
   stubs the first time any entry of the vector is called -- confirm in
   fat_entry.asm.  See the comment above for the reentrancy argument.  */
void
__gmpn_cpuvec_init (void)
{
  struct cpuvec_t decided_cpuvec;

  TRACE (printf ("__gmpn_cpuvec_init:\n"));

  /* Zero-fill so that entries not touched by any CPUVEC_SETUP_* below are
     detectable as NULL (relied on for the preinv_* fallbacks further
     down).  */
  memset (&decided_cpuvec, '\0', sizeof (decided_cpuvec));

  /* Generic x86_64 routines first; the chip-specific setups below
     overwrite individual entries.  */
  CPUVEC_SETUP_x86_64;
  CPUVEC_SETUP_fat;

  if (1)
    {
      char vendor_string[13];   /* 12 CPUID bytes plus NUL terminator */
      char dummy_string[12];
      long fms;                 /* EAX from CPUID leaf 1: family/model/stepping */
      int family, model;

      /* Leaf 0: vendor identification string ("GenuineIntel" etc).  */
      __gmpn_cpuid (vendor_string, 0);
      vendor_string[12] = 0;

      /* Leaf 1: combine the base and extended family/model fields per the
         standard CPUID encoding (family = bits 11:8 plus 27:20, model =
         bits 7:4 plus 19:16 shifted up).  */
      fms = __gmpn_cpuid (dummy_string, 1);
      family = ((fms >> 8) & 0xf) + ((fms >> 20) & 0xff);
      model = ((fms >> 4) & 0xf) + ((fms >> 12) & 0xf0);

      if (strcmp (vendor_string, "GenuineIntel") == 0)
        {
          switch (family)
            {
            case 4:
            case 5:
              /* 32-bit-only families; presumably cannot occur in an
                 x86_64 build, hence the hard abort.  */
              abort ();
              break;

            case 6:
              if (model == 28)          /* Atom */
                CPUVEC_SETUP_atom;
              else
                CPUVEC_SETUP_core2;
              break;

            case 15:                    /* Pentium 4 / NetBurst */
              CPUVEC_SETUP_pentium4;
              break;
            }
        }
      else if (strcmp (vendor_string, "AuthenticAMD") == 0)
        {
          switch (family)
            {
            case 5:
            case 6:
              /* 32-bit-only families, as above.  */
              abort ();
              break;
            case 15:
              /* CPUVEC_SETUP_athlon */
              break;
            }
        }
    }

  /* There's no x86 generic mpn_preinv_divrem_1 or mpn_preinv_mod_1.
     Instead default to the plain versions from whichever CPU we detected.
     The function arguments are compatible, no need for any glue code.  */
  if (decided_cpuvec.preinv_divrem_1 == NULL)
    decided_cpuvec.preinv_divrem_1 =(preinv_divrem_1_t)decided_cpuvec.divrem_1;
  if (decided_cpuvec.preinv_mod_1 == NULL)
    decided_cpuvec.preinv_mod_1    =(preinv_mod_1_t)   decided_cpuvec.mod_1;

  /* Copy the decided routines into the live global vector.  */
  ASSERT_CPUVEC (decided_cpuvec);
  CPUVEC_INSTALL (decided_cpuvec);

  /* Set this once the threshold fields are ready.
     Use volatile to prevent it getting moved.  */
  ((volatile struct cpuvec_t *) &__gmpn_cpuvec)->initialized = 1;
}