/* Test fat binary setups.

Copyright 2003, 2012 Free Software Foundation, Inc.

This file is part of the GNU MP Library test suite.

The GNU MP Library test suite is free software; you can redistribute it
and/or modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.

The GNU MP Library test suite is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
Public License for more details.

You should have received a copy of the GNU General Public License along with
the GNU MP Library test suite.  If not, see https://www.gnu.org/licenses/.  */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "gmp-impl.h"
#include "longlong.h"
#include "tests.h"


/* In this program we're aiming to pick up certain subtle problems that
   might creep into a fat binary.

   1. We want to ensure the application entry point routines like
      __gmpn_add_n dispatch to the correct field of __gmpn_cpuvec.

      Note that these routines are not exercised as a side effect of other
      tests (eg. the mpz routines).  Internally the fields of __gmpn_cpuvec
      are used directly, so we need test code which explicitly calls the
      mpn functions, the way an application would.

   2. We want to ensure the initial __gmpn_cpuvec data has the initializer
      function pointers in the correct fields, and that those initializer
      functions dispatch to their correct corresponding field once
      initialization has been done.

      Only one of the initializer routines executes in a normal program,
      since that routine sets all the pointers to actual mpn functions.  We
      forcibly reset __gmpn_cpuvec so we can run each of them.

   In both cases the data put through the functions is nothing special,
   just enough to verify that, for instance, an add_n really does an add_n
   and hasn't mistakenly dispatched to sub_n or the like.

   The loop around each test exercises the initializer routine on the
   first iteration, and the dispatcher routine on the second.

   If the dispatcher and/or initializer routines are generated mechanically
   via macros (eg. mpn/x86/fat/fat_entry.asm) then there shouldn't be too
   much risk of them going wrong, provided the structure layout is
   correctly expressed.  But if they're in C then it's good to guard
   against typos in what is rather repetitive code.  The initializer data
   for __gmpn_cpuvec in fat.c is always done by hand and is likewise a bit
   repetitive.  */
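
/* For illustration, a hedged sketch of the mechanism under test; the real
   definitions live in fat.c and the fat_entry files, and the exact shape
   here is assumed rather than quoted.  Each fat entry point reduces to an
   indirect call through the global cpuvec, along the lines of

       mp_limb_t
       mpn_add_n (mp_ptr wp, mp_srcptr xp, mp_srcptr yp, mp_size_t n)
       {
         return (*__gmpn_cpuvec.add_n) (wp, xp, yp, n);
       }

   and the initial cpuvec holds stub routines which detect the CPU, fill in
   every field with the routines for that CPU, then redo the original call
   through the freshly set pointer.  */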


/* Dummies so this program also compiles when not a fat binary.  */
#if ! WANT_FAT_BINARY
struct cpuvec_t {
  int  dummy;
};
struct cpuvec_t __gmpn_cpuvec;
#define ITERATE_FAT_THRESHOLDS()  do { } while (0)
#endif
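
/* In a fat build the real struct cpuvec_t, declared in the fat binary
   support headers, instead holds one function pointer per dispatched mpn
   entry point plus the tuned threshold values read by check_thresholds
   below.  */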

/* saved from program startup */
struct cpuvec_t  initial_cpuvec;

void
check_functions (void)
{
  mp_limb_t  wp[2], xp[2], yp[2], r;
  int  i;

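  /* Each stanza below restores __gmpn_cpuvec to its startup contents, then
     runs twice: iteration 0 goes through the initializer stub, iteration 1
     through the now-initialized dispatcher, per the file comments above.  */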
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 123;
      yp[0] = 456;
      mpn_add_n (wp, xp, yp, (mp_size_t) 1);
      ASSERT_ALWAYS (wp[0] == 579);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 123;
      wp[0] = 456;
      r = mpn_addmul_1 (wp, xp, (mp_size_t) 1, CNST_LIMB(2));
      ASSERT_ALWAYS (wp[0] == 702);
      ASSERT_ALWAYS (r == 0);
    }

#if HAVE_NATIVE_mpn_copyd
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 123;
      xp[1] = 456;
      mpn_copyd (xp+1, xp, (mp_size_t) 1);
      ASSERT_ALWAYS (xp[1] == 123);
    }
#endif

#if HAVE_NATIVE_mpn_copyi
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 123;
      xp[1] = 456;
      mpn_copyi (xp, xp+1, (mp_size_t) 1);
      ASSERT_ALWAYS (xp[0] == 456);
    }
#endif

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 1605;
      mpn_divexact_1 (wp, xp, (mp_size_t) 1, CNST_LIMB(5));
      ASSERT_ALWAYS (wp[0] == 321);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 1296;
      r = mpn_divexact_by3c (wp, xp, (mp_size_t) 1, CNST_LIMB(0));
      ASSERT_ALWAYS (wp[0] == 432);
      ASSERT_ALWAYS (r == 0);
    }

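  /* A qxn argument of 1 asks mpn_divrem_1 for one low fraction limb, so
     the integer quotient 287/7 = 41 lands in wp[1] and the zero fraction
     limb in wp[0].  */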
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 287;
      r = mpn_divrem_1 (wp, (mp_size_t) 1, xp, (mp_size_t) 1, CNST_LIMB(7));
      ASSERT_ALWAYS (wp[1] == 41);
      ASSERT_ALWAYS (wp[0] == 0);
      ASSERT_ALWAYS (r == 0);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 12;
      r = mpn_gcd_1 (xp, (mp_size_t) 1, CNST_LIMB(9));
      ASSERT_ALWAYS (r == 3);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 0x1001;
      mpn_lshift (wp, xp, (mp_size_t) 1, 1);
      ASSERT_ALWAYS (wp[0] == 0x2002);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 14;
      r = mpn_mod_1 (xp, (mp_size_t) 1, CNST_LIMB(4));
      ASSERT_ALWAYS (r == 2);
    }

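  /* mpn_mod_34lsub1 returns a value which is congruent to the operand
     modulo 2^(GMP_NUMB_BITS*3/4)-1 but not necessarily fully reduced,
     hence r is reduced mod "mod" before comparing.  */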
#if (GMP_NUMB_BITS % 4) == 0
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      int  bits = (GMP_NUMB_BITS / 4) * 3;
      mp_limb_t  mod = (CNST_LIMB(1) << bits) - 1;
      mp_limb_t  want = GMP_NUMB_MAX % mod;
      xp[0] = GMP_NUMB_MAX;
      r = mpn_mod_34lsub1 (xp, (mp_size_t) 1);
      ASSERT_ALWAYS (r % mod == want);
    }
#endif

  /* mpn_modexact_1c_odd is in __gmpn_cpuvec too, but is not exercised
     here.  */

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 14;
      r = mpn_mul_1 (wp, xp, (mp_size_t) 1, CNST_LIMB(4));
      ASSERT_ALWAYS (wp[0] == 56);
      ASSERT_ALWAYS (r == 0);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 5;
      yp[0] = 7;
      mpn_mul_basecase (wp, xp, (mp_size_t) 1, yp, (mp_size_t) 1);
      ASSERT_ALWAYS (wp[0] == 35);
      ASSERT_ALWAYS (wp[1] == 0);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 5;
      yp[0] = 7;
      mpn_mullo_basecase (wp, xp, yp, (mp_size_t) 1);
      ASSERT_ALWAYS (wp[0] == 35);
    }

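  /* GMP_LIMB_HIGHBIT = 2^(GMP_LIMB_BITS-1) is already a normalized
     divisor, so the shift argument is 0.  The integer quotient of 0x101 is
     0 (checked in wp[1]) and the one fraction limb is 2*0x101 = 0x202
     (checked in wp[0]).  */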
#if HAVE_NATIVE_mpn_preinv_divrem_1 && GMP_NAIL_BITS == 0
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 0x101;
      r = mpn_preinv_divrem_1 (wp, (mp_size_t) 1, xp, (mp_size_t) 1,
                               GMP_LIMB_HIGHBIT,
                               refmpn_invert_limb (GMP_LIMB_HIGHBIT), 0);
      ASSERT_ALWAYS (wp[0] == 0x202);
      ASSERT_ALWAYS (wp[1] == 0);
      ASSERT_ALWAYS (r == 0);
    }
#endif

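  /* The precomputed-inverse routines work on full limbs, hence the
     GMP_NAIL_BITS == 0 conditions here and above.  */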
#if GMP_NAIL_BITS == 0
  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = GMP_LIMB_HIGHBIT+123;
      r = mpn_preinv_mod_1 (xp, (mp_size_t) 1, GMP_LIMB_HIGHBIT,
                            refmpn_invert_limb (GMP_LIMB_HIGHBIT));
      ASSERT_ALWAYS (r == 123);
    }
#endif

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 0x8008;
      mpn_rshift (wp, xp, (mp_size_t) 1, 1);
      ASSERT_ALWAYS (wp[0] == 0x4004);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 5;
      mpn_sqr_basecase (wp, xp, (mp_size_t) 1);
      ASSERT_ALWAYS (wp[0] == 25);
      ASSERT_ALWAYS (wp[1] == 0);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 999;
      yp[0] = 666;
      mpn_sub_n (wp, xp, yp, (mp_size_t) 1);
      ASSERT_ALWAYS (wp[0] == 333);
    }

  memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));
  for (i = 0; i < 2; i++)
    {
      xp[0] = 123;
      wp[0] = 456;
      r = mpn_submul_1 (wp, xp, (mp_size_t) 1, CNST_LIMB(2));
      ASSERT_ALWAYS (wp[0] == 210);
      ASSERT_ALWAYS (r == 0);
    }
}

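/* A hedged sketch of the threshold mechanism; the precise expansion lives
   in gmp-impl.h and is assumed here, not quoted.  In a fat build each
   tuned threshold macro reads its value through the cpuvec, conceptually

       #define MUL_TOOM22_THRESHOLD                                      \
         ((__gmpn_cpuvec_initialized ? 0 : (__gmpn_cpuvec_init (), 0)),  \
          __gmpn_cpuvec.mul_toom22_threshold)

   so merely evaluating a threshold on an uninitialized cpuvec must
   trigger the one-time CPU detection.  That is what ITERATE checks
   below.  */
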
/* Expect the first use of each fat threshold to invoke the necessary
   initialization.  */
void
check_thresholds (void)
{
#define ITERATE(name,field)                                             \
  do {                                                                  \
    __gmpn_cpuvec_initialized = 0;                                      \
    memcpy (&__gmpn_cpuvec, &initial_cpuvec, sizeof (__gmpn_cpuvec));   \
    ASSERT_ALWAYS (name != 0);                                          \
    ASSERT_ALWAYS (name == __gmpn_cpuvec.field);                        \
    ASSERT_ALWAYS (__gmpn_cpuvec_initialized);                          \
  } while (0)

  ITERATE_FAT_THRESHOLDS ();
}


int
main (void)
{
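  /* Save the startup cpuvec, with its initializer function pointers,
     before anything can have dispatched through it.  */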
  memcpy (&initial_cpuvec, &__gmpn_cpuvec, sizeof (__gmpn_cpuvec));

  tests_start ();

  check_functions ();
  check_thresholds ();

  tests_end ();
  exit (0);
}