1/*
2 * Copyright (c) 2018 Thomas Pornin <pornin@bolet.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25#include "inner.h"
26
27#if BR_INT128 || BR_UMUL128
28
29#if BR_UMUL128
30#include <intrin.h>
31#endif
32
33static const unsigned char P256_G[] = {
34	0x04, 0x6B, 0x17, 0xD1, 0xF2, 0xE1, 0x2C, 0x42, 0x47, 0xF8,
35	0xBC, 0xE6, 0xE5, 0x63, 0xA4, 0x40, 0xF2, 0x77, 0x03, 0x7D,
36	0x81, 0x2D, 0xEB, 0x33, 0xA0, 0xF4, 0xA1, 0x39, 0x45, 0xD8,
37	0x98, 0xC2, 0x96, 0x4F, 0xE3, 0x42, 0xE2, 0xFE, 0x1A, 0x7F,
38	0x9B, 0x8E, 0xE7, 0xEB, 0x4A, 0x7C, 0x0F, 0x9E, 0x16, 0x2B,
39	0xCE, 0x33, 0x57, 0x6B, 0x31, 0x5E, 0xCE, 0xCB, 0xB6, 0x40,
40	0x68, 0x37, 0xBF, 0x51, 0xF5
41};
42
43static const unsigned char P256_N[] = {
44	0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF,
45	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBC, 0xE6, 0xFA, 0xAD,
46	0xA7, 0x17, 0x9E, 0x84, 0xF3, 0xB9, 0xCA, 0xC2, 0xFC, 0x63,
47	0x25, 0x51
48};
49
50static const unsigned char *
51api_generator(int curve, size_t *len)
52{
53	(void)curve;
54	*len = sizeof P256_G;
55	return P256_G;
56}
57
58static const unsigned char *
59api_order(int curve, size_t *len)
60{
61	(void)curve;
62	*len = sizeof P256_N;
63	return P256_N;
64}
65
66static size_t
67api_xoff(int curve, size_t *len)
68{
69	(void)curve;
70	*len = 32;
71	return 1;
72}
73
74/*
75 * A field element is encoded as four 64-bit integers, in basis 2^64.
76 * Values may reach up to 2^256-1. Montgomery multiplication is used.
77 */
78
79/* R = 2^256 mod p */
80static const uint64_t F256_R[] = {
81	0x0000000000000001, 0xFFFFFFFF00000000,
82	0xFFFFFFFFFFFFFFFF, 0x00000000FFFFFFFE
83};
84
85/* Curve equation is y^2 = x^3 - 3*x + B. This constant is B*R mod p
86   (Montgomery representation of B). */
87static const uint64_t P256_B_MONTY[] = {
88	0xD89CDF6229C4BDDF, 0xACF005CD78843090,
89	0xE5A220ABF7212ED6, 0xDC30061D04874834
90};
91
92/*
93 * Addition in the field.
94 */
95static inline void
96f256_add(uint64_t *d, const uint64_t *a, const uint64_t *b)
97{
98#if BR_INT128
99	unsigned __int128 w;
100	uint64_t t;
101
102	w = (unsigned __int128)a[0] + b[0];
103	d[0] = (uint64_t)w;
104	w = (unsigned __int128)a[1] + b[1] + (w >> 64);
105	d[1] = (uint64_t)w;
106	w = (unsigned __int128)a[2] + b[2] + (w >> 64);
107	d[2] = (uint64_t)w;
108	w = (unsigned __int128)a[3] + b[3] + (w >> 64);
109	d[3] = (uint64_t)w;
110	t = (uint64_t)(w >> 64);
111
112	/*
113	 * 2^256 = 2^224 - 2^192 - 2^96 + 1 in the field.
114	 */
115	w = (unsigned __int128)d[0] + t;
116	d[0] = (uint64_t)w;
117	w = (unsigned __int128)d[1] + (w >> 64) - (t << 32);
118	d[1] = (uint64_t)w;
119	/* Here, carry "w >> 64" can only be 0 or -1 */
120	w = (unsigned __int128)d[2] - ((w >> 64) & 1);
121	d[2] = (uint64_t)w;
122	/* Again, carry is 0 or -1 */
123	d[3] += (uint64_t)(w >> 64) + (t << 32) - t;
124
125#elif BR_UMUL128
126
127	unsigned char cc;
128	uint64_t t;
129
130	cc = _addcarry_u64(0, a[0], b[0], &d[0]);
131	cc = _addcarry_u64(cc, a[1], b[1], &d[1]);
132	cc = _addcarry_u64(cc, a[2], b[2], &d[2]);
133	cc = _addcarry_u64(cc, a[3], b[3], &d[3]);
134
135	/*
136	 * If there is a carry, then we want to subtract p, which we
137	 * do by adding 2^256 - p.
138	 */
139	t = cc;
140	cc = _addcarry_u64(cc, d[0], 0, &d[0]);
141	cc = _addcarry_u64(cc, d[1], -(t << 32), &d[1]);
142	cc = _addcarry_u64(cc, d[2], -t, &d[2]);
143	(void)_addcarry_u64(cc, d[3], (t << 32) - (t << 1), &d[3]);
144
145#endif
146}
147
148/*
149 * Subtraction in the field.
150 */
151static inline void
152f256_sub(uint64_t *d, const uint64_t *a, const uint64_t *b)
153{
154#if BR_INT128
155
156	unsigned __int128 w;
157	uint64_t t;
158
159	w = (unsigned __int128)a[0] - b[0];
160	d[0] = (uint64_t)w;
161	w = (unsigned __int128)a[1] - b[1] - ((w >> 64) & 1);
162	d[1] = (uint64_t)w;
163	w = (unsigned __int128)a[2] - b[2] - ((w >> 64) & 1);
164	d[2] = (uint64_t)w;
165	w = (unsigned __int128)a[3] - b[3] - ((w >> 64) & 1);
166	d[3] = (uint64_t)w;
167	t = (uint64_t)(w >> 64) & 1;
168
169	/*
170	 * p = 2^256 - 2^224 + 2^192 + 2^96 - 1.
171	 */
172	w = (unsigned __int128)d[0] - t;
173	d[0] = (uint64_t)w;
174	w = (unsigned __int128)d[1] + (t << 32) - ((w >> 64) & 1);
175	d[1] = (uint64_t)w;
176	/* Here, carry "w >> 64" can only be 0 or +1 */
177	w = (unsigned __int128)d[2] + (w >> 64);
178	d[2] = (uint64_t)w;
179	/* Again, carry is 0 or +1 */
180	d[3] += (uint64_t)(w >> 64) - (t << 32) + t;
181
182#elif BR_UMUL128
183
184	unsigned char cc;
185	uint64_t t;
186
187	cc = _subborrow_u64(0, a[0], b[0], &d[0]);
188	cc = _subborrow_u64(cc, a[1], b[1], &d[1]);
189	cc = _subborrow_u64(cc, a[2], b[2], &d[2]);
190	cc = _subborrow_u64(cc, a[3], b[3], &d[3]);
191
192	/*
193	 * If there is a carry, then we need to add p.
194	 */
195	t = cc;
196	cc = _addcarry_u64(0, d[0], -t, &d[0]);
197	cc = _addcarry_u64(cc, d[1], (-t) >> 32, &d[1]);
198	cc = _addcarry_u64(cc, d[2], 0, &d[2]);
199	(void)_addcarry_u64(cc, d[3], t - (t << 32), &d[3]);
200
201#endif
202}
203
204/*
205 * Montgomery multiplication in the field.
206 */
207static void
208f256_montymul(uint64_t *d, const uint64_t *a, const uint64_t *b)
209{
210#if BR_INT128
211
212	uint64_t x, f, t0, t1, t2, t3, t4;
213	unsigned __int128 z, ff;
214	int i;
215
216	/*
217	 * When computing d <- d + a[u]*b, we also add f*p such
218	 * that d + a[u]*b + f*p is a multiple of 2^64. Since
219	 * p = -1 mod 2^64, we can compute f = d[0] + a[u]*b[0] mod 2^64.
220	 */
221
222	/*
223	 * Step 1: t <- (a[0]*b + f*p) / 2^64
224	 * We have f = a[0]*b[0] mod 2^64. Since p = -1 mod 2^64, this
225	 * ensures that (a[0]*b + f*p) is a multiple of 2^64.
226	 *
227	 * We also have: f*p = f*2^256 - f*2^224 + f*2^192 + f*2^96 - f.
228	 */
229	x = a[0];
230	z = (unsigned __int128)b[0] * x;
231	f = (uint64_t)z;
232	z = (unsigned __int128)b[1] * x + (z >> 64) + (uint64_t)(f << 32);
233	t0 = (uint64_t)z;
234	z = (unsigned __int128)b[2] * x + (z >> 64) + (uint64_t)(f >> 32);
235	t1 = (uint64_t)z;
236	z = (unsigned __int128)b[3] * x + (z >> 64) + f;
237	t2 = (uint64_t)z;
238	t3 = (uint64_t)(z >> 64);
239	ff = ((unsigned __int128)f << 64) - ((unsigned __int128)f << 32);
240	z = (unsigned __int128)t2 + (uint64_t)ff;
241	t2 = (uint64_t)z;
242	z = (unsigned __int128)t3 + (z >> 64) + (ff >> 64);
243	t3 = (uint64_t)z;
244	t4 = (uint64_t)(z >> 64);
245
246	/*
247	 * Steps 2 to 4: t <- (t + a[i]*b + f*p) / 2^64
248	 */
249	for (i = 1; i < 4; i ++) {
250		x = a[i];
251
252		/* t <- (t + x*b - f) / 2^64 */
253		z = (unsigned __int128)b[0] * x + t0;
254		f = (uint64_t)z;
255		z = (unsigned __int128)b[1] * x + t1 + (z >> 64);
256		t0 = (uint64_t)z;
257		z = (unsigned __int128)b[2] * x + t2 + (z >> 64);
258		t1 = (uint64_t)z;
259		z = (unsigned __int128)b[3] * x + t3 + (z >> 64);
260		t2 = (uint64_t)z;
261		z = t4 + (z >> 64);
262		t3 = (uint64_t)z;
263		t4 = (uint64_t)(z >> 64);
264
265		/* t <- t + f*2^32, carry in the upper half of z */
266		z = (unsigned __int128)t0 + (uint64_t)(f << 32);
267		t0 = (uint64_t)z;
268		z = (z >> 64) + (unsigned __int128)t1 + (uint64_t)(f >> 32);
269		t1 = (uint64_t)z;
270
271		/* t <- t + f*2^192 - f*2^160 + f*2^128 */
272		ff = ((unsigned __int128)f << 64)
273			- ((unsigned __int128)f << 32) + f;
274		z = (z >> 64) + (unsigned __int128)t2 + (uint64_t)ff;
275		t2 = (uint64_t)z;
276		z = (unsigned __int128)t3 + (z >> 64) + (ff >> 64);
277		t3 = (uint64_t)z;
278		t4 += (uint64_t)(z >> 64);
279	}
280
281	/*
282	 * At that point, we have computed t = (a*b + F*p) / 2^256, where
283	 * F is a 256-bit integer whose limbs are the "f" coefficients
284	 * in the steps above. We have:
285	 *   a <= 2^256-1
286	 *   b <= 2^256-1
287	 *   F <= 2^256-1
288	 * Hence:
289	 *   a*b + F*p <= (2^256-1)*(2^256-1) + p*(2^256-1)
290	 *   a*b + F*p <= 2^256*(2^256 - 2 + p) + 1 - p
291	 * Therefore:
292	 *   t < 2^256 + p - 2
293	 * Since p < 2^256, it follows that:
294	 *   t4 can be only 0 or 1
295	 *   t - p < 2^256
296	 * We can therefore subtract p from t, conditionally on t4, to
297	 * get a nonnegative result that fits on 256 bits.
298	 */
299	z = (unsigned __int128)t0 + t4;
300	t0 = (uint64_t)z;
301	z = (unsigned __int128)t1 - (t4 << 32) + (z >> 64);
302	t1 = (uint64_t)z;
303	z = (unsigned __int128)t2 - (z >> 127);
304	t2 = (uint64_t)z;
305	t3 = t3 - (uint64_t)(z >> 127) - t4 + (t4 << 32);
306
307	d[0] = t0;
308	d[1] = t1;
309	d[2] = t2;
310	d[3] = t3;
311
312#elif BR_UMUL128
313
314	uint64_t x, f, t0, t1, t2, t3, t4;
315	uint64_t zl, zh, ffl, ffh;
316	unsigned char k, m;
317	int i;
318
319	/*
320	 * When computing d <- d + a[u]*b, we also add f*p such
321	 * that d + a[u]*b + f*p is a multiple of 2^64. Since
322	 * p = -1 mod 2^64, we can compute f = d[0] + a[u]*b[0] mod 2^64.
323	 */
324
325	/*
326	 * Step 1: t <- (a[0]*b + f*p) / 2^64
327	 * We have f = a[0]*b[0] mod 2^64. Since p = -1 mod 2^64, this
328	 * ensures that (a[0]*b + f*p) is a multiple of 2^64.
329	 *
330	 * We also have: f*p = f*2^256 - f*2^224 + f*2^192 + f*2^96 - f.
331	 */
332	x = a[0];
333
334	zl = _umul128(b[0], x, &zh);
335	f = zl;
336	t0 = zh;
337
338	zl = _umul128(b[1], x, &zh);
339	k = _addcarry_u64(0, zl, t0, &zl);
340	(void)_addcarry_u64(k, zh, 0, &zh);
341	k = _addcarry_u64(0, zl, f << 32, &zl);
342	(void)_addcarry_u64(k, zh, 0, &zh);
343	t0 = zl;
344	t1 = zh;
345
346	zl = _umul128(b[2], x, &zh);
347	k = _addcarry_u64(0, zl, t1, &zl);
348	(void)_addcarry_u64(k, zh, 0, &zh);
349	k = _addcarry_u64(0, zl, f >> 32, &zl);
350	(void)_addcarry_u64(k, zh, 0, &zh);
351	t1 = zl;
352	t2 = zh;
353
354	zl = _umul128(b[3], x, &zh);
355	k = _addcarry_u64(0, zl, t2, &zl);
356	(void)_addcarry_u64(k, zh, 0, &zh);
357	k = _addcarry_u64(0, zl, f, &zl);
358	(void)_addcarry_u64(k, zh, 0, &zh);
359	t2 = zl;
360	t3 = zh;
361
362	t4 = _addcarry_u64(0, t3, f, &t3);
363	k = _subborrow_u64(0, t2, f << 32, &t2);
364	k = _subborrow_u64(k, t3, f >> 32, &t3);
365	(void)_subborrow_u64(k, t4, 0, &t4);
366
367	/*
368	 * Steps 2 to 4: t <- (t + a[i]*b + f*p) / 2^64
369	 */
370	for (i = 1; i < 4; i ++) {
371		x = a[i];
372		/* f = t0 + x * b[0]; -- computed below */
373
374		/* t <- (t + x*b - f) / 2^64 */
375		zl = _umul128(b[0], x, &zh);
376		k = _addcarry_u64(0, zl, t0, &f);
377		(void)_addcarry_u64(k, zh, 0, &t0);
378
379		zl = _umul128(b[1], x, &zh);
380		k = _addcarry_u64(0, zl, t0, &zl);
381		(void)_addcarry_u64(k, zh, 0, &zh);
382		k = _addcarry_u64(0, zl, t1, &t0);
383		(void)_addcarry_u64(k, zh, 0, &t1);
384
385		zl = _umul128(b[2], x, &zh);
386		k = _addcarry_u64(0, zl, t1, &zl);
387		(void)_addcarry_u64(k, zh, 0, &zh);
388		k = _addcarry_u64(0, zl, t2, &t1);
389		(void)_addcarry_u64(k, zh, 0, &t2);
390
391		zl = _umul128(b[3], x, &zh);
392		k = _addcarry_u64(0, zl, t2, &zl);
393		(void)_addcarry_u64(k, zh, 0, &zh);
394		k = _addcarry_u64(0, zl, t3, &t2);
395		(void)_addcarry_u64(k, zh, 0, &t3);
396
397		t4 = _addcarry_u64(0, t3, t4, &t3);
398
399		/* t <- t + f*2^32, carry in k */
400		k = _addcarry_u64(0, t0, f << 32, &t0);
401		k = _addcarry_u64(k, t1, f >> 32, &t1);
402
403		/* t <- t + f*2^192 - f*2^160 + f*2^128 */
404		m = _subborrow_u64(0, f, f << 32, &ffl);
405		(void)_subborrow_u64(m, f, f >> 32, &ffh);
406		k = _addcarry_u64(k, t2, ffl, &t2);
407		k = _addcarry_u64(k, t3, ffh, &t3);
408		(void)_addcarry_u64(k, t4, 0, &t4);
409	}
410
411	/*
412	 * At that point, we have computed t = (a*b + F*p) / 2^256, where
413	 * F is a 256-bit integer whose limbs are the "f" coefficients
414	 * in the steps above. We have:
415	 *   a <= 2^256-1
416	 *   b <= 2^256-1
417	 *   F <= 2^256-1
418	 * Hence:
419	 *   a*b + F*p <= (2^256-1)*(2^256-1) + p*(2^256-1)
420	 *   a*b + F*p <= 2^256*(2^256 - 2 + p) + 1 - p
421	 * Therefore:
422	 *   t < 2^256 + p - 2
423	 * Since p < 2^256, it follows that:
424	 *   t4 can be only 0 or 1
425	 *   t - p < 2^256
426	 * We can therefore subtract p from t, conditionally on t4, to
427	 * get a nonnegative result that fits on 256 bits.
428	 */
429	k = _addcarry_u64(0, t0, t4, &t0);
430	k = _addcarry_u64(k, t1, -(t4 << 32), &t1);
431	k = _addcarry_u64(k, t2, -t4, &t2);
432	(void)_addcarry_u64(k, t3, (t4 << 32) - (t4 << 1), &t3);
433
434	d[0] = t0;
435	d[1] = t1;
436	d[2] = t2;
437	d[3] = t3;
438
439#endif
440}
441
442/*
443 * Montgomery squaring in the field; currently a basic wrapper around
444 * multiplication (inline, should be optimized away).
445 * TODO: see if some extra speed can be gained here.
446 */
447static inline void
448f256_montysquare(uint64_t *d, const uint64_t *a)
449{
450	f256_montymul(d, a, a);
451}
452
453/*
454 * Convert to Montgomery representation.
455 */
456static void
457f256_tomonty(uint64_t *d, const uint64_t *a)
458{
459	/*
460	 * R2 = 2^512 mod p.
461	 * If R = 2^256 mod p, then R2 = R^2 mod p; and the Montgomery
462	 * multiplication of a by R2 is: a*R2/R = a*R mod p, i.e. the
463	 * conversion to Montgomery representation.
464	 */
465	static const uint64_t R2[] = {
466		0x0000000000000003,
467		0xFFFFFFFBFFFFFFFF,
468		0xFFFFFFFFFFFFFFFE,
469		0x00000004FFFFFFFD
470	};
471
472	f256_montymul(d, a, R2);
473}
474
475/*
476 * Convert from Montgomery representation.
477 */
478static void
479f256_frommonty(uint64_t *d, const uint64_t *a)
480{
481	/*
482	 * Montgomery multiplication by 1 is division by 2^256 modulo p.
483	 */
484	static const uint64_t one[] = { 1, 0, 0, 0 };
485
486	f256_montymul(d, a, one);
487}
488
489/*
490 * Inversion in the field. If the source value is 0 modulo p, then this
491 * returns 0 or p. This function uses Montgomery representation.
492 */
493static void
494f256_invert(uint64_t *d, const uint64_t *a)
495{
496	/*
497	 * We compute a^(p-2) mod p. The exponent pattern (from high to
498	 * low) is:
499	 *  - 32 bits of value 1
500	 *  - 31 bits of value 0
501	 *  - 1 bit of value 1
502	 *  - 96 bits of value 0
503	 *  - 94 bits of value 1
504	 *  - 1 bit of value 0
505	 *  - 1 bit of value 1
506	 * To speed up the square-and-multiply algorithm, we precompute
507	 * a^(2^31-1).
508	 */
509
510	uint64_t r[4], t[4];
511	int i;
512
513	memcpy(t, a, sizeof t);
514	for (i = 0; i < 30; i ++) {
515		f256_montysquare(t, t);
516		f256_montymul(t, t, a);
517	}
518
519	memcpy(r, t, sizeof t);
520	for (i = 224; i >= 0; i --) {
521		f256_montysquare(r, r);
522		switch (i) {
523		case 0:
524		case 2:
525		case 192:
526		case 224:
527			f256_montymul(r, r, a);
528			break;
529		case 3:
530		case 34:
531		case 65:
532			f256_montymul(r, r, t);
533			break;
534		}
535	}
536	memcpy(d, r, sizeof r);
537}
538
539/*
540 * Finalize reduction.
541 * Input value fits on 256 bits. This function subtracts p if and only
542 * if the input is greater than or equal to p.
543 */
544static inline void
545f256_final_reduce(uint64_t *a)
546{
547#if BR_INT128
548
549	uint64_t t0, t1, t2, t3, cc;
550	unsigned __int128 z;
551
552	/*
553	 * We add 2^224 - 2^192 - 2^96 + 1 to a. If there is no carry,
554	 * then a < p; otherwise, the addition result we computed is
555	 * the value we must return.
556	 */
557	z = (unsigned __int128)a[0] + 1;
558	t0 = (uint64_t)z;
559	z = (unsigned __int128)a[1] + (z >> 64) - ((uint64_t)1 << 32);
560	t1 = (uint64_t)z;
561	z = (unsigned __int128)a[2] - (z >> 127);
562	t2 = (uint64_t)z;
563	z = (unsigned __int128)a[3] - (z >> 127) + 0xFFFFFFFF;
564	t3 = (uint64_t)z;
565	cc = -(uint64_t)(z >> 64);
566
567	a[0] ^= cc & (a[0] ^ t0);
568	a[1] ^= cc & (a[1] ^ t1);
569	a[2] ^= cc & (a[2] ^ t2);
570	a[3] ^= cc & (a[3] ^ t3);
571
572#elif BR_UMUL128
573
574	uint64_t t0, t1, t2, t3, m;
575	unsigned char k;
576
577	k = _addcarry_u64(0, a[0], (uint64_t)1, &t0);
578	k = _addcarry_u64(k, a[1], -((uint64_t)1 << 32), &t1);
579	k = _addcarry_u64(k, a[2], -(uint64_t)1, &t2);
580	k = _addcarry_u64(k, a[3], ((uint64_t)1 << 32) - 2, &t3);
581	m = -(uint64_t)k;
582
583	a[0] ^= m & (a[0] ^ t0);
584	a[1] ^= m & (a[1] ^ t1);
585	a[2] ^= m & (a[2] ^ t2);
586	a[3] ^= m & (a[3] ^ t3);
587
588#endif
589}
590
591/*
592 * Points in affine and Jacobian coordinates.
593 *
594 *  - In affine coordinates, the point-at-infinity cannot be encoded.
595 *  - Jacobian coordinates (X,Y,Z) correspond to affine (X/Z^2,Y/Z^3);
596 *    if Z = 0 then this is the point-at-infinity.
597 */
598typedef struct {
599	uint64_t x[4];
600	uint64_t y[4];
601} p256_affine;
602
603typedef struct {
604	uint64_t x[4];
605	uint64_t y[4];
606	uint64_t z[4];
607} p256_jacobian;
608
609/*
610 * Decode a point. The returned point is in Jacobian coordinates, but
611 * with z = 1. If the encoding is invalid, or encodes a point which is
612 * not on the curve, or encodes the point at infinity, then this function
613 * returns 0. Otherwise, 1 is returned.
614 *
615 * The buffer is assumed to have length exactly 65 bytes.
616 */
617static uint32_t
618point_decode(p256_jacobian *P, const unsigned char *buf)
619{
620	uint64_t x[4], y[4], t[4], x3[4], tt;
621	uint32_t r;
622
623	/*
624	 * Header byte shall be 0x04.
625	 */
626	r = EQ(buf[0], 0x04);
627
628	/*
629	 * Decode X and Y coordinates, and convert them into
630	 * Montgomery representation.
631	 */
632	x[3] = br_dec64be(buf +  1);
633	x[2] = br_dec64be(buf +  9);
634	x[1] = br_dec64be(buf + 17);
635	x[0] = br_dec64be(buf + 25);
636	y[3] = br_dec64be(buf + 33);
637	y[2] = br_dec64be(buf + 41);
638	y[1] = br_dec64be(buf + 49);
639	y[0] = br_dec64be(buf + 57);
640	f256_tomonty(x, x);
641	f256_tomonty(y, y);
642
643	/*
644	 * Verify y^2 = x^3 + A*x + B. In curve P-256, A = -3.
645	 * Note that the Montgomery representation of 0 is 0. We must
646	 * take care to apply the final reduction to make sure we have
647	 * 0 and not p.
648	 */
649	f256_montysquare(t, y);
650	f256_montysquare(x3, x);
651	f256_montymul(x3, x3, x);
652	f256_sub(t, t, x3);
653	f256_add(t, t, x);
654	f256_add(t, t, x);
655	f256_add(t, t, x);
656	f256_sub(t, t, P256_B_MONTY);
657	f256_final_reduce(t);
658	tt = t[0] | t[1] | t[2] | t[3];
659	r &= EQ((uint32_t)(tt | (tt >> 32)), 0);
660
661	/*
662	 * Return the point in Jacobian coordinates (and Montgomery
663	 * representation).
664	 */
665	memcpy(P->x, x, sizeof x);
666	memcpy(P->y, y, sizeof y);
667	memcpy(P->z, F256_R, sizeof F256_R);
668	return r;
669}
670
671/*
672 * Final conversion for a point:
673 *  - The point is converted back to affine coordinates.
674 *  - Final reduction is performed.
675 *  - The point is encoded into the provided buffer.
676 *
677 * If the point is the point-at-infinity, all operations are performed,
678 * but the buffer contents are indeterminate, and 0 is returned. Otherwise,
679 * the encoded point is written in the buffer, and 1 is returned.
680 */
681static uint32_t
682point_encode(unsigned char *buf, const p256_jacobian *P)
683{
684	uint64_t t1[4], t2[4], z;
685
686	/* Set t1 = 1/z^2 and t2 = 1/z^3. */
687	f256_invert(t2, P->z);
688	f256_montysquare(t1, t2);
689	f256_montymul(t2, t2, t1);
690
691	/* Compute affine coordinates x (in t1) and y (in t2). */
692	f256_montymul(t1, P->x, t1);
693	f256_montymul(t2, P->y, t2);
694
695	/* Convert back from Montgomery representation, and finalize
696	   reductions. */
697	f256_frommonty(t1, t1);
698	f256_frommonty(t2, t2);
699	f256_final_reduce(t1);
700	f256_final_reduce(t2);
701
702	/* Encode. */
703	buf[0] = 0x04;
704	br_enc64be(buf +  1, t1[3]);
705	br_enc64be(buf +  9, t1[2]);
706	br_enc64be(buf + 17, t1[1]);
707	br_enc64be(buf + 25, t1[0]);
708	br_enc64be(buf + 33, t2[3]);
709	br_enc64be(buf + 41, t2[2]);
710	br_enc64be(buf + 49, t2[1]);
711	br_enc64be(buf + 57, t2[0]);
712
713	/* Return success if and only if P->z != 0. */
714	z = P->z[0] | P->z[1] | P->z[2] | P->z[3];
715	return NEQ((uint32_t)(z | z >> 32), 0);
716}
717
718/*
719 * Point doubling in Jacobian coordinates: point P is doubled.
720 * Note: if the source point is the point-at-infinity, then the result is
721 * still the point-at-infinity, which is correct. Moreover, if the three
722 * coordinates were zero, then they still are zero in the returned value.
723 *
724 * (Note: this is true even without the final reduction: if the three
725 * coordinates are encoded as four words of value zero each, then the
726 * result will also have all-zero coordinate encodings, not the alternate
727 * encoding as the integer p.)
728 */
729static void
730p256_double(p256_jacobian *P)
731{
732	/*
733	 * Doubling formulas are:
734	 *
735	 *   s = 4*x*y^2
736	 *   m = 3*(x + z^2)*(x - z^2)
737	 *   x' = m^2 - 2*s
738	 *   y' = m*(s - x') - 8*y^4
739	 *   z' = 2*y*z
740	 *
741	 * These formulas work for all points, including points of order 2
742	 * and points at infinity:
743	 *   - If y = 0 then z' = 0. But there is no such point in P-256
744	 *     anyway.
745	 *   - If z = 0 then z' = 0.
746	 */
747	uint64_t t1[4], t2[4], t3[4], t4[4];
748
749	/*
750	 * Compute z^2 in t1.
751	 */
752	f256_montysquare(t1, P->z);
753
754	/*
755	 * Compute x-z^2 in t2 and x+z^2 in t1.
756	 */
757	f256_add(t2, P->x, t1);
758	f256_sub(t1, P->x, t1);
759
760	/*
761	 * Compute 3*(x+z^2)*(x-z^2) in t1.
762	 */
763	f256_montymul(t3, t1, t2);
764	f256_add(t1, t3, t3);
765	f256_add(t1, t3, t1);
766
767	/*
768	 * Compute 4*x*y^2 (in t2) and 2*y^2 (in t3).
769	 */
770	f256_montysquare(t3, P->y);
771	f256_add(t3, t3, t3);
772	f256_montymul(t2, P->x, t3);
773	f256_add(t2, t2, t2);
774
775	/*
776	 * Compute x' = m^2 - 2*s.
777	 */
778	f256_montysquare(P->x, t1);
779	f256_sub(P->x, P->x, t2);
780	f256_sub(P->x, P->x, t2);
781
782	/*
783	 * Compute z' = 2*y*z.
784	 */
785	f256_montymul(t4, P->y, P->z);
786	f256_add(P->z, t4, t4);
787
788	/*
789	 * Compute y' = m*(s - x') - 8*y^4. Note that we already have
790	 * 2*y^2 in t3.
791	 */
792	f256_sub(t2, t2, P->x);
793	f256_montymul(P->y, t1, t2);
794	f256_montysquare(t4, t3);
795	f256_add(t4, t4, t4);
796	f256_sub(P->y, P->y, t4);
797}
798
799/*
800 * Point addition (Jacobian coordinates): P1 is replaced with P1+P2.
801 * This function computes the wrong result in the following cases:
802 *
803 *   - If P1 == 0 but P2 != 0
804 *   - If P1 != 0 but P2 == 0
805 *   - If P1 == P2
806 *
807 * In all three cases, P1 is set to the point at infinity.
808 *
809 * Returned value is 0 if one of the following occurs:
810 *
811 *   - P1 and P2 have the same Y coordinate.
812 *   - P1 == 0 and P2 == 0.
813 *   - The Y coordinate of one of the points is 0 and the other point is
814 *     the point at infinity.
815 *
816 * The third case cannot actually happen with valid points, since a point
817 * with Y == 0 is a point of order 2, and there is no point of order 2 on
818 * curve P-256.
819 *
820 * Therefore, assuming that P1 != 0 and P2 != 0 on input, then the caller
821 * can apply the following:
822 *
823 *   - If the result is not the point at infinity, then it is correct.
824 *   - Otherwise, if the returned value is 1, then this is a case of
825 *     P1+P2 == 0, so the result is indeed the point at infinity.
826 *   - Otherwise, P1 == P2, so a "double" operation should have been
827 *     performed.
828 *
829 * Note that you can get a returned value of 0 with a correct result,
830 * e.g. if P1 and P2 have the same Y coordinate, but distinct X coordinates.
831 */
832static uint32_t
833p256_add(p256_jacobian *P1, const p256_jacobian *P2)
834{
835	/*
836	 * Addtions formulas are:
837	 *
838	 *   u1 = x1 * z2^2
839	 *   u2 = x2 * z1^2
840	 *   s1 = y1 * z2^3
841	 *   s2 = y2 * z1^3
842	 *   h = u2 - u1
843	 *   r = s2 - s1
844	 *   x3 = r^2 - h^3 - 2 * u1 * h^2
845	 *   y3 = r * (u1 * h^2 - x3) - s1 * h^3
846	 *   z3 = h * z1 * z2
847	 */
848	uint64_t t1[4], t2[4], t3[4], t4[4], t5[4], t6[4], t7[4], tt;
849	uint32_t ret;
850
851	/*
852	 * Compute u1 = x1*z2^2 (in t1) and s1 = y1*z2^3 (in t3).
853	 */
854	f256_montysquare(t3, P2->z);
855	f256_montymul(t1, P1->x, t3);
856	f256_montymul(t4, P2->z, t3);
857	f256_montymul(t3, P1->y, t4);
858
859	/*
860	 * Compute u2 = x2*z1^2 (in t2) and s2 = y2*z1^3 (in t4).
861	 */
862	f256_montysquare(t4, P1->z);
863	f256_montymul(t2, P2->x, t4);
864	f256_montymul(t5, P1->z, t4);
865	f256_montymul(t4, P2->y, t5);
866
867	/*
868	 * Compute h = h2 - u1 (in t2) and r = s2 - s1 (in t4).
869	 * We need to test whether r is zero, so we will do some extra
870	 * reduce.
871	 */
872	f256_sub(t2, t2, t1);
873	f256_sub(t4, t4, t3);
874	f256_final_reduce(t4);
875	tt = t4[0] | t4[1] | t4[2] | t4[3];
876	ret = (uint32_t)(tt | (tt >> 32));
877	ret = (ret | -ret) >> 31;
878
879	/*
880	 * Compute u1*h^2 (in t6) and h^3 (in t5);
881	 */
882	f256_montysquare(t7, t2);
883	f256_montymul(t6, t1, t7);
884	f256_montymul(t5, t7, t2);
885
886	/*
887	 * Compute x3 = r^2 - h^3 - 2*u1*h^2.
888	 */
889	f256_montysquare(P1->x, t4);
890	f256_sub(P1->x, P1->x, t5);
891	f256_sub(P1->x, P1->x, t6);
892	f256_sub(P1->x, P1->x, t6);
893
894	/*
895	 * Compute y3 = r*(u1*h^2 - x3) - s1*h^3.
896	 */
897	f256_sub(t6, t6, P1->x);
898	f256_montymul(P1->y, t4, t6);
899	f256_montymul(t1, t5, t3);
900	f256_sub(P1->y, P1->y, t1);
901
902	/*
903	 * Compute z3 = h*z1*z2.
904	 */
905	f256_montymul(t1, P1->z, P2->z);
906	f256_montymul(P1->z, t1, t2);
907
908	return ret;
909}
910
911/*
912 * Point addition (mixed coordinates): P1 is replaced with P1+P2.
913 * This is a specialised function for the case when P2 is a non-zero point
914 * in affine coordinates.
915 *
916 * This function computes the wrong result in the following cases:
917 *
918 *   - If P1 == 0
919 *   - If P1 == P2
920 *
921 * In both cases, P1 is set to the point at infinity.
922 *
923 * Returned value is 0 if one of the following occurs:
924 *
925 *   - P1 and P2 have the same Y (affine) coordinate.
926 *   - The Y coordinate of P2 is 0 and P1 is the point at infinity.
927 *
928 * The second case cannot actually happen with valid points, since a point
929 * with Y == 0 is a point of order 2, and there is no point of order 2 on
930 * curve P-256.
931 *
932 * Therefore, assuming that P1 != 0 on input, then the caller
933 * can apply the following:
934 *
935 *   - If the result is not the point at infinity, then it is correct.
936 *   - Otherwise, if the returned value is 1, then this is a case of
937 *     P1+P2 == 0, so the result is indeed the point at infinity.
938 *   - Otherwise, P1 == P2, so a "double" operation should have been
939 *     performed.
940 *
941 * Again, a value of 0 may be returned in some cases where the addition
942 * result is correct.
943 */
944static uint32_t
945p256_add_mixed(p256_jacobian *P1, const p256_affine *P2)
946{
947	/*
948	 * Addtions formulas are:
949	 *
950	 *   u1 = x1
951	 *   u2 = x2 * z1^2
952	 *   s1 = y1
953	 *   s2 = y2 * z1^3
954	 *   h = u2 - u1
955	 *   r = s2 - s1
956	 *   x3 = r^2 - h^3 - 2 * u1 * h^2
957	 *   y3 = r * (u1 * h^2 - x3) - s1 * h^3
958	 *   z3 = h * z1
959	 */
960	uint64_t t1[4], t2[4], t3[4], t4[4], t5[4], t6[4], t7[4], tt;
961	uint32_t ret;
962
963	/*
964	 * Compute u1 = x1 (in t1) and s1 = y1 (in t3).
965	 */
966	memcpy(t1, P1->x, sizeof t1);
967	memcpy(t3, P1->y, sizeof t3);
968
969	/*
970	 * Compute u2 = x2*z1^2 (in t2) and s2 = y2*z1^3 (in t4).
971	 */
972	f256_montysquare(t4, P1->z);
973	f256_montymul(t2, P2->x, t4);
974	f256_montymul(t5, P1->z, t4);
975	f256_montymul(t4, P2->y, t5);
976
977	/*
978	 * Compute h = h2 - u1 (in t2) and r = s2 - s1 (in t4).
979	 * We need to test whether r is zero, so we will do some extra
980	 * reduce.
981	 */
982	f256_sub(t2, t2, t1);
983	f256_sub(t4, t4, t3);
984	f256_final_reduce(t4);
985	tt = t4[0] | t4[1] | t4[2] | t4[3];
986	ret = (uint32_t)(tt | (tt >> 32));
987	ret = (ret | -ret) >> 31;
988
989	/*
990	 * Compute u1*h^2 (in t6) and h^3 (in t5);
991	 */
992	f256_montysquare(t7, t2);
993	f256_montymul(t6, t1, t7);
994	f256_montymul(t5, t7, t2);
995
996	/*
997	 * Compute x3 = r^2 - h^3 - 2*u1*h^2.
998	 */
999	f256_montysquare(P1->x, t4);
1000	f256_sub(P1->x, P1->x, t5);
1001	f256_sub(P1->x, P1->x, t6);
1002	f256_sub(P1->x, P1->x, t6);
1003
1004	/*
1005	 * Compute y3 = r*(u1*h^2 - x3) - s1*h^3.
1006	 */
1007	f256_sub(t6, t6, P1->x);
1008	f256_montymul(P1->y, t4, t6);
1009	f256_montymul(t1, t5, t3);
1010	f256_sub(P1->y, P1->y, t1);
1011
1012	/*
1013	 * Compute z3 = h*z1*z2.
1014	 */
1015	f256_montymul(P1->z, P1->z, t2);
1016
1017	return ret;
1018}
1019
1020#if 0
1021/* unused */
1022/*
1023 * Point addition (mixed coordinates, complete): P1 is replaced with P1+P2.
1024 * This is a specialised function for the case when P2 is a non-zero point
1025 * in affine coordinates.
1026 *
1027 * This function returns the correct result in all cases.
1028 */
1029static uint32_t
1030p256_add_complete_mixed(p256_jacobian *P1, const p256_affine *P2)
1031{
1032	/*
1033	 * Addtions formulas, in the general case, are:
1034	 *
1035	 *   u1 = x1
1036	 *   u2 = x2 * z1^2
1037	 *   s1 = y1
1038	 *   s2 = y2 * z1^3
1039	 *   h = u2 - u1
1040	 *   r = s2 - s1
1041	 *   x3 = r^2 - h^3 - 2 * u1 * h^2
1042	 *   y3 = r * (u1 * h^2 - x3) - s1 * h^3
1043	 *   z3 = h * z1
1044	 *
1045	 * These formulas mishandle the two following cases:
1046	 *
1047	 *  - If P1 is the point-at-infinity (z1 = 0), then z3 is
1048	 *    incorrectly set to 0.
1049	 *
1050	 *  - If P1 = P2, then u1 = u2 and s1 = s2, and x3, y3 and z3
1051	 *    are all set to 0.
1052	 *
1053	 * However, if P1 + P2 = 0, then u1 = u2 but s1 != s2, and then
1054	 * we correctly get z3 = 0 (the point-at-infinity).
1055	 *
1056	 * To fix the case P1 = 0, we perform at the end a copy of P2
1057	 * over P1, conditional to z1 = 0.
1058	 *
1059	 * For P1 = P2: in that case, both h and r are set to 0, and
1060	 * we get x3, y3 and z3 equal to 0. We can test for that
1061	 * occurrence to make a mask which will be all-one if P1 = P2,
1062	 * or all-zero otherwise; then we can compute the double of P2
1063	 * and add it, combined with the mask, to (x3,y3,z3).
1064	 *
1065	 * Using the doubling formulas in p256_double() on (x2,y2),
1066	 * simplifying since P2 is affine (i.e. z2 = 1, implicitly),
1067	 * we get:
1068	 *   s = 4*x2*y2^2
1069	 *   m = 3*(x2 + 1)*(x2 - 1)
1070	 *   x' = m^2 - 2*s
1071	 *   y' = m*(s - x') - 8*y2^4
1072	 *   z' = 2*y2
1073	 * which requires only 6 multiplications. Added to the 11
1074	 * multiplications of the normal mixed addition in Jacobian
1075	 * coordinates, we get a cost of 17 multiplications in total.
1076	 */
1077	uint64_t t1[4], t2[4], t3[4], t4[4], t5[4], t6[4], t7[4], tt, zz;
1078	int i;
1079
1080	/*
1081	 * Set zz to -1 if P1 is the point at infinity, 0 otherwise.
1082	 */
1083	zz = P1->z[0] | P1->z[1] | P1->z[2] | P1->z[3];
1084	zz = ((zz | -zz) >> 63) - (uint64_t)1;
1085
1086	/*
1087	 * Compute u1 = x1 (in t1) and s1 = y1 (in t3).
1088	 */
1089	memcpy(t1, P1->x, sizeof t1);
1090	memcpy(t3, P1->y, sizeof t3);
1091
1092	/*
1093	 * Compute u2 = x2*z1^2 (in t2) and s2 = y2*z1^3 (in t4).
1094	 */
1095	f256_montysquare(t4, P1->z);
1096	f256_montymul(t2, P2->x, t4);
1097	f256_montymul(t5, P1->z, t4);
1098	f256_montymul(t4, P2->y, t5);
1099
1100	/*
1101	 * Compute h = h2 - u1 (in t2) and r = s2 - s1 (in t4).
1102	 * reduce.
1103	 */
1104	f256_sub(t2, t2, t1);
1105	f256_sub(t4, t4, t3);
1106
1107	/*
1108	 * If both h = 0 and r = 0, then P1 = P2, and we want to set
1109	 * the mask tt to -1; otherwise, the mask will be 0.
1110	 */
1111	f256_final_reduce(t2);
1112	f256_final_reduce(t4);
1113	tt = t2[0] | t2[1] | t2[2] | t2[3] | t4[0] | t4[1] | t4[2] | t4[3];
1114	tt = ((tt | -tt) >> 63) - (uint64_t)1;
1115
1116	/*
1117	 * Compute u1*h^2 (in t6) and h^3 (in t5);
1118	 */
1119	f256_montysquare(t7, t2);
1120	f256_montymul(t6, t1, t7);
1121	f256_montymul(t5, t7, t2);
1122
1123	/*
1124	 * Compute x3 = r^2 - h^3 - 2*u1*h^2.
1125	 */
1126	f256_montysquare(P1->x, t4);
1127	f256_sub(P1->x, P1->x, t5);
1128	f256_sub(P1->x, P1->x, t6);
1129	f256_sub(P1->x, P1->x, t6);
1130
1131	/*
1132	 * Compute y3 = r*(u1*h^2 - x3) - s1*h^3.
1133	 */
1134	f256_sub(t6, t6, P1->x);
1135	f256_montymul(P1->y, t4, t6);
1136	f256_montymul(t1, t5, t3);
1137	f256_sub(P1->y, P1->y, t1);
1138
1139	/*
1140	 * Compute z3 = h*z1.
1141	 */
1142	f256_montymul(P1->z, P1->z, t2);
1143
1144	/*
1145	 * The "double" result, in case P1 = P2.
1146	 */
1147
1148	/*
1149	 * Compute z' = 2*y2 (in t1).
1150	 */
1151	f256_add(t1, P2->y, P2->y);
1152
1153	/*
1154	 * Compute 2*(y2^2) (in t2) and s = 4*x2*(y2^2) (in t3).
1155	 */
1156	f256_montysquare(t2, P2->y);
1157	f256_add(t2, t2, t2);
1158	f256_add(t3, t2, t2);
1159	f256_montymul(t3, P2->x, t3);
1160
1161	/*
1162	 * Compute m = 3*(x2^2 - 1) (in t4).
1163	 */
1164	f256_montysquare(t4, P2->x);
1165	f256_sub(t4, t4, F256_R);
1166	f256_add(t5, t4, t4);
1167	f256_add(t4, t4, t5);
1168
1169	/*
1170	 * Compute x' = m^2 - 2*s (in t5).
1171	 */
1172	f256_montysquare(t5, t4);
1173	f256_sub(t5, t3);
1174	f256_sub(t5, t3);
1175
1176	/*
1177	 * Compute y' = m*(s - x') - 8*y2^4 (in t6).
1178	 */
1179	f256_sub(t6, t3, t5);
1180	f256_montymul(t6, t6, t4);
1181	f256_montysquare(t7, t2);
1182	f256_sub(t6, t6, t7);
1183	f256_sub(t6, t6, t7);
1184
1185	/*
1186	 * We now have the alternate (doubling) coordinates in (t5,t6,t1).
1187	 * We combine them with (x3,y3,z3).
1188	 */
1189	for (i = 0; i < 4; i ++) {
1190		P1->x[i] |= tt & t5[i];
1191		P1->y[i] |= tt & t6[i];
1192		P1->z[i] |= tt & t1[i];
1193	}
1194
1195	/*
1196	 * If P1 = 0, then we get z3 = 0 (which is invalid); if z1 is 0,
1197	 * then we want to replace the result with a copy of P2. The
1198	 * test on z1 was done at the start, in the zz mask.
1199	 */
1200	for (i = 0; i < 4; i ++) {
1201		P1->x[i] ^= zz & (P1->x[i] ^ P2->x[i]);
1202		P1->y[i] ^= zz & (P1->y[i] ^ P2->y[i]);
1203		P1->z[i] ^= zz & (P1->z[i] ^ F256_R[i]);
1204	}
1205}
1206#endif
1207
1208/*
1209 * Inner function for computing a point multiplication. A window is
1210 * provided, with points 1*P to 15*P in affine coordinates.
1211 *
1212 * Assumptions:
1213 *  - All provided points are valid points on the curve.
1214 *  - Multiplier is non-zero, and smaller than the curve order.
1215 *  - Everything is in Montgomery representation.
1216 */
1217static void
1218point_mul_inner(p256_jacobian *R, const p256_affine *W,
1219	const unsigned char *k, size_t klen)
1220{
1221	p256_jacobian Q;
1222	uint32_t qz;
1223
1224	memset(&Q, 0, sizeof Q);
1225	qz = 1;
1226	while (klen -- > 0) {
1227		int i;
1228		unsigned bk;
1229
1230		bk = *k ++;
1231		for (i = 0; i < 2; i ++) {
1232			uint32_t bits;
1233			uint32_t bnz;
1234			p256_affine T;
1235			p256_jacobian U;
1236			uint32_t n;
1237			int j;
1238			uint64_t m;
1239
1240			p256_double(&Q);
1241			p256_double(&Q);
1242			p256_double(&Q);
1243			p256_double(&Q);
1244			bits = (bk >> 4) & 0x0F;
1245			bnz = NEQ(bits, 0);
1246
1247			/*
1248			 * Lookup point in window. If the bits are 0,
1249			 * we get something invalid, which is not a
1250			 * problem because we will use it only if the
1251			 * bits are non-zero.
1252			 */
1253			memset(&T, 0, sizeof T);
1254			for (n = 0; n < 15; n ++) {
1255				m = -(uint64_t)EQ(bits, n + 1);
1256				T.x[0] |= m & W[n].x[0];
1257				T.x[1] |= m & W[n].x[1];
1258				T.x[2] |= m & W[n].x[2];
1259				T.x[3] |= m & W[n].x[3];
1260				T.y[0] |= m & W[n].y[0];
1261				T.y[1] |= m & W[n].y[1];
1262				T.y[2] |= m & W[n].y[2];
1263				T.y[3] |= m & W[n].y[3];
1264			}
1265
1266			U = Q;
1267			p256_add_mixed(&U, &T);
1268
1269			/*
1270			 * If qz is still 1, then Q was all-zeros, and this
1271			 * is conserved through p256_double().
1272			 */
1273			m = -(uint64_t)(bnz & qz);
1274			for (j = 0; j < 4; j ++) {
1275				Q.x[j] |= m & T.x[j];
1276				Q.y[j] |= m & T.y[j];
1277				Q.z[j] |= m & F256_R[j];
1278			}
1279			CCOPY(bnz & ~qz, &Q, &U, sizeof Q);
1280			qz &= ~bnz;
1281			bk <<= 4;
1282		}
1283	}
1284	*R = Q;
1285}
1286
1287/*
1288 * Convert a window from Jacobian to affine coordinates. A single
1289 * field inversion is used. This function works for windows up to
1290 * 32 elements.
1291 *
1292 * The destination array (aff[]) and the source array (jac[]) may
1293 * overlap, provided that the start of aff[] is not after the start of
1294 * jac[]. Even if the arrays do _not_ overlap, the source array is
1295 * modified.
1296 */
1297static void
1298window_to_affine(p256_affine *aff, p256_jacobian *jac, int num)
1299{
1300	/*
1301	 * Convert the window points to affine coordinates. We use the
1302	 * following trick to mutualize the inversion computation: if
1303	 * we have z1, z2, z3, and z4, and want to inverse all of them,
1304	 * we compute u = 1/(z1*z2*z3*z4), and then we have:
1305	 *   1/z1 = u*z2*z3*z4
1306	 *   1/z2 = u*z1*z3*z4
1307	 *   1/z3 = u*z1*z2*z4
1308	 *   1/z4 = u*z1*z2*z3
1309	 *
1310	 * The partial products are computed recursively:
1311	 *
1312	 *  - on input (z_1,z_2), return (z_2,z_1) and z_1*z_2
1313	 *  - on input (z_1,z_2,... z_n):
1314	 *       recurse on (z_1,z_2,... z_(n/2)) -> r1 and m1
1315	 *       recurse on (z_(n/2+1),z_(n/2+2)... z_n) -> r2 and m2
1316	 *       multiply elements of r1 by m2 -> s1
1317	 *       multiply elements of r2 by m1 -> s2
1318	 *       return r1||r2 and m1*m2
1319	 *
1320	 * In the example below, we suppose that we have 14 elements.
1321	 * Let z1, z2,... zE be the 14 values to invert (index noted in
1322	 * hexadecimal, starting at 1).
1323	 *
1324	 *  - Depth 1:
1325	 *      swap(z1, z2); z12 = z1*z2
1326	 *      swap(z3, z4); z34 = z3*z4
1327	 *      swap(z5, z6); z56 = z5*z6
1328	 *      swap(z7, z8); z78 = z7*z8
1329	 *      swap(z9, zA); z9A = z9*zA
1330	 *      swap(zB, zC); zBC = zB*zC
1331	 *      swap(zD, zE); zDE = zD*zE
1332	 *
1333	 *  - Depth 2:
1334	 *      z1 <- z1*z34, z2 <- z2*z34, z3 <- z3*z12, z4 <- z4*z12
1335	 *      z1234 = z12*z34
1336	 *      z5 <- z5*z78, z6 <- z6*z78, z7 <- z7*z56, z8 <- z8*z56
1337	 *      z5678 = z56*z78
1338	 *      z9 <- z9*zBC, zA <- zA*zBC, zB <- zB*z9A, zC <- zC*z9A
1339	 *      z9ABC = z9A*zBC
1340	 *
1341	 *  - Depth 3:
1342	 *      z1 <- z1*z5678, z2 <- z2*z5678, z3 <- z3*z5678, z4 <- z4*z5678
1343	 *      z5 <- z5*z1234, z6 <- z6*z1234, z7 <- z7*z1234, z8 <- z8*z1234
1344	 *      z12345678 = z1234*z5678
1345	 *      z9 <- z9*zDE, zA <- zA*zDE, zB <- zB*zDE, zC <- zC*zDE
1346	 *      zD <- zD*z9ABC, zE*z9ABC
1347	 *      z9ABCDE = z9ABC*zDE
1348	 *
1349	 *  - Depth 4:
1350	 *      multiply z1..z8 by z9ABCDE
1351	 *      multiply z9..zE by z12345678
1352	 *      final z = z12345678*z9ABCDE
1353	 */
1354
1355	uint64_t z[16][4];
1356	int i, k, s;
1357#define zt   (z[15])
1358#define zu   (z[14])
1359#define zv   (z[13])
1360
1361	/*
1362	 * First recursion step (pairwise swapping and multiplication).
1363	 * If there is an odd number of elements, then we "invent" an
1364	 * extra one with coordinate Z = 1 (in Montgomery representation).
1365	 */
1366	for (i = 0; (i + 1) < num; i += 2) {
1367		memcpy(zt, jac[i].z, sizeof zt);
1368		memcpy(jac[i].z, jac[i + 1].z, sizeof zt);
1369		memcpy(jac[i + 1].z, zt, sizeof zt);
1370		f256_montymul(z[i >> 1], jac[i].z, jac[i + 1].z);
1371	}
1372	if ((num & 1) != 0) {
1373		memcpy(z[num >> 1], jac[num - 1].z, sizeof zt);
1374		memcpy(jac[num - 1].z, F256_R, sizeof F256_R);
1375	}
1376
1377	/*
1378	 * Perform further recursion steps. At the entry of each step,
1379	 * the process has been done for groups of 's' points. The
1380	 * integer k is the log2 of s.
1381	 */
1382	for (k = 1, s = 2; s < num; k ++, s <<= 1) {
1383		int n;
1384
1385		for (i = 0; i < num; i ++) {
1386			f256_montymul(jac[i].z, jac[i].z, z[(i >> k) ^ 1]);
1387		}
1388		n = (num + s - 1) >> k;
1389		for (i = 0; i < (n >> 1); i ++) {
1390			f256_montymul(z[i], z[i << 1], z[(i << 1) + 1]);
1391		}
1392		if ((n & 1) != 0) {
1393			memmove(z[n >> 1], z[n], sizeof zt);
1394		}
1395	}
1396
1397	/*
1398	 * Invert the final result, and convert all points.
1399	 */
1400	f256_invert(zt, z[0]);
1401	for (i = 0; i < num; i ++) {
1402		f256_montymul(zv, jac[i].z, zt);
1403		f256_montysquare(zu, zv);
1404		f256_montymul(zv, zv, zu);
1405		f256_montymul(aff[i].x, jac[i].x, zu);
1406		f256_montymul(aff[i].y, jac[i].y, zv);
1407	}
1408}
1409
1410/*
1411 * Multiply the provided point by an integer.
1412 * Assumptions:
1413 *  - Source point is a valid curve point.
1414 *  - Source point is not the point-at-infinity.
1415 *  - Integer is not 0, and is lower than the curve order.
1416 * If these conditions are not met, then the result is indeterminate
1417 * (but the process is still constant-time).
1418 */
1419static void
1420p256_mul(p256_jacobian *P, const unsigned char *k, size_t klen)
1421{
1422	union {
1423		p256_affine aff[15];
1424		p256_jacobian jac[15];
1425	} window;
1426	int i;
1427
1428	/*
1429	 * Compute window, in Jacobian coordinates.
1430	 */
1431	window.jac[0] = *P;
1432	for (i = 2; i < 16; i ++) {
1433		window.jac[i - 1] = window.jac[(i >> 1) - 1];
1434		if ((i & 1) == 0) {
1435			p256_double(&window.jac[i - 1]);
1436		} else {
1437			p256_add(&window.jac[i - 1], &window.jac[i >> 1]);
1438		}
1439	}
1440
1441	/*
1442	 * Convert the window points to affine coordinates. Point
1443	 * window[0] is the source point, already in affine coordinates.
1444	 */
1445	window_to_affine(window.aff, window.jac, 15);
1446
1447	/*
1448	 * Perform point multiplication.
1449	 */
1450	point_mul_inner(P, window.aff, k, klen);
1451}
1452
1453/*
1454 * Precomputed window for the conventional generator: P256_Gwin[n]
1455 * contains (n+1)*G (affine coordinates, in Montgomery representation).
1456 */
1457static const p256_affine P256_Gwin[] = {
1458	{
1459		{ 0x79E730D418A9143C, 0x75BA95FC5FEDB601,
1460		  0x79FB732B77622510, 0x18905F76A53755C6 },
1461		{ 0xDDF25357CE95560A, 0x8B4AB8E4BA19E45C,
1462		  0xD2E88688DD21F325, 0x8571FF1825885D85 }
1463	},
1464	{
1465		{ 0x850046D410DDD64D, 0xAA6AE3C1A433827D,
1466		  0x732205038D1490D9, 0xF6BB32E43DCF3A3B },
1467		{ 0x2F3648D361BEE1A5, 0x152CD7CBEB236FF8,
1468		  0x19A8FB0E92042DBE, 0x78C577510A5B8A3B }
1469	},
1470	{
1471		{ 0xFFAC3F904EEBC127, 0xB027F84A087D81FB,
1472		  0x66AD77DD87CBBC98, 0x26936A3FB6FF747E },
1473		{ 0xB04C5C1FC983A7EB, 0x583E47AD0861FE1A,
1474		  0x788208311A2EE98E, 0xD5F06A29E587CC07 }
1475	},
1476	{
1477		{ 0x74B0B50D46918DCC, 0x4650A6EDC623C173,
1478		  0x0CDAACACE8100AF2, 0x577362F541B0176B },
1479		{ 0x2D96F24CE4CBABA6, 0x17628471FAD6F447,
1480		  0x6B6C36DEE5DDD22E, 0x84B14C394C5AB863 }
1481	},
1482	{
1483		{ 0xBE1B8AAEC45C61F5, 0x90EC649A94B9537D,
1484		  0x941CB5AAD076C20C, 0xC9079605890523C8 },
1485		{ 0xEB309B4AE7BA4F10, 0x73C568EFE5EB882B,
1486		  0x3540A9877E7A1F68, 0x73A076BB2DD1E916 }
1487	},
1488	{
1489		{ 0x403947373E77664A, 0x55AE744F346CEE3E,
1490		  0xD50A961A5B17A3AD, 0x13074B5954213673 },
1491		{ 0x93D36220D377E44B, 0x299C2B53ADFF14B5,
1492		  0xF424D44CEF639F11, 0xA4C9916D4A07F75F }
1493	},
1494	{
1495		{ 0x0746354EA0173B4F, 0x2BD20213D23C00F7,
1496		  0xF43EAAB50C23BB08, 0x13BA5119C3123E03 },
1497		{ 0x2847D0303F5B9D4D, 0x6742F2F25DA67BDD,
1498		  0xEF933BDC77C94195, 0xEAEDD9156E240867 }
1499	},
1500	{
1501		{ 0x27F14CD19499A78F, 0x462AB5C56F9B3455,
1502		  0x8F90F02AF02CFC6B, 0xB763891EB265230D },
1503		{ 0xF59DA3A9532D4977, 0x21E3327DCF9EBA15,
1504		  0x123C7B84BE60BBF0, 0x56EC12F27706DF76 }
1505	},
1506	{
1507		{ 0x75C96E8F264E20E8, 0xABE6BFED59A7A841,
1508		  0x2CC09C0444C8EB00, 0xE05B3080F0C4E16B },
1509		{ 0x1EB7777AA45F3314, 0x56AF7BEDCE5D45E3,
1510		  0x2B6E019A88B12F1A, 0x086659CDFD835F9B }
1511	},
1512	{
1513		{ 0x2C18DBD19DC21EC8, 0x98F9868A0FCF8139,
1514		  0x737D2CD648250B49, 0xCC61C94724B3428F },
1515		{ 0x0C2B407880DD9E76, 0xC43A8991383FBE08,
1516		  0x5F7D2D65779BE5D2, 0x78719A54EB3B4AB5 }
1517	},
1518	{
1519		{ 0xEA7D260A6245E404, 0x9DE407956E7FDFE0,
1520		  0x1FF3A4158DAC1AB5, 0x3E7090F1649C9073 },
1521		{ 0x1A7685612B944E88, 0x250F939EE57F61C8,
1522		  0x0C0DAA891EAD643D, 0x68930023E125B88E }
1523	},
1524	{
1525		{ 0x04B71AA7D2697768, 0xABDEDEF5CA345A33,
1526		  0x2409D29DEE37385E, 0x4EE1DF77CB83E156 },
1527		{ 0x0CAC12D91CBB5B43, 0x170ED2F6CA895637,
1528		  0x28228CFA8ADE6D66, 0x7FF57C9553238ACA }
1529	},
1530	{
1531		{ 0xCCC425634B2ED709, 0x0E356769856FD30D,
1532		  0xBCBCD43F559E9811, 0x738477AC5395B759 },
1533		{ 0x35752B90C00EE17F, 0x68748390742ED2E3,
1534		  0x7CD06422BD1F5BC1, 0xFBC08769C9E7B797 }
1535	},
1536	{
1537		{ 0xA242A35BB0CF664A, 0x126E48F77F9707E3,
1538		  0x1717BF54C6832660, 0xFAAE7332FD12C72E },
1539		{ 0x27B52DB7995D586B, 0xBE29569E832237C2,
1540		  0xE8E4193E2A65E7DB, 0x152706DC2EAA1BBB }
1541	},
1542	{
1543		{ 0x72BCD8B7BC60055B, 0x03CC23EE56E27E4B,
1544		  0xEE337424E4819370, 0xE2AA0E430AD3DA09 },
1545		{ 0x40B8524F6383C45D, 0xD766355442A41B25,
1546		  0x64EFA6DE778A4797, 0x2042170A7079ADF4 }
1547	}
1548};
1549
1550/*
1551 * Multiply the conventional generator of the curve by the provided
1552 * integer. Return is written in *P.
1553 *
1554 * Assumptions:
1555 *  - Integer is not 0, and is lower than the curve order.
1556 * If this conditions is not met, then the result is indeterminate
1557 * (but the process is still constant-time).
1558 */
1559static void
1560p256_mulgen(p256_jacobian *P, const unsigned char *k, size_t klen)
1561{
1562	point_mul_inner(P, P256_Gwin, k, klen);
1563}
1564
1565/*
1566 * Return 1 if all of the following hold:
1567 *  - klen <= 32
1568 *  - k != 0
1569 *  - k is lower than the curve order
1570 * Otherwise, return 0.
1571 *
1572 * Constant-time behaviour: only klen may be observable.
1573 */
1574static uint32_t
1575check_scalar(const unsigned char *k, size_t klen)
1576{
1577	uint32_t z;
1578	int32_t c;
1579	size_t u;
1580
1581	if (klen > 32) {
1582		return 0;
1583	}
1584	z = 0;
1585	for (u = 0; u < klen; u ++) {
1586		z |= k[u];
1587	}
1588	if (klen == 32) {
1589		c = 0;
1590		for (u = 0; u < klen; u ++) {
1591			c |= -(int32_t)EQ0(c) & CMP(k[u], P256_N[u]);
1592		}
1593	} else {
1594		c = -1;
1595	}
1596	return NEQ(z, 0) & LT0(c);
1597}
1598
1599static uint32_t
1600api_mul(unsigned char *G, size_t Glen,
1601	const unsigned char *k, size_t klen, int curve)
1602{
1603	uint32_t r;
1604	p256_jacobian P;
1605
1606	(void)curve;
1607	if (Glen != 65) {
1608		return 0;
1609	}
1610	r = check_scalar(k, klen);
1611	r &= point_decode(&P, G);
1612	p256_mul(&P, k, klen);
1613	r &= point_encode(G, &P);
1614	return r;
1615}
1616
1617static size_t
1618api_mulgen(unsigned char *R,
1619	const unsigned char *k, size_t klen, int curve)
1620{
1621	p256_jacobian P;
1622
1623	(void)curve;
1624	p256_mulgen(&P, k, klen);
1625	point_encode(R, &P);
1626	return 65;
1627}
1628
1629static uint32_t
1630api_muladd(unsigned char *A, const unsigned char *B, size_t len,
1631	const unsigned char *x, size_t xlen,
1632	const unsigned char *y, size_t ylen, int curve)
1633{
1634	/*
1635	 * We might want to use Shamir's trick here: make a composite
1636	 * window of u*P+v*Q points, to merge the two doubling-ladders
1637	 * into one. This, however, has some complications:
1638	 *
1639	 *  - During the computation, we may hit the point-at-infinity.
1640	 *    Thus, we would need p256_add_complete_mixed() (complete
1641	 *    formulas for point addition), with a higher cost (17 muls
1642	 *    instead of 11).
1643	 *
1644	 *  - A 4-bit window would be too large, since it would involve
1645	 *    16*16-1 = 255 points. For the same window size as in the
1646	 *    p256_mul() case, we would need to reduce the window size
1647	 *    to 2 bits, and thus perform twice as many non-doubling
1648	 *    point additions.
1649	 *
1650	 *  - The window may itself contain the point-at-infinity, and
1651	 *    thus cannot be in all generality be made of affine points.
1652	 *    Instead, we would need to make it a window of points in
1653	 *    Jacobian coordinates. Even p256_add_complete_mixed() would
1654	 *    be inappropriate.
1655	 *
1656	 * For these reasons, the code below performs two separate
1657	 * point multiplications, then computes the final point addition
1658	 * (which is both a "normal" addition, and a doubling, to handle
1659	 * all cases).
1660	 */
1661
1662	p256_jacobian P, Q;
1663	uint32_t r, t, s;
1664	uint64_t z;
1665
1666	(void)curve;
1667	if (len != 65) {
1668		return 0;
1669	}
1670	r = point_decode(&P, A);
1671	p256_mul(&P, x, xlen);
1672	if (B == NULL) {
1673		p256_mulgen(&Q, y, ylen);
1674	} else {
1675		r &= point_decode(&Q, B);
1676		p256_mul(&Q, y, ylen);
1677	}
1678
1679	/*
1680	 * The final addition may fail in case both points are equal.
1681	 */
1682	t = p256_add(&P, &Q);
1683	f256_final_reduce(P.z);
1684	z = P.z[0] | P.z[1] | P.z[2] | P.z[3];
1685	s = EQ((uint32_t)(z | (z >> 32)), 0);
1686	p256_double(&Q);
1687
1688	/*
1689	 * If s is 1 then either P+Q = 0 (t = 1) or P = Q (t = 0). So we
1690	 * have the following:
1691	 *
1692	 *   s = 0, t = 0   return P (normal addition)
1693	 *   s = 0, t = 1   return P (normal addition)
1694	 *   s = 1, t = 0   return Q (a 'double' case)
1695	 *   s = 1, t = 1   report an error (P+Q = 0)
1696	 */
1697	CCOPY(s & ~t, &P, &Q, sizeof Q);
1698	point_encode(A, &P);
1699	r &= ~(s & t);
1700	return r;
1701}
1702
1703/* see bearssl_ec.h */
1704const br_ec_impl br_ec_p256_m64 = {
1705	(uint32_t)0x00800000,
1706	&api_generator,
1707	&api_order,
1708	&api_xoff,
1709	&api_mul,
1710	&api_mulgen,
1711	&api_muladd
1712};
1713
1714/* see bearssl_ec.h */
1715const br_ec_impl *
1716br_ec_p256_m64_get(void)
1717{
1718	return &br_ec_p256_m64;
1719}
1720
1721#else
1722
1723/* see bearssl_ec.h */
1724const br_ec_impl *
1725br_ec_p256_m64_get(void)
1726{
1727	return 0;
1728}
1729
1730#endif
1731