/* ecc.c - TinyCrypt implementation of ECC auxiliary functions */
2
3/*
4 *
5 * Copyright (c) 2013, Kenneth MacKay
6 * All rights reserved.
7 * https://github.com/kmackay/micro-ecc
8 *
9 * Redistribution and use in source and binary forms, with or without modification,
10 * are permitted provided that the following conditions are met:
11 * * Redistributions of source code must retain the above copyright notice, this
12 * list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright notice,
14 * this list of conditions and the following disclaimer in the documentation
15 * and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
24 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Copyright (C) 2015 by Intel Corporation, All Rights Reserved.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions are met:
32 *
33 * - Redistributions of source code must retain the above copyright notice,
34 * this list of conditions and the following disclaimer.
35 *
36 * - Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 *
40 * - Neither the name of Intel Corporation nor the names of its contributors
41 * may be used to endorse or promote products derived from this software
42 * without specific prior written permission.
43 *
44 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
45 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
46 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
47 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
48 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
54 * POSSIBILITY OF SUCH DAMAGE.
55 */
56
57#include <tinycrypt/ecc.h>
58
/* ------ Curve NIST P-256 constants: ------ */

/* All multi-digit values below are little-endian arrays of 32-bit digits
 * (least-significant digit first). */

/* Field prime p of the P-256 curve. */
#define Curve_P {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, \
		 0x00000000, 0x00000000, 0x00000001, 0xFFFFFFFF}

/* Curve coefficient b of y^2 = x^3 - 3x + b (mod p). */
#define Curve_B {0x27D2604B, 0x3BCE3C3E, 0xCC53B0F6, 0x651D06B0, \
		 0x769886BC, 0xB3EBBD55, 0xAA3A93E7, 0x5AC635D8}

/* Order n of the base point G. */
#define Curve_N {0xFC632551, 0xF3B9CAC2, 0xA7179E84, 0xBCE6FAAD, \
		 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF}

/* Base point G = (x, y). */
#define Curve_G {{0xD898C296, 0xF4A13945, 0x2DEB33A0, 0x77037D81, \
		  0x63A440F2, 0xF8BCE6E5, 0xE12C4247, 0x6B17D1F2}, \
		 {0x37BF51F5, 0xCBB64068, 0x6B315ECE, 0x2BCE3357, \
		  0x7C0F9E16, 0x8EE7EB4A, 0xFE1A7F9B, 0x4FE342E2} }

/* Precomputed Barrett reduction constants for p and n, used by
 * vli_mmod_barrett(); NUM_ECC_DIGITS + 1 digits each. */
#define Curve_P_Barrett {0x00000003, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFE, \
		0xFFFFFFFE, 0xFFFFFFFE, 0xFFFFFFFF, 0x00000000, 0x00000001}

#define Curve_N_Barrett {0xEEDF9BFE, 0x012FFD85, 0xDF1A6C21, 0x43190552, \
		0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF, 0x00000000, 0x00000001}

uint32_t curve_p[NUM_ECC_DIGITS] = Curve_P;
uint32_t curve_b[NUM_ECC_DIGITS] = Curve_B;
EccPoint curve_G = Curve_G;
uint32_t curve_n[NUM_ECC_DIGITS] = Curve_N;
uint32_t curve_pb[NUM_ECC_DIGITS + 1] = Curve_P_Barrett;
uint32_t curve_nb[NUM_ECC_DIGITS + 1] = Curve_N_Barrett;
87
88/* ------ Static functions: ------ */
89
90/* Zeroing out p_vli. */
91static void vli_clear(uint32_t *p_vli)
92{
93 uint32_t i;
94
95 for (i = 0; i < NUM_ECC_DIGITS; ++i) {
96 p_vli[i] = 0;
97 }
98}
99
/* Returns nonzero if bit p_bit of p_vli is set.
 * It is assumed that the value provided in 'p_bit' is within
 * the boundaries of the word-array 'p_vli'. */
static uint32_t vli_testBit(uint32_t *p_vli, uint32_t p_bit)
{
	/* Build the mask from an unsigned constant: the original
	 * '1 << (p_bit % 32)' left-shifts the *signed* literal 1, which is
	 * undefined behavior whenever p_bit addresses bit 31 of a digit. */
	return (p_vli[p_bit / 32] & ((uint32_t)1 << (p_bit % 32)));
}
107
108uint32_t vli_isZero(uint32_t *p_vli)
109{
110 uint32_t acc = 0;
111
112 for (uint32_t i = 0; i < NUM_ECC_DIGITS; ++i) {
113 acc |= p_vli[i];
114 }
115
116 return (!acc);
117}
118
/*
 * Find the number of significant 32-bit "digits" in p_vli, i.e. the
 * 1-based index of the left-most nonzero digit; returns 0 when the
 * whole value is zero.
 *
 * Side-channel countermeasure: algorithm strengthened against timing attack.
 */
static uint32_t vli_numDigits(uint32_t *p_vli)
{
	int32_t i;
	uint32_t digits = 0;

	/* Scan from the top digit down; once 'digits' becomes nonzero the
	 * '|| digits' term keeps incrementing on every remaining iteration,
	 * so the sum ends up as (highest nonzero index + 1) while all
	 * digits are always visited regardless of the value. */
	for (i = NUM_ECC_DIGITS - 1; i >= 0 ; --i) {
		digits += p_vli[i] || digits;
	}

	return digits;
}
135
/*
 * Find the index (1-based) of the left-most nonzero bit in p_vli;
 * returns 0 when the value is zero.
 *
 * Side-channel countermeasure: algorithm strengthened against timing attack.
 */
static uint32_t vli_numBits(uint32_t *p_vli)
{
	uint32_t l_digit;
	uint32_t i, acc = 32;
	uint32_t l_numDigits = vli_numDigits(p_vli);

	/* Fix: for a zero value vli_numDigits() returns 0 and the original
	 * code read p_vli[-1] — an out-of-bounds access. Report 0 bits. */
	if (l_numDigits == 0) {
		return 0;
	}

	l_digit = p_vli[l_numDigits - 1];

	/* Count the significant bits of the top digit; the loop always runs
	 * exactly 32 iterations (timing countermeasure preserved). */
	for (i = 0; i < 32; ++i) {
		acc -= !l_digit;
		l_digit >>= 1;
	}

	return ((l_numDigits - 1) * 32 + acc);
}
156
/*
 * Computes p_result = p_left + p_right over NUM_ECC_DIGITS digits and
 * returns the final carry (0 or 1). Safe when p_result aliases an input:
 * each iteration reads index i before writing it.
 *
 * Side-channel countermeasure: algorithm strengthened against timing attack.
 */
static uint32_t vli_add(uint32_t *p_result, uint32_t *p_left,
		uint32_t *p_right)
{

	uint32_t l_carry = 0;

	for (uint32_t i = 0; i < NUM_ECC_DIGITS; ++i) {
		uint32_t l_sum = p_left[i] + p_right[i] + l_carry;

		/* Branch-free carry: the sum wrapped below the first addend,
		 * or stayed equal to it while a carry was already pending. */
		l_carry = (l_sum < p_left[i]) | ((l_sum == p_left[i]) && l_carry);
		p_result[i] = l_sum;
	}

	return l_carry;
}
177
178
/* Computes p_result = p_left * p_right (schoolbook, column-wise).
 *
 * Inputs are 'word_size' digits; p_result receives the full double-width
 * product of 2*word_size digits. (r2:r01) acts as a 96-bit accumulator
 * for one output column plus its carries. */
static void vli_mult(uint32_t *p_result, uint32_t *p_left,
		uint32_t *p_right, uint32_t word_size)
{

	uint64_t r01 = 0;
	uint32_t r2 = 0;

	/* Compute each digit of p_result in sequence, maintaining the carries. */
	for (uint32_t k = 0; k < word_size*2 - 1; ++k) {

		/* Smallest left-operand index contributing to column k. */
		uint32_t l_min = (k < word_size ? 0 : (k + 1) - word_size);

		for (uint32_t i = l_min; i <= k && i < word_size; ++i) {

			uint64_t l_product = (uint64_t)p_left[i] * p_right[k - i];

			r01 += l_product;
			/* Carry out of the 64-bit accumulator. */
			r2 += (r01 < l_product);
		}
		p_result[k] = (uint32_t)r01;
		/* Shift the accumulator right by one digit for the next column. */
		r01 = (r01 >> 32) | (((uint64_t)r2) << 32);
		r2 = 0;
	}

	p_result[word_size * 2 - 1] = (uint32_t)r01;
}
206
/* Computes p_result = p_left^2 (2*NUM_ECC_DIGITS digits).
 *
 * Like vli_mult, but exploits symmetry: each cross product
 * p_left[i] * p_left[k-i] with i < k-i appears twice in the square, so it
 * is computed once and doubled. */
static void vli_square(uint32_t *p_result, uint32_t *p_left)
{

	uint64_t r01 = 0;
	uint32_t r2 = 0;
	uint32_t i, k;

	for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; ++k) {

		/* Smallest index contributing to column k; the inner loop
		 * stops at the diagonal (i <= k - i). */
		uint32_t l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);

		for (i = l_min; i <= k && i <= k - i; ++i) {

			uint64_t l_product = (uint64_t)p_left[i] * p_left[k - i];

			if (i < k - i) {

				/* Doubling can overflow 64 bits: bank the
				 * top bit into r2 before multiplying by 2. */
				r2 += l_product >> 63;
				l_product *= 2;
			}
			r01 += l_product;
			r2 += (r01 < l_product);
		}
		p_result[k] = (uint32_t)r01;
		r01 = (r01 >> 32) | (((uint64_t)r2) << 32);
		r2 = 0;
	}

	p_result[NUM_ECC_DIGITS * 2 - 1] = (uint32_t)r01;
}
238
/* Computes p_result = p_product % p_mod using Barrett reduction.
 *
 * p_product: double-width input (2*NUM_ECC_DIGITS digits); clobbered.
 * p_mod:     the modulus (NUM_ECC_DIGITS digits).
 * p_barrett: matching precomputed Barrett constant of NUM_ECC_DIGITS + 1
 *            digits (curve_pb for curve_p, curve_nb for curve_n).
 *
 * The final corrections use vli_cond_set selects instead of
 * data-dependent branches. */
void vli_mmod_barrett(uint32_t *p_result, uint32_t *p_product,
		uint32_t *p_mod, uint32_t *p_barrett)
{
	uint32_t i;
	uint32_t q1[NUM_ECC_DIGITS + 1];

	/* q1 = top NUM_ECC_DIGITS + 1 digits of the product. */
	for (i = NUM_ECC_DIGITS - 1; i < 2 * NUM_ECC_DIGITS; i++) {
		q1[i - (NUM_ECC_DIGITS - 1)] = p_product[i];
	}

	uint32_t q2[2*NUM_ECC_DIGITS + 2];

	/* q1 = high digits of q1 * barrett (the quotient estimate). */
	vli_mult(q2, q1, p_barrett, NUM_ECC_DIGITS + 1);
	for (i = NUM_ECC_DIGITS + 1; i < 2 * NUM_ECC_DIGITS + 2; i++) {
		q1[i - (NUM_ECC_DIGITS + 1)] = q2[i];
	}

	/* prime2 = modulus zero-extended to double width. */
	uint32_t prime2[2*NUM_ECC_DIGITS];

	for (i = 0; i < NUM_ECC_DIGITS; i++) {
		prime2[i] = p_mod[i];
		prime2[NUM_ECC_DIGITS + i] = 0;
	}

	/* p_product -= q1 * mod: remainder, possibly still a few multiples
	 * of the modulus too large. */
	vli_mult(q2, q1, prime2, NUM_ECC_DIGITS + 1);
	vli_sub(p_product, p_product, q2, 2 * NUM_ECC_DIGITS);

	uint32_t borrow;

	/* Three conditional subtractions of the modulus, each applied only
	 * when it does not underflow (borrow == 0), selected branch-free. */
	borrow = vli_sub(q1, p_product, prime2, NUM_ECC_DIGITS + 1);
	vli_cond_set(p_product, p_product, q1, borrow);
	p_product[NUM_ECC_DIGITS] = q1[NUM_ECC_DIGITS] * (!borrow);
	borrow = vli_sub(q1, p_product, prime2, NUM_ECC_DIGITS + 1);
	vli_cond_set(p_product, p_product, q1, borrow);
	p_product[NUM_ECC_DIGITS] = q1[NUM_ECC_DIGITS] * (!borrow);
	borrow = vli_sub(q1, p_product, prime2, NUM_ECC_DIGITS + 1);
	vli_cond_set(p_product, p_product, q1, borrow);
	p_product[NUM_ECC_DIGITS] = q1[NUM_ECC_DIGITS] * (!borrow);

	/* Copy the reduced low half out. */
	for (i = 0; i < NUM_ECC_DIGITS; i++) {
		p_result[i] = p_product[i];
	}
}
283
284/*
285 * Computes modular exponentiation.
286 *
287 * Side-channel countermeasure: algorithm strengthened against timing attack.
288 */
289static void vli_modExp(uint32_t *p_result, uint32_t *p_base,
290 uint32_t *p_exp, uint32_t *p_mod, uint32_t *p_barrett)
291{
292
293 uint32_t acc[NUM_ECC_DIGITS], tmp[NUM_ECC_DIGITS], product[2 * NUM_ECC_DIGITS];
294 uint32_t j;
295 int32_t i;
296
297 vli_clear(acc);
298 acc[0] = 1;
299
300 for (i = NUM_ECC_DIGITS - 1; i >= 0; i--) {
301 for (j = 1 << 31; j > 0; j = j >> 1) {
302 vli_square(product, acc);
303 vli_mmod_barrett(acc, product, p_mod, p_barrett);
304 vli_mult(product, acc, p_base, NUM_ECC_DIGITS);
305 vli_mmod_barrett(tmp, product, p_mod, p_barrett);
306 vli_cond_set(acc, tmp, acc, j & p_exp[i]);
307 }
308 }
309
310 vli_set(p_result, acc);
311}
312
313/* Conversion from Affine coordinates to Jacobi coordinates. */
314static void EccPoint_fromAffine(EccPointJacobi *p_point_jacobi,
315 EccPoint *p_point) {
316
317 vli_set(p_point_jacobi->X, p_point->x);
318 vli_set(p_point_jacobi->Y, p_point->y);
319 vli_clear(p_point_jacobi->Z);
320 p_point_jacobi->Z[0] = 1;
321}
322
/*
 * Elliptic curve point doubling in Jacobi coordinates, in place: P = P + P.
 *
 * Requires 4 squares and 4 multiplications. The slope numerator uses
 * 3X^2 - 3Z^4 = 3(X - Z^2)(X + Z^2), which relies on the curve
 * parameter being a = -3 (true for NIST P-256).
 */
static void EccPoint_double(EccPointJacobi *P)
{

	uint32_t m[NUM_ECC_DIGITS], s[NUM_ECC_DIGITS], t[NUM_ECC_DIGITS];

	vli_modSquare_fast(t, P->Z); /* t = Z^2 */
	vli_modSub(m, P->X, t, curve_p); /* m = X - Z^2 */
	vli_modAdd(s, P->X, t, curve_p); /* s = X + Z^2 */
	vli_modMult_fast(m, m, s); /* m = X^2 - Z^4 */
	vli_modAdd(s, m, m, curve_p);
	vli_modAdd(m, s, m, curve_p); /* m = 3X^2 - 3Z^4 */
	vli_modSquare_fast(t, P->Y); /* t = Y^2 */
	vli_modMult_fast(s, P->X, t);
	vli_modAdd(s, s, s, curve_p);
	vli_modAdd(s, s, s, curve_p); /* s = 4XY^2 */
	vli_modMult_fast(P->Z, P->Y, P->Z);
	vli_modAdd(P->Z, P->Z, P->Z, curve_p); /* Z' = 2YZ */
	vli_modSquare_fast(P->X, m);
	vli_modSub(P->X, P->X, s, curve_p);
	vli_modSub(P->X, P->X, s, curve_p); /* X' = m^2 - 2s */
	vli_modSquare_fast(P->Y, t); /* Y^4 */
	vli_modAdd(P->Y, P->Y, P->Y, curve_p);
	vli_modAdd(P->Y, P->Y, P->Y, curve_p);
	vli_modAdd(P->Y, P->Y, P->Y, curve_p); /* 8Y^4 */
	vli_modSub(t, s, P->X, curve_p);
	vli_modMult_fast(t, t, m);
	vli_modSub(P->Y, t, P->Y, curve_p); /* Y' = m(s - X') - 8Y^4 */

}
357
358/* Copy input to target. */
359static void EccPointJacobi_set(EccPointJacobi *target, EccPointJacobi *input)
360{
361 vli_set(target->X, input->X);
362 vli_set(target->Y, input->Y);
363 vli_set(target->Z, input->Z);
364}
365
366/* ------ Externally visible functions (see header file for comments): ------ */
367
368void vli_set(uint32_t *p_dest, uint32_t *p_src)
369{
370
371 uint32_t i;
372
373 for (i = 0; i < NUM_ECC_DIGITS; ++i) {
374 p_dest[i] = p_src[i];
375 }
376}
377
/* Compare two word_size-digit values: returns 1 if p_left > p_right,
 * -1 if p_left < p_right, 0 if equal. All digits are visited from most-
 * to least-significant; once a verdict is reached, the (!result) factor
 * freezes it so the loop shape never depends on the data. */
int32_t vli_cmp(uint32_t *p_left, uint32_t *p_right, int32_t word_size)
{
	int32_t result = 0;
	int32_t idx = word_size;

	while (idx-- > 0) {
		int32_t gt = p_left[idx] > p_right[idx];
		int32_t lt = p_left[idx] < p_right[idx];

		result |= (gt - lt) * (!result);
	}

	return result;
}
389
/* Computes p_result = p_left - p_right over word_size digits and returns
 * the final borrow (0 or 1). p_result may alias an input: each iteration
 * reads index idx before writing it. Borrow propagation is branch-free. */
uint32_t vli_sub(uint32_t *p_result, uint32_t *p_left, uint32_t *p_right,
		uint32_t word_size)
{
	uint32_t borrow = 0;
	uint32_t idx;

	for (idx = 0; idx < word_size; idx++) {
		uint32_t diff = p_left[idx] - p_right[idx] - borrow;

		/* A borrow occurred iff the difference wrapped above the
		 * minuend, or equals it while a borrow was already pending. */
		borrow = (diff > p_left[idx]) | ((diff == p_left[idx]) && borrow);
		p_result[idx] = diff;
	}

	return borrow;
}
405
406void vli_cond_set(uint32_t *output, uint32_t *p_true, uint32_t *p_false,
407 uint32_t cond)
408{
409 uint32_t i;
410
411 cond = (!cond);
412
413 for (i = 0; i < NUM_ECC_DIGITS; i++) {
414 output[i] = (p_true[i]*(!cond)) | (p_false[i]*cond);
415 }
416}
417
/* Computes p_result = (p_left + p_right) % p_mod, assuming both inputs
 * are already < p_mod so at most one subtraction of the modulus is needed.
 * The reduction is a constant-time select, not a branch. */
void vli_modAdd(uint32_t *p_result, uint32_t *p_left, uint32_t *p_right,
		uint32_t *p_mod)
{
	uint32_t l_carry = vli_add(p_result, p_left, p_right);
	uint32_t p_temp[NUM_ECC_DIGITS];

	/* Use the subtracted value when carry == borrow: either the sum
	 * overflowed the digit array (carry 1, borrow 1) or it is simply
	 * >= mod (carry 0, borrow 0). Otherwise keep the plain sum. */
	l_carry = l_carry == vli_sub(p_temp, p_result, p_mod, NUM_ECC_DIGITS);
	vli_cond_set(p_result, p_temp, p_result, l_carry);
}
427
428void vli_modSub(uint32_t *p_result, uint32_t *p_left, uint32_t *p_right,
429 uint32_t *p_mod)
430{
431 uint32_t l_borrow = vli_sub(p_result, p_left, p_right, NUM_ECC_DIGITS);
432 uint32_t p_temp[NUM_ECC_DIGITS];
433
434 vli_add(p_temp, p_result, p_mod);
435 vli_cond_set(p_result, p_temp, p_result, l_borrow);
436}
437
438void vli_modMult_fast(uint32_t *p_result, uint32_t *p_left,
439 uint32_t *p_right)
440{
441 uint32_t l_product[2 * NUM_ECC_DIGITS];
442
443 vli_mult(l_product, p_left, p_right, NUM_ECC_DIGITS);
444 vli_mmod_barrett(p_result, l_product, curve_p, curve_pb);
445}
446
447void vli_modSquare_fast(uint32_t *p_result, uint32_t *p_left)
448{
449 uint32_t l_product[2 * NUM_ECC_DIGITS];
450
451 vli_square(l_product, p_left);
452 vli_mmod_barrett(p_result, l_product, curve_p, curve_pb);
453}
454
455void vli_modMult(uint32_t *p_result, uint32_t *p_left, uint32_t *p_right,
456 uint32_t *p_mod, uint32_t *p_barrett)
457{
458
459 uint32_t l_product[2 * NUM_ECC_DIGITS];
460
461 vli_mult(l_product, p_left, p_right, NUM_ECC_DIGITS);
462 vli_mmod_barrett(p_result, l_product, p_mod, p_barrett);
463}
464
465void vli_modInv(uint32_t *p_result, uint32_t *p_input, uint32_t *p_mod,
466 uint32_t *p_barrett)
467{
468 uint32_t p_power[NUM_ECC_DIGITS];
469
470 vli_set(p_power, p_mod);
471 p_power[0] -= 2;
472 vli_modExp(p_result, p_input, p_power, p_mod, p_barrett);
473}
474
475uint32_t EccPoint_isZero(EccPoint *p_point)
476{
477 return (vli_isZero(p_point->x) && vli_isZero(p_point->y));
478}
479
480uint32_t EccPointJacobi_isZero(EccPointJacobi *p_point_jacobi)
481{
482 return vli_isZero(p_point_jacobi->Z);
483}
484
/* Convert Jacobi coordinates (X, Y, Z) back to affine (x, y):
 * x = X / Z^2, y = Y / Z^3 (mod curve_p).
 * The point at infinity (Z == 0) maps to (0, 0). */
void EccPoint_toAffine(EccPoint *p_point, EccPointJacobi *p_point_jacobi)
{

	if (vli_isZero(p_point_jacobi->Z)) {
		vli_clear(p_point->x);
		vli_clear(p_point->y);
		return;
	}

	uint32_t z[NUM_ECC_DIGITS];

	vli_set(z, p_point_jacobi->Z);
	vli_modInv(z, z, curve_p, curve_pb); /* z = Z^-1 */
	vli_modSquare_fast(p_point->x, z); /* x holds Z^-2 */
	vli_modMult_fast(p_point->y, p_point->x, z); /* y holds Z^-3 */
	vli_modMult_fast(p_point->x, p_point->x, p_point_jacobi->X); /* x = X Z^-2 */
	vli_modMult_fast(p_point->y, p_point->y, p_point_jacobi->Y); /* y = Y Z^-3 */
}
503
/* Elliptic curve point addition in Jacobi coordinates: P1 = P1 + P2.
 * P2 is read-only. Handles the degenerate cases explicitly: equal points
 * fall through to doubling, and opposite points produce the point at
 * infinity (Z = 0). */
void EccPoint_add(EccPointJacobi *P1, EccPointJacobi *P2)
{

	uint32_t s1[NUM_ECC_DIGITS], u1[NUM_ECC_DIGITS], t[NUM_ECC_DIGITS];
	uint32_t h[NUM_ECC_DIGITS], r[NUM_ECC_DIGITS];

	vli_modSquare_fast(r, P1->Z); /* r = Z1^2 */
	vli_modSquare_fast(s1, P2->Z); /* s1 = Z2^2 */
	vli_modMult_fast(u1, P1->X, s1); /* u1 = X1 Z2^2 */
	vli_modMult_fast(h, P2->X, r); /* h = X2 Z1^2 */
	vli_modMult_fast(s1, P1->Y, s1);
	vli_modMult_fast(s1, s1, P2->Z); /* s1 = Y1 Z2^3 */
	vli_modMult_fast(r, P2->Y, r);
	vli_modMult_fast(r, r, P1->Z); /* r = Y2 Z1^3 */
	vli_modSub(h, h, u1, curve_p); /* h = X2 Z1^2 - u1 */
	vli_modSub(r, r, s1, curve_p); /* r = Y2 Z1^3 - s1 */

	if (vli_isZero(h)) {
		if (vli_isZero(r)) {
			/* P1 = P2 */
			EccPoint_double(P1);
			return;
		}
		/* P1 = -P2: point at infinity */
		vli_clear(P1->Z);
		return;
	}

	vli_modMult_fast(P1->Z, P1->Z, P2->Z);
	vli_modMult_fast(P1->Z, P1->Z, h); /* Z3 = h Z1 Z2 */
	vli_modSquare_fast(t, h); /* t = h^2 */
	vli_modMult_fast(h, t, h); /* h = h^3 */
	vli_modMult_fast(u1, u1, t); /* u1 = u1 h^2 */
	vli_modSquare_fast(P1->X, r);
	vli_modSub(P1->X, P1->X, h, curve_p);
	vli_modSub(P1->X, P1->X, u1, curve_p);
	vli_modSub(P1->X, P1->X, u1, curve_p); /* X3 = r^2 - h^3 - 2 u1 h^2 */
	vli_modMult_fast(t, s1, h); /* t = s1 h^3 */
	vli_modSub(P1->Y, u1, P1->X, curve_p);
	vli_modMult_fast(P1->Y, P1->Y, r);
	vli_modSub(P1->Y, P1->Y, t, curve_p); /* Y3 = r(u1 h^2 - X3) - s1 h^3 */
}
546
/*
 * Elliptic curve scalar multiplication with result in Jacobi coordinates:
 *
 * p_result = p_scalar * p_point.
 *
 * Side-channel countermeasure: double-and-add-always — the point addition
 * is computed on every iteration and its result selected in constant time
 * with vli_cond_set, so the sequence of point operations does not depend
 * on the individual scalar bits. (The iteration count still depends on
 * the scalar's bit length via vli_numBits.)
 */
void EccPoint_mult_safe(EccPointJacobi *p_result, EccPoint *p_point, uint32_t *p_scalar)
{

	int32_t i;
	uint32_t bit;
	EccPointJacobi p_point_jacobi, p_tmp;

	/* Start the ladder from the top scalar bit (implicitly 1). */
	EccPoint_fromAffine(p_result, p_point);
	EccPoint_fromAffine(&p_point_jacobi, p_point);

	for (i = vli_numBits(p_scalar) - 2; i >= 0; i--) {
		EccPoint_double(p_result);
		EccPointJacobi_set(&p_tmp, p_result);
		EccPoint_add(&p_tmp, &p_point_jacobi);
		bit = vli_testBit(p_scalar, i);
		/* Take the added value only when the scalar bit is set. */
		vli_cond_set(p_result->X, p_tmp.X, p_result->X, bit);
		vli_cond_set(p_result->Y, p_tmp.Y, p_result->Y, bit);
		vli_cond_set(p_result->Z, p_tmp.Z, p_result->Z, bit);
	}
}
572
573/* Ellptic curve scalar multiplication with result in Jacobi coordinates */
574/* p_result = p_scalar * p_point */
575void EccPoint_mult_unsafe(EccPointJacobi *p_result, EccPoint *p_point, uint32_t *p_scalar)
576{
577 int i;
578 EccPointJacobi p_point_jacobi;
579 EccPoint_fromAffine(p_result, p_point);
580 EccPoint_fromAffine(&p_point_jacobi, p_point);
581
582 for(i = vli_numBits(p_scalar) - 2; i >= 0; i--)
583 {
584 EccPoint_double(p_result);
585 if (vli_testBit(p_scalar, i))
586 {
587 EccPoint_add(p_result, &p_point_jacobi);
588 }
589 }
590}
591
592/* -------- Conversions between big endian and little endian: -------- */
593
594void ecc_bytes2native(uint32_t p_native[NUM_ECC_DIGITS],
595 uint8_t p_bytes[NUM_ECC_DIGITS * 4])
596{
597
598 uint32_t i;
599
600 for (i = 0; i < NUM_ECC_DIGITS; ++i) {
601 uint8_t *p_digit = p_bytes + 4 * (NUM_ECC_DIGITS - 1 - i);
602
603 p_native[i] = ((uint32_t)p_digit[0] << 24) |
604 ((uint32_t)p_digit[1] << 16) |
605 ((uint32_t)p_digit[2] << 8) |
606 (uint32_t)p_digit[3];
607 }
608}
609
610void ecc_native2bytes(uint8_t p_bytes[NUM_ECC_DIGITS * 4],
611 uint32_t p_native[NUM_ECC_DIGITS])
612{
613
614 uint32_t i;
615
616 for (i = 0; i < NUM_ECC_DIGITS; ++i) {
617 uint8_t *p_digit = p_bytes + 4 * (NUM_ECC_DIGITS - 1 - i);
618
619 p_digit[0] = p_native[i] >> 24;
620 p_digit[1] = p_native[i] >> 16;
621 p_digit[2] = p_native[i] >> 8;
622 p_digit[3] = p_native[i];
623 }
624}
625