  1. /* Copyright (c) 2015 Cryptography Research, Inc.
  2. * Released under the MIT License. See LICENSE.txt for license information.
  3. */
  4. /**
  5. * @file decaf.c
  6. * @author Mike Hamburg
  7. * @brief Decaf high-level functions.
  8. */
  9. #define _XOPEN_SOURCE 600 /* for posix_memalign */
  10. #define __STDC_WANT_LIB_EXT1__ 1 /* for memset_s */
  11. #include <decaf.h>
  12. #include <string.h>
  #include <assert.h> /* for assert() */
  13. #include "field.h"
  14. #include "decaf_config.h"
  15. #define WBITS DECAF_WORD_BITS
  16. /* Rename table for eventual factoring into .c.inc, MSR ECC style */
  17. #define SCALAR_LIMBS DECAF_255_SCALAR_LIMBS
  18. #define SCALAR_BITS DECAF_255_SCALAR_BITS
  19. #define NLIMBS DECAF_255_LIMBS
  20. #define API_NS(_id) decaf_255_##_id
  21. #define API_NS2(_pref,_id) _pref##_decaf_255_##_id
  22. #define scalar_t decaf_255_scalar_t
  23. #define point_t decaf_255_point_t
  24. #define precomputed_s decaf_255_precomputed_s
  25. #define SER_BYTES DECAF_255_SER_BYTES
  26. #if WBITS == 64
  27. typedef __int128_t decaf_sdword_t;
  28. #define SC_LIMB(x) (x##ull)
  29. #elif WBITS == 32
  30. typedef int64_t decaf_sdword_t;
  31. #define SC_LIMB(x) (x##ull)&((1ull<<32)-1), (x##ull)>>32
  32. #else
  33. #error "Only supporting 32- and 64-bit platforms right now"
  34. #endif
  35. #define sv static void
  36. #define snv static void __attribute__((noinline))
  37. #define siv static inline void __attribute__((always_inline))
  38. static const gf ZERO = {{{0}}}, ONE = {{{1}}};
  39. static const int EDWARDS_D = -121665;
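/*
 * The scalar modulus sc_p below is the Curve25519 group order
 *     l = 2^252 + 27742317777372353535851937790883648493
 *       = 0x1000000000000000_0000000000000000_14def9dea2f79cd6_5812631a5cf5d3ed,
 * i.e. the prime order of the base-point subgroup; all scalar arithmetic in
 * this file is performed modulo l.
 */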
  40. static const scalar_t sc_p = {{{
  41. SC_LIMB(0x5812631a5cf5d3ed),
  42. SC_LIMB(0x14def9dea2f79cd6),
  43. SC_LIMB(0),
  44. SC_LIMB(0x1000000000000000)
  45. }}};
  46. const scalar_t API_NS(scalar_one) = {{{1}}}, API_NS(scalar_zero) = {{{0}}};
  47. extern const scalar_t sc_r2;
  48. extern const decaf_word_t MONTGOMERY_FACTOR;
  49. /* sqrt(9) = 3 from the curve spec. Not exported, but used by pregen tool. */
  50. const unsigned char base_point_ser_for_pregen[SER_BYTES] = {
  51. 3 /*PinkBikeShed: 5 */, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
  52. };
  53. extern const point_t API_NS(point_base);
  54. /* Projective Niels coordinates */
  55. typedef struct { gf a, b, c; } niels_s, niels_t[1];
  56. typedef struct { niels_t n; gf z; } __attribute__((aligned(32))) pniels_s, pniels_t[1]; /* MAGIC alignment */
  57. /* Precomputed base */
  58. struct precomputed_s { niels_t table [DECAF_COMBS_N<<(DECAF_COMBS_T-1)]; };
  59. extern const gf API_NS(precomputed_base_as_fe)[];
  60. const precomputed_s *API_NS(precomputed_base) =
  61. (const precomputed_s *) &API_NS(precomputed_base_as_fe);
  62. const size_t API_NS2(sizeof,precomputed_s) = sizeof(precomputed_s);
  63. const size_t API_NS2(alignof,precomputed_s) = 32;
  64. #ifdef __clang__
  65. #if 100*__clang_major__ + __clang_minor__ > 305
  66. #define UNROLL _Pragma("clang loop unroll(full)") // PERF FIXME: vectorize?
  67. #endif
  68. #endif
  69. #ifndef UNROLL
  70. #define UNROLL
  71. #endif
  72. #define FOR_LIMB(i,op) { unsigned int i=0; for (i=0; i<NLIMBS; i++) { op; }}
  73. #define FOR_LIMB_U(i,op) { unsigned int i=0; UNROLL for (i=0; i<NLIMBS; i++) { op; }}
  74. /** Copy x = y */
  75. siv gf_cpy(gf x, const gf y) { x[0] = y[0]; }
  76. /** Constant time, x = is_z ? z : y */
  77. siv cond_sel(gf x, const gf y, const gf z, decaf_bool_t is_z) {
  78. constant_time_select(x,z,y,sizeof(gf),is_z);
  79. }
  80. /** Constant time, if (neg) x=-x; */
  81. sv cond_neg(gf x, decaf_bool_t neg) {
  82. gf y;
  83. gf_sub(y,ZERO,x);
  84. cond_sel(x,x,y,neg);
  85. }
  86. /** Constant time, if (swap) (x,y) = (y,x); */
  87. siv cond_swap(gf x, gf_s *__restrict__ y, decaf_bool_t swap) {
  88. FOR_LIMB_U(i, {
  89. decaf_word_t s = (x->limb[i] ^ y->limb[i]) & swap;
  90. x->limb[i] ^= s;
  91. y->limb[i] ^= s;
  92. });
  93. }
  94. /** Compare a==b */
  95. static decaf_word_t __attribute__((noinline)) gf_eq(const gf a, const gf b) {
  96. gf c;
  97. gf_sub(c,a,b);
  98. gf_strong_reduce(c);
  99. decaf_word_t ret=0;
  100. FOR_LIMB(i, ret |= c->limb[i] );
  101. /* Hope the compiler is too dumb to optimize this, thus noinline */
  102. return ((decaf_dword_t)ret - 1) >> WBITS;
  103. }
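/*
 * Boolean convention used throughout: decaf_bool_t values are word-sized
 * masks, DECAF_TRUE = all ones and DECAF_FALSE = 0.  That is why results such
 * as gf_eq() can be combined with &, |, ~ and fed straight into
 * cond_sel()/cond_neg() without branching, e.g. (illustrative caller code):
 *
 *     decaf_bool_t same = gf_eq(a, b);   // 0 or all-ones
 *     cond_sel(out, x, y, same);         // out = same ? y : x, branch-free
 */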
  104. /** Inverse square root using addition chain. */
  105. static decaf_bool_t gf_isqrt_chk(gf y, const gf x, decaf_bool_t allow_zero) {
  106. gf tmp0, tmp1;
  107. gf_isr((gf_s *)y, (const gf_s *)x);
  108. gf_sqr(tmp0,y);
  109. gf_mul(tmp1,tmp0,x);
  110. return gf_eq(tmp1,ONE) | (allow_zero & gf_eq(tmp1,ZERO));
  111. }
  112. /** Field inverse: y = 1/x. */
  113. sv gf_invert(gf y, const gf x) {
  114. gf t1, t2;
  115. gf_sqr(t1, x); // o^2
  116. decaf_bool_t ret = gf_isqrt_chk(t2, t1, 0); // +-1/sqrt(o^2) = +-1/o
  117. (void)ret; assert(ret);
  118. gf_sqr(t1, t2);
  119. gf_mul(t2, t1, x); // not direct to y in case of alias.
  120. gf_cpy(y, t2);
  121. }
  122. /**
  123. * Mul by signed int. Not constant-time WRT the sign of that int.
  124. * Just uses a full mul (PERF)
  125. */
  126. static inline void gf_mulw_sgn(gf c, const gf a, int w) {
  127. if (w>0) {
  128. gf_mulw(c, a, w);
  129. } else {
  130. gf_mulw(c, a, -w);
  131. gf_sub(c,ZERO,c);
  132. }
  133. }
  134. /** Return high bit of x = low bit of 2x mod p */
  135. static decaf_word_t hibit(const gf x) {
  136. gf y;
  137. gf_add(y,x,x);
  138. gf_strong_reduce(y);
  139. return -(y->limb[0]&1);
  140. }
  141. /** Return low bit of x (fully reduced mod p), as an all-ones/zero mask */
  142. static decaf_word_t lobit(const gf x) {
  143. gf y;
  144. gf_cpy(y,x);
  145. gf_strong_reduce(y);
  146. return -(y->limb[0]&1);
  147. }
  148. /** out = {extra,accum} - sub, adding p back in if the subtraction borrowed.
  149. * Must have extra <= 1
  150. */
  151. snv sc_subx(
  152. scalar_t out,
  153. const decaf_word_t accum[SCALAR_LIMBS],
  154. const scalar_t sub,
  155. const scalar_t p,
  156. decaf_word_t extra
  157. ) {
  158. decaf_sdword_t chain = 0;
  159. unsigned int i;
  160. for (i=0; i<SCALAR_LIMBS; i++) {
  161. chain = (chain + accum[i]) - sub->limb[i];
  162. out->limb[i] = chain;
  163. chain >>= WBITS;
  164. }
  165. decaf_bool_t borrow = chain+extra; /* = 0 or -1 */
  166. chain = 0;
  167. for (i=0; i<SCALAR_LIMBS; i++) {
  168. chain = (chain + out->limb[i]) + (p->limb[i] & borrow);
  169. out->limb[i] = chain;
  170. chain >>= WBITS;
  171. }
  172. }
  173. snv sc_montmul (
  174. scalar_t out,
  175. const scalar_t a,
  176. const scalar_t b
  177. ) {
  178. unsigned int i,j;
  179. decaf_word_t accum[SCALAR_LIMBS+1] = {0};
  180. decaf_word_t hi_carry = 0;
  181. for (i=0; i<SCALAR_LIMBS; i++) {
  182. decaf_word_t mand = a->limb[i];
  183. const decaf_word_t *mier = b->limb;
  184. decaf_dword_t chain = 0;
  185. for (j=0; j<SCALAR_LIMBS; j++) {
  186. chain += ((decaf_dword_t)mand)*mier[j] + accum[j];
  187. accum[j] = chain;
  188. chain >>= WBITS;
  189. }
  190. accum[j] = chain;
  191. mand = accum[0] * MONTGOMERY_FACTOR;
  192. chain = 0;
  193. mier = sc_p->limb;
  194. for (j=0; j<SCALAR_LIMBS; j++) {
  195. chain += (decaf_dword_t)mand*mier[j] + accum[j];
  196. if (j) accum[j-1] = chain;
  197. chain >>= WBITS;
  198. }
  199. chain += accum[j];
  200. chain += hi_carry;
  201. accum[j-1] = chain;
  202. hi_carry = chain >> WBITS;
  203. }
  204. sc_subx(out, accum, sc_p, sc_p, hi_carry);
  205. }
  206. void API_NS(scalar_mul) (
  207. scalar_t out,
  208. const scalar_t a,
  209. const scalar_t b
  210. ) {
  211. sc_montmul(out,a,b);
  212. sc_montmul(out,out,sc_r2);
  213. }
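/*
 * sc_montmul() computes a*b/R mod l, where R = 2^(WBITS*SCALAR_LIMBS) = 2^256
 * is the Montgomery radix (MONTGOMERY_FACTOR = -1/l mod 2^WBITS makes each low
 * word cancel), and sc_r2 from the pregenerated constants is R^2 mod l.  The
 * two calls above therefore compose to a plain product:
 *
 *     montmul(a, b)       = a*b/R   (mod l)
 *     montmul(a*b/R, R^2) = a*b     (mod l)
 */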
  214. /* PERF: a dedicated Montgomery squaring could be implemented instead of reusing sc_montmul */
  215. siv sc_montsqr (
  216. scalar_t out,
  217. const scalar_t a
  218. ) {
  219. sc_montmul(out,a,a);
  220. }
  221. decaf_bool_t API_NS(scalar_invert) (
  222. scalar_t out,
  223. const scalar_t a
  224. ) {
  225. #if 0
  226. /* FIELD MAGIC. TODO PERF: not updated for 25519 */
  227. scalar_t chain[7], tmp;
  228. sc_montmul(chain[0],a,sc_r2);
  229. unsigned int i,j;
  230. /* Addition chain generated by a not-too-clever SAGE script. First part: compute a^(2^222-1) */
  231. const struct { uint8_t widx, sidx, sct, midx; } muls [] = {
  232. {2,0,1,0}, {3,2,1,0}, {4,3,1,0}, {5,4,1,0}, /* 0x3,7,f,1f */
  233. {1,5,1,0}, {1,1,3,3}, {6,1,9,1}, {1,6,1,0}, {6,1,18,6}, /* a^(2^37-1) */
  234. {1,6,37,6}, {1,1,37,6}, {1,1,111,1} /* a^(2^222-1) */
  235. };
  236. /* Second part: sliding window */
  237. const struct { uint8_t sct, midx; } muls1 [] = {
  238. {6, 5}, {4, 2}, {3, 0}, {2, 0}, {4, 0}, {8, 5},
  239. {2, 0}, {5, 3}, {4, 0}, {4, 0}, {5, 3}, {3, 2},
  240. {3, 2}, {3, 2}, {2, 0}, {3, 0}, {4, 2}, {2, 0},
  241. {4, 3}, {3, 2}, {2, 0}, {3, 2}, {5, 2}, {3, 2},
  242. {2, 0}, {3, 0}, {7, 0}, {5, 0}, {3, 2}, {3, 2},
  243. {4, 2}, {5, 0}, {5, 3}, {3, 0}, {2, 0}, {5, 2},
  244. {4, 3}, {4, 0}, {3, 2}, {7, 4}, {2, 0}, {2, 0},
  245. {2, 0}, {2, 0}, {3, 0}, {5, 2}, {5, 4}, {5, 2},
  246. {5, 0}, {2, 0}, {3, 0}, {3, 0}, {2, 0}, {2, 0},
  247. {2, 0}, {3, 2}, {2, 0}, {3, 2}, {5, 0}, {4, 0},
  248. {6, 4}, {4, 0}
  249. };
  250. for (i=0; i<sizeof(muls)/sizeof(muls[0]); i++) {
  251. sc_montsqr(tmp, chain[muls[i].sidx]);
  252. for (j=1; j<muls[i].sct; j++) {
  253. sc_montsqr(tmp, tmp);
  254. }
  255. sc_montmul(chain[muls[i].widx], tmp, chain[muls[i].midx]);
  256. }
  257. for (i=0; i<sizeof(muls1)/sizeof(muls1[0]); i++) {
  258. sc_montsqr(tmp, chain[1]);
  259. for (j=1; j<muls1[i].sct; j++) {
  260. sc_montsqr(tmp, tmp);
  261. }
  262. sc_montmul(chain[1], tmp, chain[muls1[i].midx]);
  263. }
  264. sc_montmul(out,chain[1],API_NS(scalar_one));
  265. for (i=0; i<sizeof(chain)/sizeof(chain[0]); i++) {
  266. API_NS(scalar_destroy)(chain[i]);
  267. }
  268. return ~API_NS(scalar_eq)(out,API_NS(scalar_zero));
  269. #else
  270. scalar_t b, ma;
  271. int i;
  272. sc_montmul(b,API_NS(scalar_one),sc_r2);
  273. sc_montmul(ma,a,sc_r2);
  274. for (i=SCALAR_BITS-1; i>=0; i--) {
  275. sc_montsqr(b,b);
  276. decaf_word_t w = sc_p->limb[i/WBITS];
  277. if (i<WBITS) {
  278. assert(w >= 2);
  279. w-=2;
  280. }
  281. if (1 & w>>(i%WBITS)) {
  282. sc_montmul(b,b,ma);
  283. }
  284. }
  285. sc_montmul(out,b,API_NS(scalar_one));
  286. API_NS(scalar_destroy)(b);
  287. API_NS(scalar_destroy)(ma);
  288. return ~API_NS(scalar_eq)(out,API_NS(scalar_zero));
  289. #endif
  290. }
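/*
 * Example (compiled out): a minimal self-check of the inversion above through
 * the public API; the helper name is illustrative only.
 */
#if 0
static void example_scalar_invert_check (void) {
    scalar_t x, xinv, prod;
    API_NS(scalar_set_unsigned)(x, 12345);
    decaf_bool_t ok = API_NS(scalar_invert)(xinv, x);
    API_NS(scalar_mul)(prod, x, xinv);
    /* x * 1/x == 1 for any nonzero x */
    (void)ok; assert(ok & API_NS(scalar_eq)(prod, API_NS(scalar_one)));
}
#endif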
  291. void API_NS(scalar_sub) (
  292. scalar_t out,
  293. const scalar_t a,
  294. const scalar_t b
  295. ) {
  296. sc_subx(out, a->limb, b, sc_p, 0);
  297. }
  298. void API_NS(scalar_add) (
  299. scalar_t out,
  300. const scalar_t a,
  301. const scalar_t b
  302. ) {
  303. decaf_dword_t chain = 0;
  304. unsigned int i;
  305. for (i=0; i<SCALAR_LIMBS; i++) {
  306. chain = (chain + a->limb[i]) + b->limb[i];
  307. out->limb[i] = chain;
  308. chain >>= WBITS;
  309. }
  310. sc_subx(out, out->limb, sc_p, sc_p, chain);
  311. }
  312. snv sc_halve (
  313. scalar_t out,
  314. const scalar_t a,
  315. const scalar_t p
  316. ) {
  317. decaf_word_t mask = -(a->limb[0] & 1);
  318. decaf_dword_t chain = 0;
  319. unsigned int i;
  320. for (i=0; i<SCALAR_LIMBS; i++) {
  321. chain = (chain + a->limb[i]) + (p->limb[i] & mask);
  322. out->limb[i] = chain;
  323. chain >>= WBITS;
  324. }
  325. for (i=0; i<SCALAR_LIMBS-1; i++) {
  326. out->limb[i] = out->limb[i]>>1 | out->limb[i+1]<<(WBITS-1);
  327. }
  328. out->limb[i] = out->limb[i]>>1 | chain<<(WBITS-1);
  329. }
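/*
 * Halving trick used by sc_halve(): when a is odd, adding the odd modulus p
 * makes the sum even without changing its residue, so the right shift by one
 * divides exactly; the carry out of the addition supplies the top bit.
 */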
  330. void API_NS(scalar_set_unsigned) (
  331. scalar_t out,
  332. decaf_word_t w
  333. ) {
  334. memset(out,0,sizeof(scalar_t));
  335. out->limb[0] = w;
  336. }
  337. decaf_bool_t API_NS(scalar_eq) (
  338. const scalar_t a,
  339. const scalar_t b
  340. ) {
  341. decaf_word_t diff = 0;
  342. unsigned int i;
  343. for (i=0; i<SCALAR_LIMBS; i++) {
  344. diff |= a->limb[i] ^ b->limb[i];
  345. }
  346. return (((decaf_dword_t)diff)-1)>>WBITS;
  347. }
  348. /* *** API begins here *** */
  349. /** identity = (0,1) */
  350. const point_t API_NS(point_identity) = {{{{{0}}},{{{1}}},{{{1}}},{{{0}}}}};
  351. static void gf_encode ( unsigned char ser[SER_BYTES], gf a ) {
  352. gf_serialize(ser, (gf_s *)a);
  353. }
  354. extern const gf SQRT_MINUS_ONE, SQRT_ONE_MINUS_D; /* Intern this? */
  355. static void deisogenize (
  356. gf_s *__restrict__ s,
  357. gf_s *__restrict__ minus_t_over_s,
  358. const point_t p,
  359. decaf_bool_t toggle_hibit_s,
  360. decaf_bool_t toggle_hibit_t_over_s,
  361. decaf_bool_t toggle_rotation
  362. ) {
  363. gf c, d, x, t;
  364. gf_s *b = s, *a = minus_t_over_s;
  365. /* TODO: intern below */
  366. gf_mul ( x, p->x, SQRT_MINUS_ONE);
  367. gf_mul ( t, p->t, SQRT_MINUS_ONE);
  368. gf_sub ( x, ZERO, x );
  369. gf_sub ( t, ZERO, t );
  370. gf DEBUG;
  371. gf_add ( a, p->z, x );
  372. gf_sub ( b, p->z, x );
  373. gf_mul ( c, a, b ); /* "zx" = Z^2 - X^2 */
  374. gf_cpy(DEBUG,c);
  375. gf_mul ( a, p->z, t ); /* "tz" = T*Z */
  376. gf_sqr ( b, a );
  377. gf_mul ( d, b, c ); /* (TZ)^2 * (Z^2-X^2) */
  378. decaf_bool_t ok = gf_isqrt_chk ( b, d, DECAF_TRUE );
  379. (void)ok; assert(ok);
  380. gf_mul ( d, b, a ); /* "osx" = 1 / sqrt(z^2-x^2) */
  381. gf_mul ( a, b, c );
  382. gf_mul ( b, a, d ); /* 1/tz */
  383. decaf_bool_t rotate;
  384. {
  385. gf e;
  386. gf_sqr(e, p->z);
  387. gf_mul(a, e, b); /* z^2 / tz = z/t = 1/xy */
  388. rotate = hibit(a) ^ toggle_rotation;
  389. /*
  390. * Curve25519: cond select between zx * 1/tz or sqrt(1-d); y=-x
  391. * Pink bike shed: frob = zx * 1/tz
  392. */
  393. gf_mul ( a, b, c ); /* this is the case for PinkBikeShed */
  394. cond_sel ( a, a, SQRT_ONE_MINUS_D, rotate );
  395. cond_sel ( x, p->y, x, rotate );
  396. }
  397. gf_mul ( c, a, d ); // new "osx"
  398. gf_mul ( a, c, p->z );
  399. gf_add ( a, a, a ); // 2 * "osx" * Z
  400. decaf_bool_t tg1 = rotate ^ toggle_hibit_t_over_s ^~ hibit(a);
  401. cond_neg ( c, tg1 );
  402. cond_neg ( a, rotate ^ tg1 );
  403. gf_mul ( d, b, p->z );
  404. gf_add ( d, d, c );
  405. gf_mul ( b, d, x ); /* here "x" = y unless rotate */
  406. cond_neg ( b, toggle_hibit_s ^ hibit(b) );
  407. }
  408. void API_NS(point_encode)( unsigned char ser[SER_BYTES], const point_t p ) {
  409. gf s, mtos;
  410. deisogenize(s,mtos,p,0,0,0);
  411. gf_encode ( ser, s );
  412. }
  413. /**
  414. * Deserialize a field element; return TRUE if the encoding was canonical (< p).
  415. */
  416. static decaf_bool_t gf_deser(gf s, const unsigned char ser[SER_BYTES]) {
  417. return gf_deserialize((gf_s *)s, ser);
  418. }
  419. decaf_bool_t API_NS(point_decode) (
  420. point_t p,
  421. const unsigned char ser[SER_BYTES],
  422. decaf_bool_t allow_identity
  423. ) {
  424. gf s, a, b, c, d, e, f;
  425. decaf_bool_t succ = gf_deser(s, ser), zero = gf_eq(s, ZERO);
  426. succ &= allow_identity | ~zero;
  427. succ &= ~hibit(s);
  428. gf_sqr ( a, s );
  429. gf_sub ( f, ONE, a ); /* f = 1-s^2 = 1-as^2 since a=1 */
  430. succ &= ~ gf_eq( f, ZERO );
  431. gf_sqr ( b, f );
  432. gf_mulw_sgn ( c, a, 4-4*EDWARDS_D );
  433. gf_add ( c, c, b ); /* t^2 */
  434. gf_mul ( d, f, s ); /* s(1-s^2) for denoms */
  435. gf_sqr ( e, d );
  436. gf_mul ( b, c, e );
  437. succ &= gf_isqrt_chk ( e, b, DECAF_TRUE ); /* e = 1/(t s (1-s^2)) */
  438. gf_mul ( b, e, d ); /* 1/t */
  439. gf_mul ( d, e, c ); /* d = t / (s(1-s^2)) */
  440. gf_mul ( e, d, f ); /* t/s */
  441. decaf_bool_t negtos = hibit(e);
  442. cond_neg(b, negtos);
  443. cond_neg(d, negtos);
  444. gf_add ( p->z, ONE, a); /* Z = 1+s^2 */
  445. succ &= ~gf_eq( p->z, ZERO ); /* FUTURE: unnecessary? */
  446. gf_mul ( a, p->z, d); /* t(1+s^2) / s(1-s^2) = 2/xy */
  447. succ &= ~lobit(a); /* = ~hibit(a/2), since hibit(x) = lobit(2x) */
  448. gf_mul ( a, f, b ); /* y = (1-s^2) / t */
  449. gf_mul ( p->y, p->z, a ); /* Y = yZ */
  450. gf_add ( p->x, s, s );
  451. gf_mul ( p->t, p->x, a ); /* T = 2s (1-as^2)/t */
  452. /* TODO: integrate */
  453. gf_cpy(a, p->x);
  454. gf_mul(p->x, a, SQRT_MINUS_ONE);
  455. gf_cpy(a, p->t);
  456. gf_mul(p->t, a, SQRT_MINUS_ONE);
  457. p->y->limb[0] -= zero;
  458. /* Curve25519: succ &= ~hibit(p->t); except there is a *i somewhere here */
  459. assert(API_NS(point_valid)(p) | ~succ);
  460. return succ;
  461. }
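/*
 * Example (compiled out): an encode/decode round trip through the public API
 * using the library's base point; the helper name is illustrative only.
 */
#if 0
static void example_point_roundtrip (void) {
    unsigned char ser[SER_BYTES];
    point_t q;
    API_NS(point_encode)(ser, API_NS(point_base));
    decaf_bool_t ok = API_NS(point_decode)(q, ser, DECAF_FALSE);
    /* decoding a canonical encoding succeeds and gives back the same point */
    (void)ok; assert(ok & API_NS(point_eq)(q, API_NS(point_base)));
}
#endif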
  462. void API_NS(point_sub) (
  463. point_t p,
  464. const point_t q,
  465. const point_t r
  466. ) {
  467. gf a, b, c, d;
  468. gf_sub_nr ( b, q->y, q->x );
  469. gf_sub_nr ( d, r->y, r->x );
  470. gf_add_nr ( c, r->y, r->x );
  471. gf_mul ( a, c, b );
  472. gf_add_nr ( b, q->y, q->x );
  473. gf_mul ( p->y, d, b );
  474. gf_mul ( b, r->t, q->t );
  475. gf_mulw_sgn ( p->x, b, -2*EDWARDS_D );
  476. gf_add_nr ( b, a, p->y );
  477. gf_sub_nr ( c, p->y, a );
  478. gf_mul ( a, q->z, r->z );
  479. gf_add_nr ( a, a, a );
  480. gf_add_nr ( p->y, a, p->x );
  481. gf_sub_nr ( a, a, p->x );
  482. gf_mul ( p->z, a, p->y );
  483. gf_mul ( p->x, p->y, c );
  484. gf_mul ( p->y, a, b );
  485. gf_mul ( p->t, b, c );
  486. }
  487. void API_NS(point_add) (
  488. point_t p,
  489. const point_t q,
  490. const point_t r
  491. ) {
  492. gf a, b, c, d;
  493. gf_sub_nr ( b, q->y, q->x );
  494. gf_sub_nr ( c, r->y, r->x );
  495. gf_add_nr ( d, r->y, r->x );
  496. gf_mul ( a, c, b );
  497. gf_add_nr ( b, q->y, q->x );
  498. gf_mul ( p->y, d, b );
  499. gf_mul ( b, r->t, q->t );
  500. gf_mulw_sgn ( p->x, b, -2*EDWARDS_D );
  501. gf_add_nr ( b, a, p->y );
  502. gf_sub_nr ( c, p->y, a );
  503. gf_mul ( a, q->z, r->z );
  504. gf_add_nr ( a, a, a );
  505. gf_sub_nr ( p->y, a, p->x );
  506. gf_add_nr ( a, a, p->x );
  507. gf_mul ( p->z, a, p->y );
  508. gf_mul ( p->x, p->y, c );
  509. gf_mul ( p->y, a, b );
  510. gf_mul ( p->t, b, c );
  511. }
  512. snv point_double_internal (
  513. point_t p,
  514. const point_t q,
  515. decaf_bool_t before_double
  516. ) {
  517. gf a, b, c, d;
  518. gf_sqr ( c, q->x );
  519. gf_sqr ( a, q->y );
  520. gf_add_nr ( d, c, a );
  521. gf_add_nr ( p->t, q->y, q->x );
  522. gf_sqr ( b, p->t );
  523. gf_subx_nr ( b, b, d, 3 );
  524. gf_sub_nr ( p->t, a, c );
  525. gf_sqr ( p->x, q->z );
  526. gf_add_nr ( p->z, p->x, p->x );
  527. gf_subx_nr ( a, p->z, p->t, 4 );
  528. gf_mul ( p->x, a, b );
  529. gf_mul ( p->z, p->t, a );
  530. gf_mul ( p->y, p->t, d );
  531. if (!before_double) gf_mul ( p->t, b, d );
  532. }
  533. void API_NS(point_double)(point_t p, const point_t q) {
  534. point_double_internal(p,q,0);
  535. }
  536. void API_NS(point_negate) (
  537. point_t nega,
  538. const point_t a
  539. ) {
  540. gf_sub(nega->x, ZERO, a->x);
  541. gf_cpy(nega->y, a->y);
  542. gf_cpy(nega->z, a->z);
  543. gf_sub(nega->t, ZERO, a->t);
  544. }
  545. siv scalar_decode_short (
  546. scalar_t s,
  547. const unsigned char ser[SER_BYTES],
  548. unsigned int nbytes
  549. ) {
  550. unsigned int i,j,k=0;
  551. for (i=0; i<SCALAR_LIMBS; i++) {
  552. decaf_word_t out = 0;
  553. for (j=0; j<sizeof(decaf_word_t) && k<nbytes; j++,k++) {
  554. out |= ((decaf_word_t)ser[k])<<(8*j);
  555. }
  556. s->limb[i] = out;
  557. }
  558. }
  559. decaf_bool_t API_NS(scalar_decode)(
  560. scalar_t s,
  561. const unsigned char ser[SER_BYTES]
  562. ) {
  563. unsigned int i;
  564. scalar_decode_short(s, ser, SER_BYTES);
  565. decaf_sdword_t accum = 0;
  566. for (i=0; i<SCALAR_LIMBS; i++) {
  567. accum = (accum + s->limb[i] - sc_p->limb[i]) >> WBITS;
  568. }
  569. API_NS(scalar_mul)(s,s,API_NS(scalar_one)); /* ham-handed reduce */
  570. return accum;
  571. }
  572. void API_NS(scalar_destroy) (
  573. scalar_t scalar
  574. ) {
  575. decaf_bzero(scalar, sizeof(scalar_t));
  576. }
  577. static inline void ignore_result ( decaf_bool_t boo ) {
  578. (void)boo;
  579. }
  580. void API_NS(scalar_decode_long)(
  581. scalar_t s,
  582. const unsigned char *ser,
  583. size_t ser_len
  584. ) {
  585. if (ser_len == 0) {
  586. API_NS(scalar_copy)(s, API_NS(scalar_zero));
  587. return;
  588. }
  589. size_t i;
  590. scalar_t t1, t2;
  591. i = ser_len - (ser_len%SER_BYTES);
  592. if (i==ser_len) i -= SER_BYTES;
  593. scalar_decode_short(t1, &ser[i], ser_len-i);
  594. if (ser_len == sizeof(scalar_t)) {
  595. assert(i==0);
  596. /* ham-handed reduce */
  597. API_NS(scalar_mul)(s,t1,API_NS(scalar_one));
  598. API_NS(scalar_destroy)(t1);
  599. return;
  600. }
  601. while (i) {
  602. i -= SER_BYTES;
  603. sc_montmul(t1,t1,sc_r2);
  604. ignore_result( API_NS(scalar_decode)(t2, ser+i) );
  605. API_NS(scalar_add)(t1, t1, t2);
  606. }
  607. API_NS(scalar_copy)(s, t1);
  608. API_NS(scalar_destroy)(t1);
  609. API_NS(scalar_destroy)(t2);
  610. }
  611. void API_NS(scalar_encode)(
  612. unsigned char ser[SER_BYTES],
  613. const scalar_t s
  614. ) {
  615. unsigned int i,j,k=0;
  616. for (i=0; i<SCALAR_LIMBS; i++) {
  617. for (j=0; j<sizeof(decaf_word_t); j++,k++) {
  618. ser[k] = s->limb[i] >> (8*j);
  619. }
  620. }
  621. }
  622. /* Operations on [p]niels */
  623. siv cond_neg_niels (
  624. niels_t n,
  625. decaf_bool_t neg
  626. ) {
  627. cond_swap(n->a, n->b, neg);
  628. cond_neg(n->c, neg);
  629. }
  630. static void pt_to_pniels (
  631. pniels_t b,
  632. const point_t a
  633. ) {
  634. gf_sub ( b->n->a, a->y, a->x );
  635. gf_add ( b->n->b, a->x, a->y );
  636. gf_mulw_sgn ( b->n->c, a->t, -2*EDWARDS_D );
  637. gf_add ( b->z, a->z, a->z );
  638. }
  639. static void pniels_to_pt (
  640. point_t e,
  641. const pniels_t d
  642. ) {
  643. gf eu;
  644. gf_add ( eu, d->n->b, d->n->a );
  645. gf_sub ( e->y, d->n->b, d->n->a );
  646. gf_mul ( e->t, e->y, eu);
  647. gf_mul ( e->x, d->z, e->y );
  648. gf_mul ( e->y, d->z, eu );
  649. gf_sqr ( e->z, d->z );
  650. }
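/*
 * The Niels forms above cache exactly the combinations the addition formulas
 * consume: a niels_t holds (a, b, c) = (Y-X, Y+X, -2d*T), used with Z = 1
 * (see niels_to_pt below), and a pniels_t additionally keeps z = 2Z for
 * projective inputs.  pniels_to_pt() therefore recovers the point only up to
 * a projective scaling, which is all the extended coordinates require.
 */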
  651. snv niels_to_pt (
  652. point_t e,
  653. const niels_t n
  654. ) {
  655. gf_add ( e->y, n->b, n->a );
  656. gf_sub ( e->x, n->b, n->a );
  657. gf_mul ( e->t, e->y, e->x );
  658. gf_cpy ( e->z, ONE );
  659. }
  660. snv add_niels_to_pt (
  661. point_t d,
  662. const niels_t e,
  663. decaf_bool_t before_double
  664. ) {
  665. gf a, b, c;
  666. gf_sub_nr ( b, d->y, d->x );
  667. gf_mul ( a, e->a, b );
  668. gf_add_nr ( b, d->x, d->y );
  669. gf_mul ( d->y, e->b, b );
  670. gf_mul ( d->x, e->c, d->t );
  671. gf_add_nr ( c, a, d->y );
  672. gf_sub_nr ( b, d->y, a );
  673. gf_sub_nr ( d->y, d->z, d->x );
  674. gf_add_nr ( a, d->x, d->z );
  675. gf_mul ( d->z, a, d->y );
  676. gf_mul ( d->x, d->y, b );
  677. gf_mul ( d->y, a, c );
  678. if (!before_double) gf_mul ( d->t, b, c );
  679. }
  680. snv sub_niels_from_pt (
  681. point_t d,
  682. const niels_t e,
  683. decaf_bool_t before_double
  684. ) {
  685. gf a, b, c;
  686. gf_sub_nr ( b, d->y, d->x );
  687. gf_mul ( a, e->b, b );
  688. gf_add_nr ( b, d->x, d->y );
  689. gf_mul ( d->y, e->a, b );
  690. gf_mul ( d->x, e->c, d->t );
  691. gf_add_nr ( c, a, d->y );
  692. gf_sub_nr ( b, d->y, a );
  693. gf_add_nr ( d->y, d->z, d->x );
  694. gf_sub_nr ( a, d->z, d->x );
  695. gf_mul ( d->z, a, d->y );
  696. gf_mul ( d->x, d->y, b );
  697. gf_mul ( d->y, a, c );
  698. if (!before_double) gf_mul ( d->t, b, c );
  699. }
  700. sv add_pniels_to_pt (
  701. point_t p,
  702. const pniels_t pn,
  703. decaf_bool_t before_double
  704. ) {
  705. gf L0;
  706. gf_mul ( L0, p->z, pn->z );
  707. gf_cpy ( p->z, L0 );
  708. add_niels_to_pt( p, pn->n, before_double );
  709. }
  710. sv sub_pniels_from_pt (
  711. point_t p,
  712. const pniels_t pn,
  713. decaf_bool_t before_double
  714. ) {
  715. gf L0;
  716. gf_mul ( L0, p->z, pn->z );
  717. gf_cpy ( p->z, L0 );
  718. sub_niels_from_pt( p, pn->n, before_double );
  719. }
  720. extern const scalar_t API_NS(point_scalarmul_adjustment);
  721. siv constant_time_lookup_xx (
  722. void *__restrict__ out_,
  723. const void *table_,
  724. decaf_word_t elem_bytes,
  725. decaf_word_t n_table,
  726. decaf_word_t idx
  727. ) {
  728. constant_time_lookup(out_,table_,elem_bytes,n_table,idx);
  729. }
  730. snv prepare_fixed_window(
  731. pniels_t *multiples,
  732. const point_t b,
  733. int ntable
  734. ) {
  735. point_t tmp;
  736. pniels_t pn;
  737. int i;
  738. point_double_internal(tmp, b, 0);
  739. pt_to_pniels(pn, tmp);
  740. pt_to_pniels(multiples[0], b);
  741. API_NS(point_copy)(tmp, b);
  742. for (i=1; i<ntable; i++) {
  743. add_pniels_to_pt(tmp, pn, 0);
  744. pt_to_pniels(multiples[i], tmp);
  745. }
  746. }
  747. void API_NS(point_scalarmul) (
  748. point_t a,
  749. const point_t b,
  750. const scalar_t scalar
  751. ) {
  752. const int WINDOW = DECAF_WINDOW_BITS,
  753. WINDOW_MASK = (1<<WINDOW)-1,
  754. WINDOW_T_MASK = WINDOW_MASK >> 1,
  755. NTABLE = 1<<(WINDOW-1);
  756. scalar_t scalar1x;
  757. API_NS(scalar_add)(scalar1x, scalar, API_NS(point_scalarmul_adjustment));
  758. sc_halve(scalar1x,scalar1x,sc_p);
  759. /* Set up a precomputed table with odd multiples of b. */
  760. pniels_t pn, multiples[NTABLE];
  761. point_t tmp;
  762. prepare_fixed_window(multiples, b, NTABLE);
  763. /* Initialize. */
  764. int i,j,first=1;
  765. i = SCALAR_BITS - ((SCALAR_BITS-1) % WINDOW) - 1;
  766. for (; i>=0; i-=WINDOW) {
  767. /* Fetch another block of bits */
  768. decaf_word_t bits = scalar1x->limb[i/WBITS] >> (i%WBITS);
  769. if (i%WBITS >= WBITS-WINDOW && i/WBITS<SCALAR_LIMBS-1) {
  770. bits ^= scalar1x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
  771. }
  772. bits &= WINDOW_MASK;
  773. decaf_word_t inv = (bits>>(WINDOW-1))-1;
  774. bits ^= inv;
  775. /* Add in from table. Compute t only on last iteration. */
  776. constant_time_lookup_xx(pn, multiples, sizeof(pn), NTABLE, bits & WINDOW_T_MASK);
  777. cond_neg_niels(pn->n, inv);
  778. if (first) {
  779. pniels_to_pt(tmp, pn);
  780. first = 0;
  781. } else {
  782. /* Using Hisil et al's lookahead method instead of extensible here
  783. * for no particular reason. Double WINDOW times, but only compute t on
  784. * the last one.
  785. */
  786. for (j=0; j<WINDOW-1; j++)
  787. point_double_internal(tmp, tmp, -1);
  788. point_double_internal(tmp, tmp, 0);
  789. add_pniels_to_pt(tmp, pn, i ? -1 : 0);
  790. }
  791. }
  792. /* Write out the answer */
  793. API_NS(point_copy)(a,tmp);
  794. }
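/*
 * Example (compiled out): a linearity check for the fixed-window ladder above,
 * (k1+k2)*B == k1*B + k2*B via the public API; names are illustrative only.
 */
#if 0
static void example_scalarmul_linearity (void) {
    scalar_t k1, k2, ksum;
    point_t p1, p2, psum, pboth;
    API_NS(scalar_set_unsigned)(k1, 1000);
    API_NS(scalar_set_unsigned)(k2, 9999);
    API_NS(scalar_add)(ksum, k1, k2);
    API_NS(point_scalarmul)(p1, API_NS(point_base), k1);
    API_NS(point_scalarmul)(p2, API_NS(point_base), k2);
    API_NS(point_add)(psum, p1, p2);
    API_NS(point_scalarmul)(pboth, API_NS(point_base), ksum);
    assert(API_NS(point_eq)(psum, pboth));
}
#endif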
  795. void API_NS(point_double_scalarmul) (
  796. point_t a,
  797. const point_t b,
  798. const scalar_t scalarb,
  799. const point_t c,
  800. const scalar_t scalarc
  801. ) {
  802. const int WINDOW = DECAF_WINDOW_BITS,
  803. WINDOW_MASK = (1<<WINDOW)-1,
  804. WINDOW_T_MASK = WINDOW_MASK >> 1,
  805. NTABLE = 1<<(WINDOW-1);
  806. scalar_t scalar1x, scalar2x;
  807. API_NS(scalar_add)(scalar1x, scalarb, API_NS(point_scalarmul_adjustment));
  808. sc_halve(scalar1x,scalar1x,sc_p);
  809. API_NS(scalar_add)(scalar2x, scalarc, API_NS(point_scalarmul_adjustment));
  810. sc_halve(scalar2x,scalar2x,sc_p);
  811. /* Set up a precomputed table with odd multiples of b. */
  812. pniels_t pn, multiples1[NTABLE], multiples2[NTABLE];
  813. point_t tmp;
  814. prepare_fixed_window(multiples1, b, NTABLE);
  815. prepare_fixed_window(multiples2, c, NTABLE);
  816. /* Initialize. */
  817. int i,j,first=1;
  818. i = SCALAR_BITS - ((SCALAR_BITS-1) % WINDOW) - 1;
  819. for (; i>=0; i-=WINDOW) {
  820. /* Fetch another block of bits */
  821. decaf_word_t bits1 = scalar1x->limb[i/WBITS] >> (i%WBITS),
  822. bits2 = scalar2x->limb[i/WBITS] >> (i%WBITS);
  823. if (i%WBITS >= WBITS-WINDOW && i/WBITS<SCALAR_LIMBS-1) {
  824. bits1 ^= scalar1x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
  825. bits2 ^= scalar2x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
  826. }
  827. bits1 &= WINDOW_MASK;
  828. bits2 &= WINDOW_MASK;
  829. decaf_word_t inv1 = (bits1>>(WINDOW-1))-1;
  830. decaf_word_t inv2 = (bits2>>(WINDOW-1))-1;
  831. bits1 ^= inv1;
  832. bits2 ^= inv2;
  833. /* Add in from table. Compute t only on last iteration. */
  834. constant_time_lookup_xx(pn, multiples1, sizeof(pn), NTABLE, bits1 & WINDOW_T_MASK);
  835. cond_neg_niels(pn->n, inv1);
  836. if (first) {
  837. pniels_to_pt(tmp, pn);
  838. first = 0;
  839. } else {
  840. /* Using Hisil et al's lookahead method instead of extensible here
  841. * for no particular reason. Double WINDOW times, but only compute t on
  842. * the last one.
  843. */
  844. for (j=0; j<WINDOW-1; j++)
  845. point_double_internal(tmp, tmp, -1);
  846. point_double_internal(tmp, tmp, 0);
  847. add_pniels_to_pt(tmp, pn, 0);
  848. }
  849. constant_time_lookup_xx(pn, multiples2, sizeof(pn), NTABLE, bits2 & WINDOW_T_MASK);
  850. cond_neg_niels(pn->n, inv2);
  851. add_pniels_to_pt(tmp, pn, i?-1:0);
  852. }
  853. /* Write out the answer */
  854. API_NS(point_copy)(a,tmp);
  855. }
  856. decaf_bool_t API_NS(point_eq) ( const point_t p, const point_t q ) {
  857. /* equality mod 2-torsion compares x/y */
  858. gf a, b;
  859. gf_mul ( a, p->y, q->x );
  860. gf_mul ( b, q->y, p->x );
  861. decaf_bool_t succ = gf_eq(a,b);
  862. /* Interesting note: the 4tor would normally be rotation.
  863. * But because of the *i twist, it's actually
  864. * (x,y) <-> (iy,ix)
  865. */
  866. gf_mul ( a, p->y, q->y );
  867. gf_mul ( b, q->x, p->x );
  868. succ |= gf_eq(a,b);
  869. return succ;
  870. }
  871. void API_NS(point_from_hash_nonuniform) (
  872. point_t p,
  873. const unsigned char ser[SER_BYTES]
  874. ) {
  875. // TODO: simplify since we don't return a hint anymore
  876. gf r0,r,a,b,c,dee,D,N,rN,e;
  877. gf_deser(r0,ser);
  878. gf_strong_reduce(r0);
  879. gf_sqr(a,r0);
  880. //gf_sub(r,ZERO,a); /*gf_mulw_sgn(r,a,QUADRATIC_NONRESIDUE);*/
  881. gf_mul(r,a,SQRT_MINUS_ONE);
  882. gf_mulw_sgn(dee,ONE,EDWARDS_D);
  883. gf_mulw_sgn(c,r,EDWARDS_D);
  884. /* Compute D := (dr+a-d)(dr-ar-d) with a=1 */
  885. gf_sub(a,c,dee);
  886. gf_add(a,a,ONE);
  887. decaf_bool_t special_identity_case = gf_eq(a,ZERO);
  888. gf_sub(b,c,r);
  889. gf_sub(b,b,dee);
  890. gf_mul(D,a,b);
  891. /* compute N := (r+1)(a-2d) */
  892. gf_add(a,r,ONE);
  893. gf_mulw_sgn(N,a,1-2*EDWARDS_D);
  894. /* e = +-1/sqrt(+-ND) */
  895. gf_mul(rN,r,N);
  896. gf_mul(a,rN,D);
  897. decaf_bool_t square = gf_isqrt_chk(e,a,DECAF_FALSE);
  898. decaf_bool_t r_is_zero = gf_eq(r,ZERO);
  899. square |= r_is_zero;
  900. square |= special_identity_case;
  901. /* b <- t/s */
  902. cond_sel(c,r0,r,square); /* r? = sqr ? r : 1 */
  903. /* In two steps to avoid overflow on 32-bit arch */
  904. gf_mulw_sgn(a,c,1-2*EDWARDS_D);
  905. gf_mulw_sgn(b,a,1-2*EDWARDS_D);
  906. gf_sub(c,r,ONE);
  907. gf_mul(a,b,c); /* = r? * (r-1) * (a-2d)^2 with a=1 */
  908. gf_mul(b,a,e);
  909. cond_neg(b,~square);
  910. cond_sel(c,r0,ONE,square);
  911. gf_mul(a,e,c);
  912. gf_mul(c,a,D); /* 1/s except for sign. FUTURE: simplify using this. */
  913. gf_sub(b,b,c);
  914. /* a <- s = e * N * (sqr ? r : r0)
  915. * e^2 r N D = 1
  916. * 1/s = 1/(e * N * (sqr ? r : r0)) = e * D * (sqr ? 1 : r0)
  917. */
  918. gf_mul(a,N,r0);
  919. cond_sel(rN,a,rN,square);
  920. gf_mul(a,rN,e);
  921. gf_mul(c,a,b);
  922. /* Normalize/negate */
  923. decaf_bool_t neg_s = hibit(a)^~square;
  924. cond_neg(a,neg_s); /* ends up negative if ~square */
  925. /* b <- t */
  926. cond_sel(b,c,ONE,gf_eq(c,ZERO)); /* 0,0 -> 1,0 */
  927. /* isogenize */
  928. gf_mul(c,a,SQRT_MINUS_ONE);
  929. gf_cpy(a,c); // TODO rename
  930. gf_sqr(c,a); /* s^2 */
  931. gf_add(a,a,a); /* 2s */
  932. gf_add(e,c,ONE);
  933. gf_mul(p->t,a,e); /* 2s(1+s^2) */
  934. gf_mul(p->x,a,b); /* 2st */
  935. gf_sub(a,ONE,c);
  936. gf_mul(p->y,e,a); /* (1+s^2)(1-s^2) */
  937. gf_mul(p->z,a,b); /* (1-s^2)t */
  938. assert(API_NS(point_valid)(p));
  939. }
  940. decaf_bool_t
  941. API_NS(invert_elligator_nonuniform) (
  942. unsigned char recovered_hash[DECAF_255_SER_BYTES],
  943. const point_t p,
  944. uint16_t hint_
  945. ) {
  946. uint64_t hint = hint_;
  947. decaf_bool_t sgn_s = -(hint & 1),
  948. sgn_t_over_s = -(hint>>1 & 1),
  949. sgn_r0 = -(hint>>2 & 1),
  950. sgn_ed_T = -(hint>>3 & 1);
  951. gf a, b, c, d;
  952. deisogenize(a,c,p,sgn_s,sgn_t_over_s,sgn_ed_T);
  953. /* ok, a = s; c = -t/s */
  954. gf_mul(b,c,a);
  955. gf_sub(b,ONE,b); /* t+1 */
  956. gf_sqr(c,a); /* s^2 */
  957. decaf_bool_t is_identity = gf_eq(p->t,ZERO);
  958. { /* identity adjustments */
  959. /* in case of identity, currently c=0, t=0, b=1, will encode to 1 */
  960. /* if hint is 0, -> 0 */
  961. /* if hint is to neg t/s, then go to infinity, effectively set s to 1 */
  962. cond_sel(c,c,ONE,is_identity & sgn_t_over_s);
  963. cond_sel(b,b,ZERO,is_identity & ~sgn_t_over_s & ~sgn_s); /* identity adjust */
  964. }
  965. gf_mulw_sgn(d,c,2*EDWARDS_D-1); /* d = (2d-a)s^2 */
  966. gf_add(a,d,b); /* num? */
  967. gf_sub(d,d,b); /* den? */
  968. gf_mul(b,a,d); /* n*d */
  969. cond_sel(a,d,a,sgn_s);
  970. gf_mul(d,b,SQRT_MINUS_ONE);
  971. decaf_bool_t succ = gf_isqrt_chk(c,d,DECAF_TRUE);
  972. gf_mul(b,a,c);
  973. cond_neg(b, sgn_r0^hibit(b));
  974. succ &= ~(gf_eq(b,ZERO) & sgn_r0);
  975. succ &= ~(is_identity & sgn_ed_T); /* NB: there are no preimages of rotated identity. */
  976. gf_encode(recovered_hash, b);
  977. /* TODO: deal with overflow flag */
  978. return succ;
  979. }
  980. void API_NS(point_from_hash_uniform) (
  981. point_t pt,
  982. const unsigned char hashed_data[2*SER_BYTES]
  983. ) {
  984. point_t pt2;
  985. API_NS(point_from_hash_nonuniform)(pt,hashed_data);
  986. API_NS(point_from_hash_nonuniform)(pt2,&hashed_data[SER_BYTES]);
  987. API_NS(point_add)(pt,pt,pt2);
  988. }
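/*
 * Example (compiled out): mapping hash output to a point.  The nonuniform map
 * consumes SER_BYTES of hash output; the uniform variant above consumes
 * 2*SER_BYTES and adds two such maps.  The buffer below is a placeholder
 * standing in for real hash output; names are illustrative only.
 */
#if 0
static void example_hash_to_point (void) {
    unsigned char h[2*SER_BYTES] = {1,2,3}; /* pretend this came from a hash */
    point_t p;
    API_NS(point_from_hash_uniform)(p, h);
    assert(API_NS(point_valid)(p));
}
#endif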
  989. decaf_bool_t
  990. API_NS(invert_elligator_uniform) (
  991. unsigned char partial_hash[2*SER_BYTES],
  992. const point_t p,
  993. uint16_t hint
  994. ) {
  995. point_t pt2;
  996. API_NS(point_from_hash_nonuniform)(pt2,&partial_hash[SER_BYTES]);
  997. API_NS(point_sub)(pt2,p,pt2);
  998. return API_NS(invert_elligator_nonuniform)(partial_hash,pt2,hint);
  999. }
  1000. decaf_bool_t API_NS(point_valid) (
  1001. const point_t p
  1002. ) {
  1003. gf a,b,c;
  1004. gf_mul(a,p->x,p->y);
  1005. gf_mul(b,p->z,p->t);
  1006. decaf_bool_t out = gf_eq(a,b);
  1007. gf_sqr(a,p->x);
  1008. gf_sqr(b,p->y);
  1009. gf_sub(a,b,a);
  1010. gf_sqr(b,p->t);
  1011. gf_mulw_sgn(c,b,-EDWARDS_D);
  1012. gf_sqr(b,p->z);
  1013. gf_add(b,b,c);
  1014. out &= gf_eq(a,b);
  1015. out &= ~gf_eq(p->z,ZERO);
  1016. return out;
  1017. }
  1018. void API_NS(point_debugging_torque) (
  1019. point_t q,
  1020. const point_t p
  1021. ) {
  1022. #if 0
  1023. gf_sub(q->x,ZERO,p->x);
  1024. gf_sub(q->y,ZERO,p->y);
  1025. gf_cpy(q->z,p->z);
  1026. gf_cpy(q->t,p->t);
  1027. #else
  1028. gf tmp;
  1029. gf_mul(tmp,p->x,SQRT_MINUS_ONE);
  1030. gf_mul(q->x,p->y,SQRT_MINUS_ONE);
  1031. gf_cpy(q->y,tmp);
  1032. gf_cpy(q->z,p->z);
  1033. gf_sub(q->t,ZERO,p->t);
  1034. #endif
  1035. }
  1036. void API_NS(point_debugging_pscale) (
  1037. point_t q,
  1038. const point_t p,
  1039. const uint8_t factor[SER_BYTES]
  1040. ) {
  1041. gf gfac,tmp;
  1042. ignore_result(gf_deser(gfac,factor));
  1043. cond_sel(gfac,gfac,ONE,gf_eq(gfac,ZERO));
  1044. gf_mul(tmp,p->x,gfac);
  1045. gf_cpy(q->x,tmp);
  1046. gf_mul(tmp,p->y,gfac);
  1047. gf_cpy(q->y,tmp);
  1048. gf_mul(tmp,p->z,gfac);
  1049. gf_cpy(q->z,tmp);
  1050. gf_mul(tmp,p->t,gfac);
  1051. gf_cpy(q->t,tmp);
  1052. }
  1053. static void gf_batch_invert (
  1054. gf *__restrict__ out,
  1055. /* const */ gf *in,
  1056. unsigned int n
  1057. ) {
  1058. gf t1;
  1059. assert(n>1);
  1060. gf_cpy(out[1], in[0]);
  1061. int i;
  1062. for (i=1; i<(int) (n-1); i++) {
  1063. gf_mul(out[i+1], out[i], in[i]);
  1064. }
  1065. gf_mul(out[0], out[n-1], in[n-1]);
  1066. gf_invert(out[0], out[0]);
  1067. for (i=n-1; i>0; i--) {
  1068. gf_mul(t1, out[i], out[0]);
  1069. gf_cpy(out[i], t1);
  1070. gf_mul(t1, out[0], in[i]);
  1071. gf_cpy(out[0], t1);
  1072. }
  1073. }
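/*
 * This is Montgomery's simultaneous-inversion trick: accumulate the running
 * products of the inputs, invert only the full product with one gf_invert(),
 * then peel the individual inverses back off.  One inversion plus O(n)
 * multiplications replaces n inversions, which keeps the batch normalization
 * of the precomputed tables below cheap.
 */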
  1074. static void batch_normalize_niels (
  1075. niels_t *table,
  1076. gf *zs,
  1077. gf *zis,
  1078. int n
  1079. ) {
  1080. int i;
  1081. gf product;
  1082. gf_batch_invert(zis, zs, n);
  1083. for (i=0; i<n; i++) {
  1084. gf_mul(product, table[i]->a, zis[i]);
  1085. gf_strong_reduce(product);
  1086. gf_cpy(table[i]->a, product);
  1087. gf_mul(product, table[i]->b, zis[i]);
  1088. gf_strong_reduce(product);
  1089. gf_cpy(table[i]->b, product);
  1090. gf_mul(product, table[i]->c, zis[i]);
  1091. gf_strong_reduce(product);
  1092. gf_cpy(table[i]->c, product);
  1093. }
  1094. }
  1095. void API_NS(precompute) (
  1096. precomputed_s *table,
  1097. const point_t base
  1098. ) {
  1099. const unsigned int n = DECAF_COMBS_N, t = DECAF_COMBS_T, s = DECAF_COMBS_S;
  1100. assert(n*t*s >= SCALAR_BITS);
  1101. point_t working, start, doubles[t-1];
  1102. API_NS(point_copy)(working, base);
  1103. pniels_t pn_tmp;
  1104. gf zs[n<<(t-1)], zis[n<<(t-1)];
  1105. unsigned int i,j,k;
  1106. /* Compute n tables */
  1107. for (i=0; i<n; i++) {
  1108. /* Doubling phase */
  1109. for (j=0; j<t; j++) {
  1110. if (j) API_NS(point_add)(start, start, working);
  1111. else API_NS(point_copy)(start, working);
  1112. if (j==t-1 && i==n-1) break;
  1113. point_double_internal(working, working,0);
  1114. if (j<t-1) API_NS(point_copy)(doubles[j], working);
  1115. for (k=0; k<s-1; k++)
  1116. point_double_internal(working, working, k<s-2);
  1117. }
  1118. /* Gray-code phase */
  1119. for (j=0;; j++) {
  1120. int gray = j ^ (j>>1);
  1121. int idx = (((i+1)<<(t-1))-1) ^ gray;
  1122. pt_to_pniels(pn_tmp, start);
  1123. memcpy(table->table[idx], pn_tmp->n, sizeof(pn_tmp->n));
  1124. gf_cpy(zs[idx], pn_tmp->z);
  1125. if (j >= (1u<<(t-1)) - 1) break;
  1126. int delta = (j+1) ^ ((j+1)>>1) ^ gray;
  1127. for (k=0; delta>1; k++)
  1128. delta >>=1;
  1129. if (gray & (1<<k)) {
  1130. API_NS(point_add)(start, start, doubles[k]);
  1131. } else {
  1132. API_NS(point_sub)(start, start, doubles[k]);
  1133. }
  1134. }
  1135. }
  1136. batch_normalize_niels(table->table,zs,zis,n<<(t-1));
  1137. }
  1138. extern const scalar_t API_NS(precomputed_scalarmul_adjustment);
  1139. siv constant_time_lookup_xx_niels (
  1140. niels_s *__restrict__ ni,
  1141. const niels_t *table,
  1142. int nelts,
  1143. int idx
  1144. ) {
  1145. constant_time_lookup_xx(ni, table, sizeof(niels_s), nelts, idx);
  1146. }
  1147. void API_NS(precomputed_scalarmul) (
  1148. point_t out,
  1149. const precomputed_s *table,
  1150. const scalar_t scalar
  1151. ) {
  1152. int i;
  1153. unsigned j,k;
  1154. const unsigned int n = DECAF_COMBS_N, t = DECAF_COMBS_T, s = DECAF_COMBS_S;
  1155. scalar_t scalar1x;
  1156. API_NS(scalar_add)(scalar1x, scalar, API_NS(precomputed_scalarmul_adjustment));
  1157. sc_halve(scalar1x,scalar1x,sc_p);
  1158. niels_t ni;
  1159. for (i=s-1; i>=0; i--) {
  1160. if (i != (int)s-1) point_double_internal(out,out,0);
  1161. for (j=0; j<n; j++) {
  1162. int tab = 0;
  1163. for (k=0; k<t; k++) {
  1164. unsigned int bit = i + s*(k + j*t);
  1165. if (bit < SCALAR_BITS) {
  1166. tab |= (scalar1x->limb[bit/WBITS] >> (bit%WBITS) & 1) << k;
  1167. }
  1168. }
  1169. decaf_bool_t invert = (tab>>(t-1))-1;
  1170. tab ^= invert;
  1171. tab &= (1<<(t-1)) - 1;
  1172. constant_time_lookup_xx_niels(ni, &table->table[j<<(t-1)], 1<<(t-1), tab);
  1173. cond_neg_niels(ni, invert);
  1174. if ((i!=(int)s-1)||j) {
  1175. add_niels_to_pt(out, ni, j==n-1 && i);
  1176. } else {
  1177. niels_to_pt(out, ni);
  1178. }
  1179. }
  1180. }
  1181. }
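/*
 * Example (compiled out): the comb routine above must agree with the generic
 * fixed-window ladder; a quick cross-check against the shipped base table,
 * with an illustrative helper name.
 */
#if 0
static void example_precomputed_vs_generic (void) {
    scalar_t k;
    point_t a, b;
    API_NS(scalar_set_unsigned)(k, 77);
    API_NS(precomputed_scalarmul)(a, API_NS(precomputed_base), k);
    API_NS(point_scalarmul)(b, API_NS(point_base), k);
    assert(API_NS(point_eq)(a, b));
}
#endif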
  1182. /* TODO: restore Curve25519 Montgomery ladder? */
  1183. decaf_bool_t API_NS(direct_scalarmul) (
  1184. uint8_t scaled[SER_BYTES],
  1185. const uint8_t base[SER_BYTES],
  1186. const scalar_t scalar,
  1187. decaf_bool_t allow_identity,
  1188. decaf_bool_t short_circuit
  1189. ) {
  1190. point_t basep;
  1191. decaf_bool_t succ = API_NS(point_decode)(basep, base, allow_identity);
  1192. if (short_circuit & ~succ) return succ;
  1193. API_NS(point_scalarmul)(basep, basep, scalar);
  1194. API_NS(point_encode)(scaled, basep);
  1195. return succ;
  1196. }
  1197. /**
  1198. * @cond internal
  1199. * Control for variable-time scalar multiply algorithms.
  1200. */
  1201. struct smvt_control {
  1202. int power, addend;
  1203. };
  1204. static int recode_wnaf (
  1205. struct smvt_control *control, /* [nbits/(tableBits+1) + 3] */
  1206. const scalar_t scalar,
  1207. unsigned int tableBits
  1208. ) {
  1209. int current = 0, i, j;
  1210. unsigned int position = 0;
  1211. /* PERF: negate scalar if it's large
  1212. * PERF: this is a pretty simplistic algorithm. I'm sure there's a faster one...
  1213. * PERF MINOR: not technically WNAF, since last digits can be adjacent. Could be rtl.
  1214. */
  1215. for (i=SCALAR_BITS-1; i >= 0; i--) {
  1216. int bit = (scalar->limb[i/WORD_BITS] >> (i%WORD_BITS)) & 1;
  1217. current = 2*current + bit;
  1218. /*
  1219. * Sizing: |current| >= 2^(tableBits+1) -> |current| = 2^0
  1220. * So current loses (tableBits+1) bits every time. It otherwise gains
  1221. * 1 bit per iteration. The number of iterations is
  1222. * (nbits + 2 + tableBits), and an additional control word is added at
  1223. * the end. So the total number of control words is at most
  1224. * ceil((nbits+1) / (tableBits+1)) + 2 = floor((nbits)/(tableBits+1)) + 2.
  1225. * There's also the stopper with power -1, for a total of +3.
  1226. */
  1227. if (current >= (2<<tableBits) || current <= -1 - (2<<tableBits)) {
  1228. int delta = (current + 1) >> 1; /* |delta| < 2^tablebits */
  1229. current = -(current & 1);
  1230. for (j=i; (delta & 1) == 0; j++) {
  1231. delta >>= 1;
  1232. }
  1233. control[position].power = j+1;
  1234. control[position].addend = delta;
  1235. position++;
  1236. assert(position <= SCALAR_BITS/(tableBits+1) + 2);
  1237. }
  1238. }
  1239. if (current) {
  1240. for (j=0; (current & 1) == 0; j++) {
  1241. current >>= 1;
  1242. }
  1243. control[position].power = j;
  1244. control[position].addend = current;
  1245. position++;
  1246. assert(position <= SCALAR_BITS/(tableBits+1) + 2);
  1247. }
  1248. control[position].power = -1;
  1249. control[position].addend = 0;
  1250. return position;
  1251. }
  1252. sv prepare_wnaf_table(
  1253. pniels_t *output,
  1254. const point_t working,
  1255. unsigned int tbits
  1256. ) {
  1257. point_t tmp;
  1258. int i;
  1259. pt_to_pniels(output[0], working);
  1260. if (tbits == 0) return;
  1261. API_NS(point_double)(tmp,working);
  1262. pniels_t twop;
  1263. pt_to_pniels(twop, tmp);
  1264. add_pniels_to_pt(tmp, output[0],0);
  1265. pt_to_pniels(output[1], tmp);
  1266. for (i=2; i < 1<<tbits; i++) {
  1267. add_pniels_to_pt(tmp, twop,0);
  1268. pt_to_pniels(output[i], tmp);
  1269. }
  1270. }
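/*
 * How the two helpers above fit together: recode_wnaf() rewrites the scalar as
 * sum_k control[k].addend * 2^control[k].power with odd addends and a
 * power = -1 terminator, while prepare_wnaf_table() fills output[i] with
 * (2i+1)*P.  A positive addend is consumed as output[addend>>1] and a negative
 * one as a subtraction of output[(-addend)>>1], which is exactly how
 * base_double_scalarmul_non_secret() walks both control lists below.
 */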
  1271. extern const gf API_NS(precomputed_wnaf_as_fe)[];
  1272. static const niels_t *API_NS(wnaf_base) = (const niels_t *)API_NS(precomputed_wnaf_as_fe);
  1273. const size_t API_NS2(sizeof,precomputed_wnafs) __attribute((visibility("hidden")))
  1274. = sizeof(niels_t)<<DECAF_WNAF_FIXED_TABLE_BITS;
  1275. void API_NS(precompute_wnafs) (
  1276. niels_t out[1<<DECAF_WNAF_FIXED_TABLE_BITS],
  1277. const point_t base
  1278. ) __attribute__ ((visibility ("hidden")));
  1279. void API_NS(precompute_wnafs) (
  1280. niels_t out[1<<DECAF_WNAF_FIXED_TABLE_BITS],
  1281. const point_t base
  1282. ) {
  1283. pniels_t tmp[1<<DECAF_WNAF_FIXED_TABLE_BITS];
  1284. gf zs[1<<DECAF_WNAF_FIXED_TABLE_BITS], zis[1<<DECAF_WNAF_FIXED_TABLE_BITS];
  1285. int i;
  1286. prepare_wnaf_table(tmp,base,DECAF_WNAF_FIXED_TABLE_BITS);
  1287. for (i=0; i<1<<DECAF_WNAF_FIXED_TABLE_BITS; i++) {
  1288. memcpy(out[i], tmp[i]->n, sizeof(niels_t));
  1289. gf_cpy(zs[i], tmp[i]->z);
  1290. }
  1291. batch_normalize_niels(out, zs, zis, 1<<DECAF_WNAF_FIXED_TABLE_BITS);
  1292. }
  1293. void API_NS(base_double_scalarmul_non_secret) (
  1294. point_t combo,
  1295. const scalar_t scalar1,
  1296. const point_t base2,
  1297. const scalar_t scalar2
  1298. ) {
  1299. const int table_bits_var = DECAF_WNAF_VAR_TABLE_BITS,
  1300. table_bits_pre = DECAF_WNAF_FIXED_TABLE_BITS;
  1301. struct smvt_control control_var[SCALAR_BITS/(table_bits_var+1)+3];
  1302. struct smvt_control control_pre[SCALAR_BITS/(table_bits_pre+1)+3];
  1303. int ncb_pre = recode_wnaf(control_pre, scalar1, table_bits_pre);
  1304. int ncb_var = recode_wnaf(control_var, scalar2, table_bits_var);
  1305. pniels_t precmp_var[1<<table_bits_var];
  1306. prepare_wnaf_table(precmp_var, base2, table_bits_var);
  1307. int contp=0, contv=0, i = control_var[0].power;
  1308. if (i < 0) {
  1309. API_NS(point_copy)(combo, API_NS(point_identity));
  1310. return;
  1311. } else if (i > control_pre[0].power) {
  1312. pniels_to_pt(combo, precmp_var[control_var[0].addend >> 1]);
  1313. contv++;
  1314. } else if (i == control_pre[0].power && i >=0 ) {
  1315. pniels_to_pt(combo, precmp_var[control_var[0].addend >> 1]);
  1316. add_niels_to_pt(combo, API_NS(wnaf_base)[control_pre[0].addend >> 1], i);
  1317. contv++; contp++;
  1318. } else {
  1319. i = control_pre[0].power;
  1320. niels_to_pt(combo, API_NS(wnaf_base)[control_pre[0].addend >> 1]);
  1321. contp++;
  1322. }
  1323. for (i--; i >= 0; i--) {
  1324. int cv = (i==control_var[contv].power), cp = (i==control_pre[contp].power);
  1325. point_double_internal(combo,combo,i && !(cv||cp));
  1326. if (cv) {
  1327. assert(control_var[contv].addend);
  1328. if (control_var[contv].addend > 0) {
  1329. add_pniels_to_pt(combo, precmp_var[control_var[contv].addend >> 1], i&&!cp);
  1330. } else {
  1331. sub_pniels_from_pt(combo, precmp_var[(-control_var[contv].addend) >> 1], i&&!cp);
  1332. }
  1333. contv++;
  1334. }
  1335. if (cp) {
  1336. assert(control_pre[contp].addend);
  1337. if (control_pre[contp].addend > 0) {
  1338. add_niels_to_pt(combo, API_NS(wnaf_base)[control_pre[contp].addend >> 1], i);
  1339. } else {
  1340. sub_niels_from_pt(combo, API_NS(wnaf_base)[(-control_pre[contp].addend) >> 1], i);
  1341. }
  1342. contp++;
  1343. }
  1344. }
  1345. assert(contv == ncb_var); (void)ncb_var;
  1346. assert(contp == ncb_pre); (void)ncb_pre;
  1347. }
  1348. void API_NS(point_destroy) (
  1349. point_t point
  1350. ) {
  1351. decaf_bzero(point, sizeof(point_t));
  1352. }
  1353. void API_NS(precomputed_destroy) (
  1354. precomputed_s *pre
  1355. ) {
  1356. decaf_bzero(pre, API_NS2(sizeof,precomputed_s));
  1357. }