You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 

1916 lines
52 KiB

/** @brief Decaf high-level functions. */
#define _XOPEN_SOURCE 600 /* for posix_memalign */
#define __STDC_WANT_LIB_EXT1__ 1 /* for memset_s */
#include <string.h>
#include "word.h"
#include "field.h"
#include <decaf.h>

/* Template stuff */
/* NOTE: every $(...) span below is a placeholder expanded by the
 * curve-specific template generator; this file is not compilable as-is. */
#define API_NS(_id) $(c_ns)_##_id
#define SCALAR_BITS $(C_NS)_SCALAR_BITS
#define SCALAR_LIMBS $(C_NS)_SCALAR_LIMBS
#define scalar_t API_NS(scalar_t)
#define point_t API_NS(point_t)
#define precomputed_s API_NS(precomputed_s)
#define IMAGINE_TWIST $(imagine_twist)
#define COFACTOR $(cofactor)
/** Comb config: number of combs, n, t, s. */
#define COMBS_N $(combs.n)
#define COMBS_T $(combs.t)
#define COMBS_S $(combs.s)
#define DECAF_WINDOW_BITS $(window_bits)
#define DECAF_WNAF_FIXED_TABLE_BITS $(wnaf.fixed)
#define DECAF_WNAF_VAR_TABLE_BITS $(wnaf.var)

/* Curve constant d of the Edwards curve equation. */
static const int EDWARDS_D = $(d);

/* sc_p: the (prime) scalar group order q.
 * sc_r2: R^2 mod q, used to enter the Montgomery domain.
 * point_scalarmul_adjustment / precomputed_scalarmul_adjustment:
 *   constants added to scalars so the fixed-window / comb ladders
 *   can use a signed-digit recoding without a data-dependent branch. */
static const scalar_t sc_p = {{{
$(ser(q,64,"SC_LIMB"))
}}}, sc_r2 = {{{
$(ser(((2**128)**((scalar_bits+63)/64))%q,64,"SC_LIMB"))
}}}, point_scalarmul_adjustment = {{{
$(ser((2**(scalar_bits-1+window_bits - ((scalar_bits-1)%window_bits)) - 1) % q,64,"SC_LIMB"))
}}}, precomputed_scalarmul_adjustment = {{{
$(ser((2**(combs.n*combs.t*combs.s) - 1) % q,64,"SC_LIMB"))
}}};

/* -q^-1 mod 2^64, the per-word multiplier for Montgomery reduction. */
static const decaf_word_t MONTGOMERY_FACTOR = (decaf_word_t)0x$("%x" % pow(-q,2**64-1,2**64))ull;

/* Serialized x-coordinate of the Montgomery-form base point. */
const uint8_t API_NS(x_base_point)[SER_BYTES] /* TODO */ = {
$(ser(mont_base,8))
};

#if COFACTOR==8
/* sqrt(1-d), needed by the cofactor-8 rotation in deisogenize(). */
static const gf SQRT_ONE_MINUS_D = {FIELD_LITERAL(
$(sqrt_one_minus_d)
)};
#endif
/* End of template stuff */

/* Sanity checks on the template parameters. */
#if (COFACTOR == 8) && !IMAGINE_TWIST
/* FUTURE: Curve41417 doesn't have these properties. */
#error "Currently require IMAGINE_TWIST (and thus p=5 mod 8) for cofactor 8"
#endif
#if IMAGINE_TWIST && (P_MOD_8 != 5)
#error "Cannot use IMAGINE_TWIST except for p == 5 mod 8"
#endif
#if (COFACTOR != 8) && (COFACTOR != 4)
#error "COFACTOR must be 4 or 8"
#endif

#if IMAGINE_TWIST
/* sqrt(-1) in the field; defined in the field implementation. */
extern const gf SQRT_MINUS_ONE;
#endif

#define WBITS DECAF_WORD_BITS /* NB this may be different from ARCH_WORD_BITS */

const scalar_t API_NS(scalar_one) = {{{1}}}, API_NS(scalar_zero) = {{{0}}};
extern const point_t API_NS(point_base);

/* Projective Niels coordinates: a = Y-X, b = Y+X, c = 2dT (see pt_to_pniels). */
typedef struct { gf a, b, c; } niels_s, niels_t[1];
/* pniels adds a projective denominator z = 2Z; aligned for vector loads. */
typedef struct { niels_t n; gf z; } __attribute__((aligned(sizeof(big_register_t))))
pniels_s, pniels_t[1];

/* Precomputed base: comb table of COMBS_N << (COMBS_T-1) niels points. */
struct precomputed_s { niels_t table [COMBS_N<<(COMBS_T-1)]; };
extern const gf API_NS(precomputed_base_as_fe)[];
const precomputed_s *API_NS(precomputed_base) =
    (const precomputed_s *) &API_NS(precomputed_base_as_fe);
const size_t API_NS(sizeof_precomputed_s) = sizeof(precomputed_s);
const size_t API_NS(alignof_precomputed_s) = sizeof(big_register_t);

#define FOR_LIMB(i,op) { unsigned int i=0; for (i=0; i<NLIMBS; i++) { op; }}
#define FOR_LIMB_U(i,op) { unsigned int i=0; UNROLL for (i=0; i<NLIMBS; i++) { op; }}
/* The plan on booleans:
 *
 * The external interface uses decaf_bool_t, but this might be a different
 * size than our particular arch's word_t (and thus mask_t). Also, the caller
 * isn't guaranteed to pass it as nonzero. So bool_to_mask converts word sizes
 * and checks nonzero.
 *
 * On the flip side, mask_t is always -1 or 0, but it might be a different size
 * than decaf_bool_t.
 *
 * On the third hand, we have success vs boolean types, but that's handled in
 * common.h: it converts between decaf_bool_t and decaf_error_t.
 */

/** Widen/narrow an all-ones-or-zero mask to the public boolean type.
 * Sign-extends through the signed word types so -1 stays -1 at any width. */
static INLINE decaf_bool_t mask_to_bool (mask_t m) {
    return (decaf_sword_t)(sword_t)m;
}

/** Convert a caller-supplied boolean (any nonzero value) to a canonical
 * all-ones-or-zero mask, in constant time. */
static INLINE mask_t bool_to_mask (decaf_bool_t m) {
    /* On most arches this will be optimized to a simple cast. */
    mask_t ret = 0;
    unsigned int limit = sizeof(decaf_bool_t)/sizeof(mask_t);
    if (limit < 1) limit = 1;
    /* OR the nonzero-ness of every mask-sized chunk of m into ret. */
    for (unsigned int i=0; i<limit; i++) {
        ret |= ~ word_is_zero(m >> (i*8*sizeof(word_t)));
    }
    return ret;
}
/** Constant time, x = is_z ? z : y */
static INLINE void
cond_sel(gf x, const gf y, const gf z, mask_t is_z) {
    constant_time_select(x,y,z,sizeof(gf),is_z,0);
}

/** Constant time, if (neg) x=-x; */
static void
cond_neg(gf x, mask_t neg) {
    gf y;
    gf_sub(y,ZERO,x);        /* y = -x, computed unconditionally */
    cond_sel(x,x,y,neg);     /* masked select, no branch on neg */
}

/** Constant time, if (swap) (x,y) = (y,x); */
static INLINE void
cond_swap(gf x, gf_s *__restrict__ y, mask_t swap) {
    constant_time_cond_swap(x,y,sizeof(gf_s),swap);
}

/** Inverse square root using addition chain.
 * Computes y = 1/sqrt(x) via gf_isr, then verifies it by checking
 * y^2 * x == 1 (or == 0 when allow_zero permits x == 0).
 * @return mask: all-ones on success, zero if x had no inverse square root. */
static mask_t
gf_isqrt_chk(gf y, const gf x, mask_t allow_zero) {
    gf tmp0, tmp1;
    gf_isr((gf_s *)y, (const gf_s *)x);
    gf_sqr(tmp0,y);
    gf_mul(tmp1,tmp0,x);
    return gf_eq(tmp1,ONE) | (allow_zero & gf_eq(tmp1,ZERO));
}
/** Inverse. Computes y = 1/x via 1/x = x * (1/sqrt(x^2))^2.
 * Asserts (debug builds) that x was invertible, i.e. nonzero. */
static void
gf_invert(gf y, const gf x) {
    gf t1, t2;
    gf_sqr(t1, x); // o^2
    mask_t ret = gf_isqrt_chk(t2, t1, 0); // +-1/sqrt(o^2) = +-1/o
    (void)ret; assert(ret);
    gf_sqr(t1, t2);
    gf_mul(t2, t1, x); // not direct to y in case of alias.
    gf_copy(y, t2);
}

/** Mul by signed int. Not constant-time WRT the sign of that int.
 * For w <= 0 it multiplies by -w and negates (w == 0 yields zero either way).
 * NOTE(review): -w would overflow for w == INT32_MIN; callers only pass
 * small curve constants, so this is out of range in practice. */
static INLINE void
gf_mulw_sgn(gf c, const gf a, int32_t w) {
    if (w>0) {
        gf_mulw(c, a, w);
    } else {
        gf_mulw(c, a, -w);
        gf_sub(c,ZERO,c);
    }
}

/** Return high bit of x = low bit of 2x mod p */
static mask_t hibit(const gf x) {
    gf y;
    gf_add(y,x,x);           /* y = 2x */
    gf_strong_reduce(y);     /* fully reduce so limb 0 is canonical */
    return -(y->limb[0]&1);  /* mask: all-ones if the bit is set */
}

#if COFACTOR==8
/** Return low bit of x mod p (fully reduced). */
static mask_t lobit(const gf x) {
    gf y;
    gf_copy(y,x);
    gf_strong_reduce(y);
    return -(y->limb[0]&1);
}
#endif
/** {extra,accum} - sub +? p
 * Must have extra <= 1
 *
 * Conditionally-subtracting scalar subtraction: out = accum - sub, then add
 * back p if the subtraction (plus the incoming carry `extra`) borrowed.
 * Constant time: the add-back is masked, not branched.
 */
static NOINLINE void
sc_subx(
    scalar_t out,
    const decaf_word_t accum[SCALAR_LIMBS],
    const scalar_t sub,
    const scalar_t p,
    decaf_word_t extra
) {
    decaf_dsword_t chain = 0;
    unsigned int i;
    /* First pass: out = accum - sub with borrow propagation. */
    for (i=0; i<SCALAR_LIMBS; i++) {
        chain = (chain + accum[i]) - sub->limb[i];
        out->limb[i] = chain;
        chain >>= WBITS;
    }
    decaf_word_t borrow = chain+extra; /* = 0 or -1 */
    /* Second pass: add p back iff we borrowed (borrow is an all-ones mask). */
    chain = 0;
    for (i=0; i<SCALAR_LIMBS; i++) {
        chain = (chain + out->limb[i]) + (p->limb[i] & borrow);
        out->limb[i] = chain;
        chain >>= WBITS;
    }
}

/** Montgomery multiplication of scalars: out = a * b / R mod sc_p,
 * where R = 2^(WBITS*SCALAR_LIMBS). Word-by-word CIOS-style reduction
 * using MONTGOMERY_FACTOR = -sc_p^-1 mod 2^WBITS. */
static NOINLINE void
sc_montmul (
    scalar_t out,
    const scalar_t a,
    const scalar_t b
) {
    unsigned int i,j;
    decaf_word_t accum[SCALAR_LIMBS+1] = {0};
    decaf_word_t hi_carry = 0;
    for (i=0; i<SCALAR_LIMBS; i++) {
        decaf_word_t mand = a->limb[i];
        const decaf_word_t *mier = b->limb;
        /* accum += a[i] * b */
        decaf_dword_t chain = 0;
        for (j=0; j<SCALAR_LIMBS; j++) {
            chain += ((decaf_dword_t)mand)*mier[j] + accum[j];
            accum[j] = chain;
            chain >>= WBITS;
        }
        accum[j] = chain;
        /* Montgomery step: choose m so accum + m*p has a zero low word,
         * then shift the accumulator down one word. */
        mand = accum[0] * MONTGOMERY_FACTOR;
        chain = 0;
        mier = sc_p->limb;
        for (j=0; j<SCALAR_LIMBS; j++) {
            chain += (decaf_dword_t)mand*mier[j] + accum[j];
            if (j) accum[j-1] = chain;
            chain >>= WBITS;
        }
        chain += accum[j];
        chain += hi_carry;
        accum[j-1] = chain;
        hi_carry = chain >> WBITS;
    }
    /* Final conditional subtraction brings the result below sc_p. */
    sc_subx(out, accum, sc_p, sc_p, hi_carry);
}

/** out = a*b mod sc_p (plain, non-Montgomery domain).
 * Two Montgomery multiplies: (aR^-1 b) * R^2 * R^-1 = ab. */
void API_NS(scalar_mul) (
    scalar_t out,
    const scalar_t a,
    const scalar_t b
) {
    sc_montmul(out,a,b);
    sc_montmul(out,out,sc_r2);
}

/* PERF: could implement this */
/** Montgomery squaring; currently just a multiply by itself. */
static INLINE void sc_montsqr (scalar_t out, const scalar_t a) {
    sc_montmul(out,a,a);
}
/** Invert a scalar mod sc_p: out = a^-1.
 * Exponentiates to sc_p - 2 (Fermat's little theorem) with a sliding
 * window over the bits of the public modulus, so the window pattern
 * leaks nothing secret.
 * @return DECAF_SUCCESS unless a == 0 (then out == 0 and failure).
 */
decaf_error_t API_NS(scalar_invert) (
    scalar_t out,
    const scalar_t a
) {
    /* Fermat's little theorem, sliding window.
     * Sliding window is fine here because the modulus isn't secret.
     */
    const int SCALAR_WINDOW_BITS = 3;
    scalar_t precmp[1<<SCALAR_WINDOW_BITS];
    const int LAST = (1<<SCALAR_WINDOW_BITS)-1;

    /* Precompute precmp = [a^1,a^3,...] (odd powers, Montgomery domain) */
    sc_montmul(precmp[0],a,sc_r2);              /* a*R */
    if (LAST > 0) sc_montmul(precmp[LAST],precmp[0],precmp[0]); /* a^2, scratch */
    int i;
    for (i=1; i<=LAST; i++) {
        sc_montmul(precmp[i],precmp[i-1],precmp[LAST]);
    }

    /* Sliding window over the exponent sc_p - 2 (the "w -= 2" below). */
    unsigned residue = 0, trailing = 0, started = 0;
    for (i=SCALAR_BITS-1; i>=-SCALAR_WINDOW_BITS; i--) {
        if (started) sc_montsqr(out,out);
        decaf_word_t w = (i>=0) ? sc_p->limb[i/WBITS] : 0;
        if (i >= 0 && i<WBITS) {
            assert(w >= 2);
            w-=2;                               /* exponent is p-2 */
        }
        residue = (residue<<1) | ((w>>(i%WBITS))&1);
        if (residue>>SCALAR_WINDOW_BITS != 0) {
            /* Window full: stash it and wait for its trailing zeros. */
            assert(trailing == 0);
            trailing = residue;
            residue = 0;
        }
        if (trailing > 0 && (trailing & ((1<<SCALAR_WINDOW_BITS)-1)) == 0) {
            /* Trailing zeros flushed: multiply in the stored odd window. */
            if (started) {
                sc_montmul(out,out,precmp[trailing>>(SCALAR_WINDOW_BITS+1)]);
            } else {
                API_NS(scalar_copy)(out,precmp[trailing>>(SCALAR_WINDOW_BITS+1)]);
                started = 1;
            }
            trailing = 0;
        }
        trailing <<= 1;
    }
    assert(residue==0);
    assert(trailing==0);

    /* Demontgomerize */
    sc_montmul(out,out,API_NS(scalar_one));
    decaf_bzero(precmp, sizeof(precmp));
    return decaf_succeed_if(~API_NS(scalar_eq)(out,API_NS(scalar_zero)));
}
/** out = a - b mod sc_p. */
void API_NS(scalar_sub) (
    scalar_t out,
    const scalar_t a,
    const scalar_t b
) {
    sc_subx(out, a->limb, b, sc_p, 0);
}

/** out = a + b mod sc_p. Adds limbwise, then conditionally subtracts p. */
void API_NS(scalar_add) (
    scalar_t out,
    const scalar_t a,
    const scalar_t b
) {
    decaf_dword_t chain = 0;
    unsigned int i;
    for (i=0; i<SCALAR_LIMBS; i++) {
        chain = (chain + a->limb[i]) + b->limb[i];
        out->limb[i] = chain;
        chain >>= WBITS;
    }
    sc_subx(out, out->limb, sc_p, sc_p, chain);
}

/** out = a/2 mod p (p odd). Constant time: if a is odd, add p (making
 * the sum even), then shift right one bit, carrying in the top bit. */
static NOINLINE void
sc_halve (
    scalar_t out,
    const scalar_t a,
    const scalar_t p
) {
    decaf_word_t mask = -(a->limb[0] & 1);  /* all-ones iff a is odd */
    decaf_dword_t chain = 0;
    unsigned int i;
    for (i=0; i<SCALAR_LIMBS; i++) {
        chain = (chain + a->limb[i]) + (p->limb[i] & mask);
        out->limb[i] = chain;
        chain >>= WBITS;
    }
    /* Shift right by one, pulling each bit down from the next limb. */
    for (i=0; i<SCALAR_LIMBS-1; i++) {
        out->limb[i] = out->limb[i]>>1 | out->limb[i+1]<<(WBITS-1);
    }
    out->limb[i] = out->limb[i]>>1 | chain<<(WBITS-1);
}

/** Set a scalar from a 64-bit unsigned integer (no reduction needed,
 * since 2^64 < sc_p for the supported curves). */
void
API_NS(scalar_set_unsigned) (
    scalar_t out,
    uint64_t w
) {
    memset(out,0,sizeof(scalar_t));
    unsigned int i = 0;
    for (; i<sizeof(uint64_t)/sizeof(decaf_word_t); i++) {
        out->limb[i] = w;
        /* Avoid a full-width (UB) shift when word size == 64 bits. */
        w >>= (sizeof(uint64_t) > sizeof(decaf_word_t)) ? 8*sizeof(decaf_word_t) : 0;
    }
}

/** Constant-time scalar equality test.
 * @return DECAF_TRUE iff a == b limb-for-limb. */
decaf_bool_t
API_NS(scalar_eq) (
    const scalar_t a,
    const scalar_t b
) {
    decaf_word_t diff = 0;
    unsigned int i;
    for (i=0; i<SCALAR_LIMBS; i++) {
        diff |= a->limb[i] ^ b->limb[i];
    }
    return mask_to_bool(word_is_zero(diff));
}
/** identity = (0,1) */
const point_t API_NS(point_identity) = {{{{{0}}},{{{1}}},{{{1}}},{{{0}}}}};

/** Map a point on the isogenous curve to the decaf representative s
 * (and -t/s), choosing signs/rotation in constant time so that the
 * encoding is canonical. The toggle_* masks select the alternate
 * representative; point_encode passes all zeros. */
static void
deisogenize (
    gf_s *__restrict__ s,
    gf_s *__restrict__ minus_t_over_s,
    const point_t p,
    mask_t toggle_hibit_s,
    mask_t toggle_hibit_t_over_s,
    mask_t toggle_rotation
) {
#if COFACTOR == 4 && !IMAGINE_TWIST
    (void) toggle_rotation;      /* no 4-torsion rotation in this case */
    gf b, d;
    gf_s *c = s, *a = minus_t_over_s;  /* reuse outputs as scratch */
    gf_mulw_sgn(a, p->y, 1-EDWARDS_D);
    gf_mul(c, a, p->t); /* -dYT, with EDWARDS_D = d-1 */
    gf_mul(a, p->x, p->z);
    gf_sub(d, c, a); /* aXZ-dYT with a=-1 */
    gf_add(a, p->z, p->y);
    gf_sub(b, p->z, p->y);
    gf_mul(c, b, a);
    gf_mulw_sgn(b, c, -EDWARDS_D); /* (a-d)(Z+Y)(Z-Y) */
    mask_t ok = gf_isqrt_chk ( a, b, DECAF_TRUE); /* r in the paper */
    (void)ok; assert(ok);
    gf_mulw_sgn (b, a, -EDWARDS_D); /* u in the paper */
    gf_mul(c,a,d); /* r(aZX-dYT) */
    gf_mul(a,b,p->z); /* uZ */
    gf_add(a,a,a); /* 2uZ */
    cond_neg(c, toggle_hibit_t_over_s ^ ~hibit(a)); /* u <- -u if negative. */
    cond_neg(a, toggle_hibit_t_over_s ^ ~hibit(a)); /* t/s <-? -t/s */
    gf_add(d,c,p->y);
    gf_mul(s,b,d);
    cond_neg(s, toggle_hibit_s ^ hibit(s));
#else
    /* More complicated because of rotation */
    /* MAGIC This code is wrong for certain non-Curve25519 curves;
     * check if it's because of Cofactor==8 or IMAGINE_ROTATION */
    gf c, d;
    gf_s *b = s, *a = minus_t_over_s;
#if IMAGINE_TWIST
    /* Undo the i-twist: work with (-i*X, -i*T). */
    gf x, t;
    gf_mul ( x, p->x, SQRT_MINUS_ONE);
    gf_mul ( t, p->t, SQRT_MINUS_ONE);
    gf_sub ( x, ZERO, x );
    gf_sub ( t, ZERO, t );
    gf_add ( a, p->z, x );
    gf_sub ( b, p->z, x );
    gf_mul ( c, a, b ); /* "zx" = Z^2 - aX^2 = Z^2 - X^2 */
#else
    const gf_s *x = p->x, *t = p->t;
    /* Won't hit the cond_sel below because COFACTOR==8 requires IMAGINE_TWIST for now. */
    gf_sqr ( a, p->z );
    gf_sqr ( b, p->x );
    gf_add ( c, a, b ); /* "zx" = Z^2 - aX^2 = Z^2 + X^2 */
#endif
    gf_mul ( a, p->z, t ); /* "tz" = T*Z */
    gf_sqr ( b, a );
    gf_mul ( d, b, c ); /* (TZ)^2 * (Z^2-aX^2) */
    mask_t ok = gf_isqrt_chk ( b, d, DECAF_TRUE );
    (void)ok; assert(ok);
    gf_mul ( d, b, a ); /* "osx" = 1 / sqrt(z^2-ax^2) */
    gf_mul ( a, b, c );
    gf_mul ( b, a, d ); /* 1/tz */
    mask_t rotate;
#if (COFACTOR == 8)
    /* Cofactor 8: maybe apply the 4-torsion rotation (x,y) -> (y,-x). */
    gf e;
    gf_sqr(e, p->z);
    gf_mul(a, e, b); /* z^2 / tz = z/t = 1/xy */
    rotate = hibit(a) ^ toggle_rotation;
    /* Curve25519: cond select between zx * 1/tz or sqrt(1-d); y=-x */
    gf_mul ( a, b, c );
    cond_sel ( a, a, SQRT_ONE_MINUS_D, rotate );
    cond_sel ( x, p->y, x, rotate );
#else
    (void)toggle_rotation;
    rotate = 0;
#endif
    gf_mul ( c, a, d ); // new "osx"
    gf_mul ( a, c, p->z );
    gf_add ( a, a, a ); // 2 * "osx" * Z
    /* Fix signs in constant time so the encoding is canonical. */
    mask_t tg1 = rotate ^ toggle_hibit_t_over_s ^~ hibit(a);
    cond_neg ( c, tg1 );
    cond_neg ( a, rotate ^ tg1 );
    gf_mul ( d, b, p->z );
    gf_add ( d, d, c );
    gf_mul ( b, d, x ); /* here "x" = y unless rotate */
    cond_neg ( b, toggle_hibit_s ^ hibit(b) );
#endif
}
/** Serialize a point to its canonical SER_BYTES-byte decaf encoding
 * (the field element s produced by deisogenize). */
void API_NS(point_encode)( unsigned char ser[SER_BYTES], const point_t p ) {
    gf s, mtos;
    deisogenize(s,mtos,p,0,0,0);
    gf_serialize ( ser, s );
}

/** Deserialize and validate a point from its decaf encoding.
 * Rejects non-canonical field elements, non-square cases, and (unless
 * allow_identity) the identity. All checks are accumulated into `succ`
 * so the work is constant time.
 * @return DECAF_SUCCESS iff ser was a valid encoding. */
decaf_error_t API_NS(point_decode) (
    point_t p,
    const unsigned char ser[SER_BYTES],
    decaf_bool_t allow_identity
) {
    gf s, a, b, c, d, e, f;
    mask_t succ = gf_deserialize(s, ser);     /* canonical field element? */
    mask_t zero = gf_eq(s, ZERO);
    succ &= bool_to_mask(allow_identity) | ~zero;
    succ &= ~hibit(s);                        /* s must be "non-negative" */
    gf_sqr ( a, s );
#if IMAGINE_TWIST
    gf_sub ( f, ONE, a ); /* f = 1-as^2 = 1-s^2*/
#else
    gf_add ( f, ONE, a ); /* f = 1-as^2 = 1+s^2 */
#endif
    succ &= ~ gf_eq( f, ZERO );
    gf_sqr ( b, f );
    gf_mulw_sgn ( c, a, 4*IMAGINE_TWIST-4*EDWARDS_D );
    gf_add ( c, c, b ); /* t^2 */
    gf_mul ( d, f, s ); /* s(1-as^2) for denoms */
    gf_sqr ( e, d );
    gf_mul ( b, c, e );
    /* One inverse-sqrt gives both 1/t and t/(s(1-as^2)). */
    succ &= gf_isqrt_chk ( e, b, DECAF_TRUE ); /* e = 1/(t s (1-as^2)) */
    gf_mul ( b, e, d ); /* 1/t */
    gf_mul ( d, e, c ); /* d = t / (s(1-as^2)) */
    gf_mul ( e, d, f ); /* t/s */
    mask_t negtos = hibit(e);
    cond_neg(b, negtos);
    cond_neg(d, negtos);
#if IMAGINE_TWIST
    gf_add ( p->z, ONE, a); /* Z = 1+as^2 = 1-s^2 */
#else
    gf_sub ( p->z, ONE, a); /* Z = 1+as^2 = 1-s^2 */
#endif
#if COFACTOR == 8
    gf_mul ( a, p->z, d); /* t(1+s^2) / s(1-s^2) = 2/xy */
    succ &= ~lobit(a); /* = ~hibit(a/2), since hibit(x) = lobit(2x) */
#endif
    gf_mul ( a, f, b ); /* y = (1-s^2) / t */
    gf_mul ( p->y, p->z, a ); /* Y = yZ */
#if IMAGINE_TWIST
    gf_add ( b, s, s );
    gf_mul(p->x, b, SQRT_MINUS_ONE); /* Curve25519 */
#else
    gf_add ( p->x, s, s );
#endif
    gf_mul ( p->t, p->x, a ); /* T = 2s (1-as^2)/t */
    /* s == 0 decodes to the identity; patch Y from 0 to 1 in constant time. */
    p->y->limb[0] -= zero;
    assert(API_NS(point_valid)(p) | ~succ);
    return decaf_succeed_if(mask_to_bool(succ));
}
/* TWISTED_D: the d constant of the internal (possibly twisted) curve;
 * EFF_D/NEG_D split out its magnitude and sign for gf_mulw_sgn. */
#if IMAGINE_TWIST
#define TWISTED_D (-(EDWARDS_D))
#else
#define TWISTED_D ((EDWARDS_D)-1)
#endif
#if TWISTED_D < 0
#define EFF_D (-(TWISTED_D))
#define NEG_D 1
#else
#define EFF_D TWISTED_D
#define NEG_D 0
#endif

/** p = q - r, extended twisted Edwards addition with r negated
 * (same formulas as point_add with the roles of the d-term flipped). */
void API_NS(point_sub) (
    point_t p,
    const point_t q,
    const point_t r
) {
    gf a, b, c, d;
    gf_sub_nr ( b, q->y, q->x );   /* "nr" ops skip full reduction */
    gf_sub_nr ( d, r->y, r->x );
    gf_add_nr ( c, r->y, r->x );
    gf_mul ( a, c, b );
    gf_add_nr ( b, q->y, q->x );
    gf_mul ( p->y, d, b );
    gf_mul ( b, r->t, q->t );
    gf_mulw_sgn ( p->x, b, 2*EFF_D );
    gf_add_nr ( b, a, p->y );
    gf_sub_nr ( c, p->y, a );
    gf_mul ( a, q->z, r->z );
    gf_add_nr ( a, a, a );
#if NEG_D
    gf_sub_nr ( p->y, a, p->x );
    gf_add_nr ( a, a, p->x );
#else
    gf_add_nr ( p->y, a, p->x );
    gf_sub_nr ( a, a, p->x );
#endif
    gf_mul ( p->z, a, p->y );
    gf_mul ( p->x, p->y, c );
    gf_mul ( p->y, a, b );
    gf_mul ( p->t, b, c );
}

/** p = q + r, unified extended twisted Edwards addition
 * (Hisil et al. formulas; works for doubling and the identity too). */
void API_NS(point_add) (
    point_t p,
    const point_t q,
    const point_t r
) {
    gf a, b, c, d;
    gf_sub_nr ( b, q->y, q->x );
    gf_sub_nr ( c, r->y, r->x );
    gf_add_nr ( d, r->y, r->x );
    gf_mul ( a, c, b );
    gf_add_nr ( b, q->y, q->x );
    gf_mul ( p->y, d, b );
    gf_mul ( b, r->t, q->t );
    gf_mulw_sgn ( p->x, b, 2*EFF_D );
    gf_add_nr ( b, a, p->y );
    gf_sub_nr ( c, p->y, a );
    gf_mul ( a, q->z, r->z );
    gf_add_nr ( a, a, a );
#if NEG_D
    gf_add_nr ( p->y, a, p->x );
    gf_sub_nr ( a, a, p->x );
#else
    gf_sub_nr ( p->y, a, p->x );
    gf_add_nr ( a, a, p->x );
#endif
    gf_mul ( p->z, a, p->y );
    gf_mul ( p->x, p->y, c );
    gf_mul ( p->y, a, b );
    gf_mul ( p->t, b, c );
}
/** p = 2q. When before_double is nonzero the extended coordinate T is
 * left uncomputed, saving a multiply — legal when the next operation is
 * another doubling, which does not read T. */
static NOINLINE void
point_double_internal (
    point_t p,
    const point_t q,
    int before_double
) {
    gf a, b, c, d;
    gf_sqr ( c, q->x );
    gf_sqr ( a, q->y );
    gf_add_nr ( d, c, a );
    gf_add_nr ( p->t, q->y, q->x );
    gf_sqr ( b, p->t );
    gf_subx_nr ( b, b, d, 3 );   /* unreduced subtract with headroom 3 */
    gf_sub_nr ( p->t, a, c );
    gf_sqr ( p->x, q->z );
    gf_add_nr ( p->z, p->x, p->x );
    gf_subx_nr ( a, p->z, p->t, 4 );
    gf_mul ( p->x, a, b );
    gf_mul ( p->z, p->t, a );
    gf_mul ( p->y, p->t, d );
    if (!before_double) gf_mul ( p->t, b, d );
}

/** Public doubling: always computes the full extended coordinates. */
void API_NS(point_double)(point_t p, const point_t q) {
    point_double_internal(p,q,0);
}

/** nega = -a: negate the X and T coordinates (Edwards negation). */
void API_NS(point_negate) (
    point_t nega,
    const point_t a
) {
    gf_sub(nega->x, ZERO, a->x);
    gf_copy(nega->y, a->y);
    gf_copy(nega->z, a->z);
    gf_sub(nega->t, ZERO, a->t);
}
/** Little-endian deserialize up to nbytes bytes into a scalar,
 * zero-padding the remaining limbs. No reduction is performed. */
static INLINE void
scalar_decode_short (
    scalar_t s,
    const unsigned char ser[SER_BYTES],
    unsigned int nbytes
) {
    unsigned int i,j,k=0;
    for (i=0; i<SCALAR_LIMBS; i++) {
        decaf_word_t out = 0;
        for (j=0; j<sizeof(decaf_word_t) && k<nbytes; j++,k++) {
            out |= ((decaf_word_t)ser[k])<<(8*j);
        }
        s->limb[i] = out;
    }
}

/** Deserialize a scalar and check that it was fully reduced (< sc_p).
 * The output is reduced either way (via the multiply-by-one trick).
 * @return DECAF_SUCCESS iff the input was already canonical. */
decaf_error_t API_NS(scalar_decode)(
    scalar_t s,
    const unsigned char ser[SER_BYTES]
) {
    unsigned int i;
    scalar_decode_short(s, ser, SER_BYTES);
    /* Constant-time compare: accum ends as -1 iff s < sc_p. */
    decaf_dsword_t accum = 0;
    for (i=0; i<SCALAR_LIMBS; i++) {
        accum = (accum + s->limb[i] - sc_p->limb[i]) >> WBITS;
    }
    /* Here accum == 0 or -1 */
    API_NS(scalar_mul)(s,s,API_NS(scalar_one)); /* ham-handed reduce */
    return decaf_succeed_if(~word_is_zero(accum));
}

/** Securely erase a scalar. */
void API_NS(scalar_destroy) (
    scalar_t scalar
) {
    decaf_bzero(scalar, sizeof(scalar_t));
}

/** Explicitly discard a boolean result (silences warnings where the
 * failure case is handled by the surrounding arithmetic). */
static INLINE void ignore_result ( decaf_bool_t boo ) {
    (void)boo;
}

/** Deserialize an arbitrary-length little-endian byte string and reduce
 * it mod sc_p. Processes the trailing partial chunk first, then folds in
 * each full SER_BYTES chunk with a multiply by R (Horner in base 2^(8*SER_BYTES)). */
void API_NS(scalar_decode_long)(
    scalar_t s,
    const unsigned char *ser,
    size_t ser_len
) {
    if (ser_len == 0) {
        API_NS(scalar_copy)(s, API_NS(scalar_zero));
        return;
    }
    size_t i;
    scalar_t t1, t2;
    /* i = offset of the last (possibly partial) chunk. */
    i = ser_len - (ser_len%SER_BYTES);
    if (i==ser_len) i -= SER_BYTES;
    scalar_decode_short(t1, &ser[i], ser_len-i);
    if (ser_len == sizeof(scalar_t)) {
        assert(i==0);
        /* ham-handed reduce */
        API_NS(scalar_mul)(s,t1,API_NS(scalar_one));
        API_NS(scalar_destroy)(t1);
        return;
    }
    while (i) {
        i -= SER_BYTES;
        sc_montmul(t1,t1,sc_r2);    /* t1 *= R = 2^(8*SER_BYTES) mod p */
        ignore_result( API_NS(scalar_decode)(t2, ser+i) );
        API_NS(scalar_add)(t1, t1, t2);
    }
    API_NS(scalar_copy)(s, t1);
    API_NS(scalar_destroy)(t1);
    API_NS(scalar_destroy)(t2);
}

/** Serialize a scalar to little-endian bytes (SER_BYTES total). */
void API_NS(scalar_encode)(
    unsigned char ser[SER_BYTES],
    const scalar_t s
) {
    unsigned int i,j,k=0;
    for (i=0; i<SCALAR_LIMBS; i++) {
        for (j=0; j<sizeof(decaf_word_t); j++,k++) {
            ser[k] = s->limb[i] >> (8*j);
        }
    }
}
/* Operations on [p]niels */

/** Constant-time conditional negation of a niels point: since
 * a = Y-X and b = Y+X, negating X swaps a and b and negates c = 2dT. */
static INLINE void
cond_neg_niels (
    niels_t n,
    mask_t neg
) {
    cond_swap(n->a, n->b, neg);
    cond_neg(n->c, neg);
}

/** Convert an extended point to projective niels form:
 * (a,b,c,z) = (Y-X, Y+X, 2d'T, 2Z). */
static NOINLINE void pt_to_pniels (
    pniels_t b,
    const point_t a
) {
    gf_sub ( b->n->a, a->y, a->x );
    gf_add ( b->n->b, a->x, a->y );
    gf_mulw_sgn ( b->n->c, a->t, 2*TWISTED_D );
    gf_add ( b->z, a->z, a->z );
}

/** Convert projective niels back to an extended point. */
static NOINLINE void pniels_to_pt (
    point_t e,
    const pniels_t d
) {
    gf eu;
    gf_add ( eu, d->n->b, d->n->a );   /* 2Y */
    gf_sub ( e->y, d->n->b, d->n->a ); /* 2X */
    gf_mul ( e->t, e->y, eu);          /* 4XY */
    gf_mul ( e->x, d->z, e->y );
    gf_mul ( e->y, d->z, eu );
    gf_sqr ( e->z, d->z );
}

/** Convert an affine niels point (implicit Z = 1) to extended form. */
static NOINLINE void
niels_to_pt (
    point_t e,
    const niels_t n
) {
    gf_add ( e->y, n->b, n->a );
    gf_sub ( e->x, n->b, n->a );
    gf_mul ( e->t, e->y, e->x );
    gf_copy ( e->z, ONE );
}

/** d += e (mixed extended + affine-niels addition). With before_double
 * set, T is left uncomputed for a following doubling. */
static NOINLINE void
add_niels_to_pt (
    point_t d,
    const niels_t e,
    int before_double
) {
    gf a, b, c;
    gf_sub_nr ( b, d->y, d->x );
    gf_mul ( a, e->a, b );
    gf_add_nr ( b, d->x, d->y );
    gf_mul ( d->y, e->b, b );
    gf_mul ( d->x, e->c, d->t );
    gf_add_nr ( c, a, d->y );
    gf_sub_nr ( b, d->y, a );
    gf_sub_nr ( d->y, d->z, d->x );
    gf_add_nr ( a, d->x, d->z );
    gf_mul ( d->z, a, d->y );
    gf_mul ( d->x, d->y, b );
    gf_mul ( d->y, a, c );
    if (!before_double) gf_mul ( d->t, b, c );
}

/** d -= e: same as add_niels_to_pt with the point negated, i.e. the
 * roles of e->a/e->b swapped and the d->x term's sign flipped. */
static NOINLINE void
sub_niels_from_pt (
    point_t d,
    const niels_t e,
    int before_double
) {
    gf a, b, c;
    gf_sub_nr ( b, d->y, d->x );
    gf_mul ( a, e->b, b );
    gf_add_nr ( b, d->x, d->y );
    gf_mul ( d->y, e->a, b );
    gf_mul ( d->x, e->c, d->t );
    gf_add_nr ( c, a, d->y );
    gf_sub_nr ( b, d->y, a );
    gf_add_nr ( d->y, d->z, d->x );
    gf_sub_nr ( a, d->z, d->x );
    gf_mul ( d->z, a, d->y );
    gf_mul ( d->x, d->y, b );
    gf_mul ( d->y, a, c );
    if (!before_double) gf_mul ( d->t, b, c );
}
/** p += pn (projective niels): fold pn's denominator into p's Z, then
 * do the affine-niels addition. */
static void
add_pniels_to_pt (
    point_t p,
    const pniels_t pn,
    int before_double
) {
    gf L0;
    gf_mul ( L0, p->z, pn->z );
    gf_copy ( p->z, L0 );
    add_niels_to_pt( p, pn->n, before_double );
}

/** p -= pn (projective niels), mirroring add_pniels_to_pt. */
static void
sub_pniels_from_pt (
    point_t p,
    const pniels_t pn,
    int before_double
) {
    gf L0;
    gf_mul ( L0, p->z, pn->z );
    gf_copy ( p->z, L0 );
    sub_niels_from_pt( p, pn->n, before_double );
}

/** Thin wrapper over the constant-time table lookup (idx scanned with
 * masked copies, so the access pattern is independent of idx). */
static INLINE void
constant_time_lookup_xx (
    void *__restrict__ out_,
    const void *table_,
    word_t elem_bytes,
    word_t n_table,
    word_t idx
) {
    constant_time_lookup(out_,table_,elem_bytes,n_table,idx);
}

/** Fill multiples[i] = (2i+1)*b in pniels form, for the fixed-window
 * scalar multiply. Scratch material is zeroized before returning. */
static NOINLINE void
prepare_fixed_window(
    pniels_t *multiples,
    const point_t b,
    int ntable
) {
    point_t tmp;
    pniels_t pn;
    int i;
    point_double_internal(tmp, b, 0);
    pt_to_pniels(pn, tmp);          /* pn = 2b, the stride */
    pt_to_pniels(multiples[0], b);
    API_NS(point_copy)(tmp, b);
    for (i=1; i<ntable; i++) {
        add_pniels_to_pt(tmp, pn, 0);
        pt_to_pniels(multiples[i], tmp);
    }
    decaf_bzero(pn,sizeof(pn));
    decaf_bzero(tmp,sizeof(tmp));
}
/** a = scalar * b, constant time.
 * Signed fixed-window method: the scalar is first biased by
 * point_scalarmul_adjustment and halved so that every window digit is
 * odd, letting each window be handled with one constant-time table
 * lookup plus a masked negation (no secret-dependent branches). */
void API_NS(point_scalarmul) (
    point_t a,
    const point_t b,
    const scalar_t scalar
) {
    const int WINDOW = DECAF_WINDOW_BITS,
        WINDOW_MASK = (1<<WINDOW)-1,
        WINDOW_T_MASK = WINDOW_MASK >> 1,
        NTABLE = 1<<(WINDOW-1);

    /* Recode: scalar1x = (scalar + adjustment)/2 gives odd window digits. */
    scalar_t scalar1x;
    API_NS(scalar_add)(scalar1x, scalar, point_scalarmul_adjustment);
    sc_halve(scalar1x,scalar1x,sc_p);

    /* Set up a precomputed table with odd multiples of b. */
    pniels_t pn, multiples[NTABLE];
    point_t tmp;
    prepare_fixed_window(multiples, b, NTABLE);

    /* Initialize.  Start at the top partial window. */
    int i,j,first=1;
    i = SCALAR_BITS - ((SCALAR_BITS-1) % WINDOW) - 1;
    for (; i>=0; i-=WINDOW) {
        /* Fetch another block of bits */
        word_t bits = scalar1x->limb[i/WBITS] >> (i%WBITS);
        if (i%WBITS >= WBITS-WINDOW && i/WBITS<SCALAR_LIMBS-1) {
            /* Window straddles a limb boundary: pull in the high part. */
            bits ^= scalar1x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
        }
        bits &= WINDOW_MASK;
        /* Signed digit: top bit selects negation, applied via masks. */
        mask_t inv = (bits>>(WINDOW-1))-1;
        bits ^= inv;

        /* Add in from table. Compute t only on last iteration. */
        constant_time_lookup_xx(pn, multiples, sizeof(pn), NTABLE, bits & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv);
        if (first) {
            pniels_to_pt(tmp, pn);
            first = 0;
        } else {
            /* Using Hisil et al's lookahead method instead of extensible here
             * for no particular reason. Double WINDOW times, but only compute t on
             * the last one.
             */
            for (j=0; j<WINDOW-1; j++)
                point_double_internal(tmp, tmp, -1);
            point_double_internal(tmp, tmp, 0);
            add_pniels_to_pt(tmp, pn, i ? -1 : 0);
        }
    }

    /* Write out the answer */
    API_NS(point_copy)(a,tmp);

    /* Scrub secret-dependent intermediates. */
    decaf_bzero(scalar1x,sizeof(scalar1x));
    decaf_bzero(pn,sizeof(pn));
    decaf_bzero(multiples,sizeof(multiples));
    decaf_bzero(tmp,sizeof(tmp));
}
/** a = scalarb * b + scalarc * c, constant time.
 * Same signed fixed-window recoding as point_scalarmul, run over two
 * scalars simultaneously: the doubling chain is shared and each window
 * step performs one masked table addition per input point. */
void API_NS(point_double_scalarmul) (
    point_t a,
    const point_t b,
    const scalar_t scalarb,
    const point_t c,
    const scalar_t scalarc
) {
    const int WINDOW = DECAF_WINDOW_BITS,
        WINDOW_MASK = (1<<WINDOW)-1,
        WINDOW_T_MASK = WINDOW_MASK >> 1,
        NTABLE = 1<<(WINDOW-1);

    /* Recode both scalars so every window digit is odd. */
    scalar_t scalar1x, scalar2x;
    API_NS(scalar_add)(scalar1x, scalarb, point_scalarmul_adjustment);
    sc_halve(scalar1x,scalar1x,sc_p);
    API_NS(scalar_add)(scalar2x, scalarc, point_scalarmul_adjustment);
    sc_halve(scalar2x,scalar2x,sc_p);

    /* Set up a precomputed table with odd multiples of b. */
    pniels_t pn, multiples1[NTABLE], multiples2[NTABLE];
    point_t tmp;
    prepare_fixed_window(multiples1, b, NTABLE);
    prepare_fixed_window(multiples2, c, NTABLE);

    /* Initialize.  Start at the top partial window. */
    int i,j,first=1;
    i = SCALAR_BITS - ((SCALAR_BITS-1) % WINDOW) - 1;
    for (; i>=0; i-=WINDOW) {
        /* Fetch another block of bits */
        word_t bits1 = scalar1x->limb[i/WBITS] >> (i%WBITS),
            bits2 = scalar2x->limb[i/WBITS] >> (i%WBITS);
        if (i%WBITS >= WBITS-WINDOW && i/WBITS<SCALAR_LIMBS-1) {
            /* Window straddles a limb boundary: pull in the high parts. */
            bits1 ^= scalar1x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
            bits2 ^= scalar2x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
        }
        bits1 &= WINDOW_MASK;
        bits2 &= WINDOW_MASK;
        /* Signed digits: top bit of each selects a masked negation. */
        mask_t inv1 = (bits1>>(WINDOW-1))-1;
        mask_t inv2 = (bits2>>(WINDOW-1))-1;
        bits1 ^= inv1;
        bits2 ^= inv2;

        /* Add in from table. Compute t only on last iteration. */
        constant_time_lookup_xx(pn, multiples1, sizeof(pn), NTABLE, bits1 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv1);
        if (first) {
            pniels_to_pt(tmp, pn);
            first = 0;
        } else {
            /* Using Hisil et al's lookahead method instead of extensible here
             * for no particular reason. Double WINDOW times, but only compute t on
             * the last one.
             */
            for (j=0; j<WINDOW-1; j++)
                point_double_internal(tmp, tmp, -1);
            point_double_internal(tmp, tmp, 0);
            add_pniels_to_pt(tmp, pn, 0);
        }
        constant_time_lookup_xx(pn, multiples2, sizeof(pn), NTABLE, bits2 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv2);
        add_pniels_to_pt(tmp, pn, i?-1:0);
    }

    /* Write out the answer */
    API_NS(point_copy)(a,tmp);

    /* Scrub secret-dependent intermediates. */
    decaf_bzero(scalar1x,sizeof(scalar1x));
    decaf_bzero(scalar2x,sizeof(scalar2x));
    decaf_bzero(pn,sizeof(pn));
    decaf_bzero(multiples1,sizeof(multiples1));
    decaf_bzero(multiples2,sizeof(multiples2));
    decaf_bzero(tmp,sizeof(tmp));
}
/**
 * Compute two scalar multiples of the same point at once:
 * a1 = scalar1 * b and a2 = scalar2 * b, sharing the point doublings.
 *
 * Constant-time: a fixed-width signed window of the (adjusted, halved)
 * scalars is consumed per iteration, with constant-time table lookups and
 * insertions, so the memory access pattern is independent of the scalars.
 *
 * @param a1      [out] scalar1 * b.
 * @param a2      [out] scalar2 * b.
 * @param b       [in]  the point to multiply.
 * @param scalar1 [in]  first scalar.
 * @param scalar2 [in]  second scalar.
 */
void API_NS(point_dual_scalarmul) (
    point_t a1,
    point_t a2,
    const point_t b,
    const scalar_t scalar1,
    const scalar_t scalar2
) {
    const int WINDOW = DECAF_WINDOW_BITS,
        WINDOW_MASK = (1<<WINDOW)-1,
        WINDOW_T_MASK = WINDOW_MASK >> 1,
        NTABLE = 1<<(WINDOW-1);

    /* Adjust and halve the scalars so every window digit can be treated as
     * odd/signed; the same adjustment constant is used by point_scalarmul. */
    scalar_t scalar1x, scalar2x;
    API_NS(scalar_add)(scalar1x, scalar1, point_scalarmul_adjustment);
    sc_halve(scalar1x,scalar1x,sc_p);
    API_NS(scalar_add)(scalar2x, scalar2, point_scalarmul_adjustment);
    sc_halve(scalar2x,scalar2x,sc_p);

    /* Set up a precomputed table with odd multiples of b. */
    point_t multiples1[NTABLE], multiples2[NTABLE], working, tmp;
    pniels_t pn;

    API_NS(point_copy)(working, b);

    /* Initialize. */
    int i,j;
    for (i=0; i<NTABLE; i++) {
        API_NS(point_copy)(multiples1[i], API_NS(point_identity));
        API_NS(point_copy)(multiples2[i], API_NS(point_identity));
    }

    /* Walk the scalars from the low end, one window per iteration.
     * "working" holds 2^i * b; each window digit of each scalar selects
     * which accumulator slot receives +/- working. */
    for (i=0; i<SCALAR_BITS; i+=WINDOW) {
        if (i) {
            /* Advance working by WINDOW doublings (t computed on the last). */
            for (j=0; j<WINDOW-1; j++)
                point_double_internal(working, working, -1);
            point_double_internal(working, working, 0);
        }

        /* Fetch another block of bits, stitching across limb boundaries. */
        word_t bits1 = scalar1x->limb[i/WBITS] >> (i%WBITS),
               bits2 = scalar2x->limb[i/WBITS] >> (i%WBITS);
        if (i%WBITS >= WBITS-WINDOW && i/WBITS<SCALAR_LIMBS-1) {
            bits1 ^= scalar1x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
            bits2 ^= scalar2x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
        }
        bits1 &= WINDOW_MASK;
        bits2 &= WINDOW_MASK;

        /* Top window bit selects sign; inv* is an all-ones mask when the
         * digit is negative, and bits^inv folds the digit into [0, NTABLE). */
        mask_t inv1 = (bits1>>(WINDOW-1))-1;
        mask_t inv2 = (bits2>>(WINDOW-1))-1;
        bits1 ^= inv1;
        bits2 ^= inv2;

        pt_to_pniels(pn, working);

        /* Accumulator 1: fetch slot, add +/- working, write back --
         * all via constant-time lookup/insert. */
        constant_time_lookup_xx(tmp, multiples1, sizeof(tmp), NTABLE, bits1 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv1);
        /* add_pniels_to_pt(multiples1[bits1 & WINDOW_T_MASK], pn, 0); */
        add_pniels_to_pt(tmp, pn, 0);
        constant_time_insert(multiples1, tmp, sizeof(tmp), NTABLE, bits1 & WINDOW_T_MASK);

        /* Accumulator 2: pn is currently negated by inv1, so xor with inv2
         * to get the sign required for scalar2's digit. */
        constant_time_lookup_xx(tmp, multiples2, sizeof(tmp), NTABLE, bits2 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv1^inv2);
        /* add_pniels_to_pt(multiples2[bits2 & WINDOW_T_MASK], pn, 0); */
        add_pniels_to_pt(tmp, pn, 0);
        constant_time_insert(multiples2, tmp, sizeof(tmp), NTABLE, bits2 & WINDOW_T_MASK);
    }

    /* Combine the slot accumulators: slot k holds the sum of all window
     * contributions with digit magnitude k, so the answer is
     * sum_k (2k+1) * multiples[k], computed with a running-suffix trick. */
    if (NTABLE > 1) {
        API_NS(point_copy)(working, multiples1[NTABLE-1]);
        API_NS(point_copy)(tmp    , multiples2[NTABLE-1]);

        for (i=NTABLE-1; i>1; i--) {
            API_NS(point_add)(multiples1[i-1], multiples1[i-1], multiples1[i]);
            API_NS(point_add)(multiples2[i-1], multiples2[i-1], multiples2[i]);
            API_NS(point_add)(working, working, multiples1[i-1]);
            API_NS(point_add)(tmp, tmp, multiples2[i-1]);
        }

        API_NS(point_add)(multiples1[0], multiples1[0], multiples1[1]);
        API_NS(point_add)(multiples2[0], multiples2[0], multiples2[1]);
        point_double_internal(working, working, 0);
        point_double_internal(tmp, tmp, 0);
        API_NS(point_add)(a1, working, multiples1[0]);
        API_NS(point_add)(a2, tmp, multiples2[0]);
    } else {
        API_NS(point_copy)(a1, multiples1[0]);
        API_NS(point_copy)(a2, multiples2[0]);
    }

    /* Zeroize secret-dependent intermediates. */
    decaf_bzero(scalar1x,sizeof(scalar1x));
    decaf_bzero(scalar2x,sizeof(scalar2x));
    decaf_bzero(pn,sizeof(pn));
    decaf_bzero(multiples1,sizeof(multiples1));
    decaf_bzero(multiples2,sizeof(multiples2));
    decaf_bzero(tmp,sizeof(tmp));
    decaf_bzero(working,sizeof(working));
}
/**
 * Test whether two points are equal as decaf (cofactor-quotient) elements.
 * Compares the projective ratios x/y by cross-multiplication, so no field
 * inversion is needed; returns a decaf_bool_t (all-ones / zero).
 */
decaf_bool_t API_NS(point_eq) ( const point_t p, const point_t q ) {
    /* equality mod 2-torsion compares x/y */
    gf a, b;
    gf_mul ( a, p->y, q->x );
    gf_mul ( b, q->y, p->x );
    mask_t succ = gf_eq(a,b);

    #if (COFACTOR == 8) && IMAGINE_TWIST
    /* With cofactor 8 the quotient also identifies points that differ by a
     * 4-torsion "rotation", so additionally accept p->y*q->y == q->x*p->x. */
    gf_mul ( a, p->y, q->y );
    gf_mul ( b, q->x, p->x );
    #if !(IMAGINE_TWIST)
        /* NOTE(review): unreachable -- the enclosing #if requires
         * IMAGINE_TWIST, so this negation branch is dead. Presumably kept
         * for symmetry with a non-twisted variant; confirm before removing. */
        gf_sub ( a, ZERO, a );
    #else
        /* Interesting note: the 4tor would normally be rotation.
         * But because of the *i twist, it's actually
         * (x,y) <-> (iy,ix)
         */

        /* No code, just a comment. */
    #endif
    succ |= gf_eq(a,b);
    #endif

    return mask_to_bool(succ);
}
/**
 * Map a field-element-sized string to a curve point (Elligator-style map,
 * then isogenize to the twisted Edwards curve). The output covers only part
 * of the curve, hence "nonuniform"; use point_from_hash_uniform for a
 * statistically uniform distribution.
 *
 * @param p   [out] the resulting point (asserted valid in debug builds).
 * @param ser [in]  SER_BYTES of (hash) input, reduced mod p on load.
 */
void API_NS(point_from_hash_nonuniform) (
    point_t p,
    const unsigned char ser[SER_BYTES]
) {
    gf r0,r,a,b,c,N,e;
    gf_deserialize(r0,ser);
    gf_strong_reduce(r0);
    gf_sqr(a,r0);

    /* r = qnr * r0^2, where qnr is a fixed quadratic nonresidue:
     * sqrt(-1) when p = 5 (mod 8), -1 when p = 3 or 7 (mod 8). */
    #if P_MOD_8 == 5
        /* r = QNR * r0^2 */
        gf_mul(r,a,SQRT_MINUS_ONE);
    #elif P_MOD_8 == 3 || P_MOD_8 == 7
        gf_sub(r,ZERO,a);
    #else
        #error "Only supporting p=3,5,7 mod 8"
    #endif

    /* Compute D@c := (dr+a-d)(dr-ar-d) with a=1 */
    gf_sub(a,r,ONE);
    gf_mulw_sgn(b,a,EDWARDS_D); /* dr-d */
    gf_add(a,b,ONE);
    gf_sub(b,b,r);
    gf_mul(c,a,b);

    /* compute N := (r+1)(a-2d) */
    gf_add(a,r,ONE);
    gf_mulw_sgn(N,a,1-2*EDWARDS_D);

    /* e = +-sqrt(1/ND) or +-r0 * sqrt(qnr/ND) */
    gf_mul(a,c,N);
    mask_t square = gf_isqrt_chk(b,a,DECAF_FALSE);
    cond_sel(c,r0,ONE,square); /* r? = square ? 1 : r0 */
    gf_mul(e,b,c);

    /* s@a = +-|N.e| */
    gf_mul(a,N,e);
    cond_neg(a,hibit(a)^square); /* NB this is - what is listed in the paper */

    /* t@b = -+ cN(r-1)((a-2d)e)^2 - 1 */
    gf_mulw_sgn(c,e,1-2*EDWARDS_D); /* (a-2d)e */
    gf_sqr(b,c);
    gf_sub(e,r,ONE);
    gf_mul(c,b,e);
    gf_mul(b,c,N);
    cond_neg(b,square);
    gf_sub(b,b,ONE);

    /* isogenize: convert the Jacobi-quartic-style (s,t) pair to extended
     * twisted Edwards coordinates (x:y:z:t). */
    #if IMAGINE_TWIST
        gf_mul(c,a,SQRT_MINUS_ONE);
        gf_copy(a,c);
    #endif

    gf_sqr(c,a); /* s^2 */
    gf_add(a,a,a); /* 2s */
    gf_add(e,c,ONE);
    gf_mul(p->t,a,e); /* 2s(1+s^2) */
    gf_mul(p->x,a,b); /* 2st */
    gf_sub(a,ONE,c);
    gf_mul(p->y,e,a); /* (1+s^2)(1-s^2) */
    gf_mul(p->z,a,b); /* (1-s^2)t */

    assert(API_NS(point_valid)(p));
}
/**
 * Invert point_from_hash_nonuniform: recover a preimage of p under the
 * Elligator map, if one exists for the given hint.
 *
 * The low 4 bits of hint_ select the branch of the (multi-valued) inverse:
 * bit 0 = sign of s, bit 1 = sign of t/s, bit 2 = sign of the recovered
 * field element r0, bit 3 = which 4-torsion representative (cofactor 8).
 *
 * @param recovered_hash [out] the preimage, if DECAF_SUCCESS is returned.
 * @param p              [in]  the point to invert.
 * @param hint_          [in]  branch-selection hint (see above).
 * @return DECAF_SUCCESS iff a preimage exists on the hinted branch.
 */
decaf_error_t
API_NS(invert_elligator_nonuniform) (
    unsigned char recovered_hash[SER_BYTES],
    const point_t p,
    uint16_t hint_
) {
    /* Expand each hint bit into an all-ones/all-zeros mask. */
    mask_t hint = hint_;
    mask_t sgn_s = -(hint & 1),
        sgn_t_over_s = -(hint>>1 & 1),
        sgn_r0 = -(hint>>2 & 1),
        sgn_ed_T = -(hint>>3 & 1);
    gf a, b, c, d;
    deisogenize(a,c,p,sgn_s,sgn_t_over_s,sgn_ed_T);

    /* ok, a = s; c = -t/s */
    gf_mul(b,c,a);
    gf_sub(b,ONE,b); /* t+1 */
    gf_sqr(c,a); /* s^2 */
    mask_t is_identity = gf_eq(p->t,ZERO);

    {
        /* identity adjustments */
        /* in case of identity, currently c=0, t=0, b=1, will encode to 1 */
        /* if hint is 0, -> 0 */
        /* if hint is to neg t/s, then go to infinity, effectively set s to 1 */
        cond_sel(c,c,ONE,is_identity & sgn_t_over_s);
        cond_sel(b,b,ZERO,is_identity & ~sgn_t_over_s & ~sgn_s); /* identity adjust */
    }

    gf_mulw_sgn(d,c,2*EDWARDS_D-1); /* $d = (2d-a)s^2 */
    gf_add(a,b,d); /* num? */
    gf_sub(d,d,b); /* den? */
    gf_mul(b,a,d); /* n*d */
    cond_sel(a,d,a,sgn_s);

    /* Divide by the quadratic nonresidue used in the forward map. */
    #if P_MOD_8 == 5
        gf_mul(d,b,SQRT_MINUS_ONE);
    #else
        gf_sub(d,ZERO,b);
    #endif

    /* The inverse square root fails exactly when no preimage exists. */
    mask_t succ = gf_isqrt_chk(c,d,DECAF_TRUE);
    gf_mul(b,a,c);
    cond_neg(b, sgn_r0^hibit(b));

    succ &= ~(gf_eq(b,ZERO) & sgn_r0);
    #if COFACTOR == 8
        succ &= ~(is_identity & sgn_ed_T); /* NB: there are no preimages of rotated identity. */
    #endif

    gf_serialize(recovered_hash, b);
    /* TODO: deal with overflow flag */
    return decaf_succeed_if(mask_to_bool(succ));
}
  1142. void API_NS(point_from_hash_uniform) (
  1143. point_t pt,
  1144. const unsigned char hashed_data[2*SER_BYTES]
  1145. ) {
  1146. point_t pt2;
  1147. API_NS(point_from_hash_nonuniform)(pt,hashed_data);
  1148. API_NS(point_from_hash_nonuniform)(pt2,&hashed_data[SER_BYTES]);
  1149. API_NS(point_add)(pt,pt,pt2);
  1150. }
  1151. decaf_error_t
  1152. API_NS(invert_elligator_uniform) (
  1153. unsigned char partial_hash[2*SER_BYTES],
  1154. const point_t p,
  1155. uint16_t hint
  1156. ) {
  1157. point_t pt2;
  1158. API_NS(point_from_hash_nonuniform)(pt2,&partial_hash[SER_BYTES]);
  1159. API_NS(point_sub)(pt2,p,pt2);
  1160. return API_NS(invert_elligator_nonuniform)(partial_hash,pt2,hint);
  1161. }
/**
 * Check that p is a well-formed point: the extended coordinate is
 * consistent (x*y == z*t), the projective curve equation
 * y^2 - x^2 == z^2 + TWISTED_D * t^2 holds, and z != 0.
 */
decaf_bool_t API_NS(point_valid) (
    const point_t p
) {
    gf a,b,c;
    gf_mul(a,p->x,p->y);
    gf_mul(b,p->z,p->t);
    mask_t out = gf_eq(a,b);        /* extended coords: xy == zt */
    gf_sqr(a,p->x);
    gf_sqr(b,p->y);
    gf_sub(a,b,a);                  /* y^2 - x^2 */
    gf_sqr(b,p->t);
    gf_mulw_sgn(c,b,TWISTED_D);     /* d * t^2 */
    gf_sqr(b,p->z);
    gf_add(b,b,c);                  /* z^2 + d*t^2 */
    out &= gf_eq(a,b);
    out &= ~gf_eq(p->z,ZERO);       /* reject the point at infinity */
    return mask_to_bool(out);
}
/**
 * Debugging helper: replace p with a different curve point in the same
 * decaf equivalence class (add a small-order torsion component).
 * With cofactor 8 this applies the 4-torsion (x,y) -> (iy,ix) rotation;
 * otherwise it negates x and y (the 2-torsion point).
 */
void API_NS(point_debugging_torque) (
    point_t q,
    const point_t p
) {
    #if COFACTOR == 8
        gf tmp;
        gf_mul(tmp,p->x,SQRT_MINUS_ONE);  /* tmp = i*x; safe if q aliases p */
        gf_mul(q->x,p->y,SQRT_MINUS_ONE);
        gf_copy(q->y,tmp);
        gf_copy(q->z,p->z);
        gf_sub(q->t,ZERO,p->t);
    #else
        gf_sub(q->x,ZERO,p->x);
        gf_sub(q->y,ZERO,p->y);
        gf_copy(q->z,p->z);
        gf_copy(q->t,p->t);
    #endif
}
  1198. void API_NS(point_debugging_pscale) (
  1199. point_t q,
  1200. const point_t p,
  1201. const uint8_t factor[SER_BYTES]
  1202. ) {
  1203. gf gfac,tmp;
  1204. ignore_result(gf_deserialize(gfac,factor));
  1205. cond_sel(gfac,gfac,ONE,gf_eq(gfac,ZERO));
  1206. gf_mul(tmp,p->x,gfac);
  1207. gf_copy(q->x,tmp);
  1208. gf_mul(tmp,p->y,gfac);
  1209. gf_copy(q->y,tmp);
  1210. gf_mul(tmp,p->z,gfac);
  1211. gf_copy(q->z,tmp);
  1212. gf_mul(tmp,p->t,gfac);
  1213. gf_copy(q->t,tmp);
  1214. }
/**
 * Invert n field elements with a single field inversion (Montgomery's
 * batch-inversion trick): build prefix products, invert the total, then
 * peel the inverses back out. Requires n > 1; out and in must not alias.
 *
 * @param out [out] out[i] = 1/in[i].
 * @param in  [in]  n field elements, all assumed nonzero.
 * @param n   [in]  element count, must be > 1.
 */
static void gf_batch_invert (
    gf *__restrict__ out,
    const gf *in,
    unsigned int n
) {
    gf t1;
    assert(n>1);

    /* out[i+1] accumulates in[0]*...*in[i]; out[0] is used last. */
    gf_copy(out[1], in[0]);
    int i;
    for (i=1; i<(int) (n-1); i++) {
        gf_mul(out[i+1], out[i], in[i]);
    }
    gf_mul(out[0], out[n-1], in[n-1]);  /* product of all inputs */

    gf_invert(out[0], out[0]);          /* the only field inversion */

    /* Walk back: out[i] = prefix(i) * inv(total) = 1/in[i], and fold in[i]
     * back into the running inverse for the next step. */
    for (i=n-1; i>0; i--) {
        gf_mul(t1, out[i], out[0]);
        gf_copy(out[i], t1);
        gf_mul(t1, out[0], in[i]);
        gf_copy(out[0], t1);
    }
}
/**
 * Normalize a table of niels-form points to z = 1: batch-invert the saved
 * z coordinates and scale each entry's a, b, c fields by the corresponding
 * inverse, strongly reducing so entries are in canonical form.
 *
 * @param table [in,out] n niels entries to normalize in place.
 * @param zs    [in]     the z coordinate of each entry.
 * @param zis   [out]    scratch for the batch inverses (zeroed by callers).
 * @param n     [in]     entry count.
 */
static void batch_normalize_niels (
    niels_t *table,
    const gf *zs,
    gf *__restrict__ zis,
    int n
) {
    int i;
    gf product;
    gf_batch_invert(zis, zs, n);

    for (i=0; i<n; i++) {
        /* Scale each field by 1/z and strongly reduce to canonical form. */
        gf_mul(product, table[i]->a, zis[i]);
        gf_strong_reduce(product);
        gf_copy(table[i]->a, product);

        gf_mul(product, table[i]->b, zis[i]);
        gf_strong_reduce(product);
        gf_copy(table[i]->b, product);

        gf_mul(product, table[i]->c, zis[i]);
        gf_strong_reduce(product);
        gf_copy(table[i]->c, product);
    }

    decaf_bzero(product,sizeof(product));
}
/**
 * Fill a precomputed comb table for fixed-base scalar multiplication.
 * Builds COMBS_N sub-tables of 2^(COMBS_T-1) entries each, enumerated in
 * Gray-code order so each successive entry costs one point add/sub, then
 * batch-normalizes all entries to niels form with a single inversion.
 *
 * @param table [out] the comb table to fill.
 * @param base  [in]  the base point.
 */
void API_NS(precompute) (
    precomputed_s *table,
    const point_t base
) {
    const unsigned int n = COMBS_N, t = COMBS_T, s = COMBS_S;
    assert(n*t*s >= SCALAR_BITS);   /* combs must cover every scalar bit */

    point_t working, start, doubles[t-1];
    API_NS(point_copy)(working, base);
    pniels_t pn_tmp;

    gf zs[n<<(t-1)], zis[n<<(t-1)];

    unsigned int i,j,k;

    /* Compute n tables */
    for (i=0; i<n; i++) {

        /* Doubling phase: accumulate the sum of this comb's teeth in
         * "start" and remember the tooth spacings in "doubles". */
        for (j=0; j<t; j++) {
            if (j) API_NS(point_add)(start, start, working);
            else API_NS(point_copy)(start, working);

            if (j==t-1 && i==n-1) break;

            point_double_internal(working, working,0);
            if (j<t-1) API_NS(point_copy)(doubles[j], working);

            for (k=0; k<s-1; k++)
                point_double_internal(working, working, k<s-2);
        }

        /* Gray-code phase: visit all 2^(t-1) sign patterns, flipping one
         * tooth (one add or sub of doubles[k]) per step. */
        for (j=0;; j++) {
            int gray = j ^ (j>>1);
            int idx = (((i+1)<<(t-1))-1) ^ gray;

            pt_to_pniels(pn_tmp, start);
            memcpy(table->table[idx], pn_tmp->n, sizeof(pn_tmp->n));
            gf_copy(zs[idx], pn_tmp->z);  /* save z for batch normalization */

            if (j >= (1u<<(t-1)) - 1) break;
            int delta = (j+1) ^ ((j+1)>>1) ^ gray;  /* single changed bit */

            for (k=0; delta>1; k++)
                delta >>=1;

            if (gray & (1<<k)) {
                API_NS(point_add)(start, start, doubles[k]);
            } else {
                API_NS(point_sub)(start, start, doubles[k]);
            }
        }
    }

    batch_normalize_niels(table->table,(const gf *)zs,zis,n<<(t-1));

    decaf_bzero(zs,sizeof(zs));
    decaf_bzero(zis,sizeof(zis));
    decaf_bzero(pn_tmp,sizeof(pn_tmp));
    decaf_bzero(working,sizeof(working));
    decaf_bzero(start,sizeof(start));
    decaf_bzero(doubles,sizeof(doubles));
}
/** Constant-time table lookup specialized to niels entries: copy
 * table[idx] into ni without an idx-dependent memory access pattern. */
static INLINE void
constant_time_lookup_xx_niels (
    niels_s *__restrict__ ni,
    const niels_t *table,
    int nelts,
    int idx
) {
    constant_time_lookup_xx(ni, table, sizeof(niels_s), nelts, idx);
}
/**
 * Fixed-base scalar multiplication using a precomputed comb table:
 * out = scalar * (the table's base point). Constant-time: every table
 * access goes through a constant-time lookup and digit signs are applied
 * with masked conditional negation.
 *
 * @param out    [out] the product.
 * @param table  [in]  table built by API_NS(precompute).
 * @param scalar [in]  the scalar.
 */
void API_NS(precomputed_scalarmul) (
    point_t out,
    const precomputed_s *table,
    const scalar_t scalar
) {
    int i;
    unsigned j,k;
    const unsigned int n = COMBS_N, t = COMBS_T, s = COMBS_S;

    /* Adjust and halve so every comb digit can be treated as signed-odd. */
    scalar_t scalar1x;
    API_NS(scalar_add)(scalar1x, scalar, precomputed_scalarmul_adjustment);
    sc_halve(scalar1x,scalar1x,sc_p);

    niels_t ni;

    /* One doubling per comb row, n comb lookups per row. */
    for (i=s-1; i>=0; i--) {
        if (i != (int)s-1) point_double_internal(out,out,0);

        for (j=0; j<n; j++) {
            /* Gather this comb's t teeth from the scalar bits. */
            int tab = 0;

            for (k=0; k<t; k++) {
                unsigned int bit = i + s*(k + j*t);
                if (bit < SCALAR_BITS) {
                    tab |= (scalar1x->limb[bit/WBITS] >> (bit%WBITS) & 1) << k;
                }
            }

            /* Top tooth selects sign; fold digit into [0, 2^(t-1)). */
            mask_t invert = (tab>>(t-1))-1;
            tab ^= invert;
            tab &= (1<<(t-1)) - 1;

            constant_time_lookup_xx_niels(ni, &table->table[j<<(t-1)], 1<<(t-1), tab);

            cond_neg_niels(ni, invert);
            if ((i!=(int)s-1)||j) {
                add_niels_to_pt(out, ni, j==n-1 && i);
            } else {
                niels_to_pt(out, ni);  /* very first term initializes out */
            }
        }
    }

    decaf_bzero(ni,sizeof(ni));
    decaf_bzero(scalar1x,sizeof(scalar1x));
}
/** Constant-time select: out = pick_b ? b : a, without branching on pick_b. */
void API_NS(point_cond_sel) (
    point_t out,
    const point_t a,
    const point_t b,
    decaf_bool_t pick_b
) {
    constant_time_select(out,a,b,sizeof(point_t),bool_to_mask(pick_b),0);
}
/** Constant-time select for scalars: out = pick_b ? b : a. The final
 * argument gives the limb stride so selection proceeds limb by limb. */
void API_NS(scalar_cond_sel) (
    scalar_t out,
    const scalar_t a,
    const scalar_t b,
    decaf_bool_t pick_b
) {
    constant_time_select(out,a,b,sizeof(scalar_t),bool_to_mask(pick_b),sizeof(out->limb[0]));
}
/* FUTURE: restore Curve25519 Montgomery ladder? */
/**
 * Decode a serialized point, multiply it by scalar, and re-encode the
 * result. If decoding fails and short_circuit is set, return immediately;
 * otherwise substitute the base point and perform the full multiplication
 * anyway (so the work done does not depend on the input's validity),
 * still returning the decode failure.
 *
 * @param scaled         [out] encoding of scalar * decode(base).
 * @param base           [in]  serialized input point.
 * @param scalar         [in]  the scalar.
 * @param allow_identity [in]  whether the identity decodes successfully.
 * @param short_circuit  [in]  return early on decode failure if set.
 */
decaf_error_t API_NS(direct_scalarmul) (
    uint8_t scaled[SER_BYTES],
    const uint8_t base[SER_BYTES],
    const scalar_t scalar,
    decaf_bool_t allow_identity,
    decaf_bool_t short_circuit
) {
    point_t basep;
    decaf_error_t succ = API_NS(point_decode)(basep, base, allow_identity);
    if (short_circuit && succ != DECAF_SUCCESS) return succ;
    /* On failure (succ == 0), fall back to the base point for the dummy run. */
    API_NS(point_cond_sel)(basep, API_NS(point_base), basep, succ);
    API_NS(point_scalarmul)(basep, basep, scalar);
    API_NS(point_encode)(scaled, basep);
    API_NS(point_destroy)(basep);
    return succ;
}
/**
 * X-coordinate-only Diffie-Hellman scalar multiplication on the Montgomery
 * form of the curve, via the standard constant-time Montgomery ladder
 * (X25519/X448 style). The scalar is clamped in-flight: low bits cleared
 * to the cofactor, top bit forced.
 *
 * @param out    [out] the resulting u-coordinate.
 * @param base   [in]  the input u-coordinate.
 * @param scalar [in]  the private scalar bytes.
 * @return DECAF_FAILURE iff the result is zero (small-order input).
 */
decaf_error_t API_NS(x_direct_scalarmul) (
    uint8_t out[X_PUBLIC_BYTES],
    const uint8_t base[X_PUBLIC_BYTES],
    const uint8_t scalar[X_PRIVATE_BYTES]
) {
    /* (x2:z2) tracks k*P, (x3:z3) tracks (k+1)*P. */
    gf x1, x2, z2, x3, z3, t1, t2;
    ignore_result(gf_deserialize(x1,base));
    gf_copy(x2,ONE);
    gf_copy(z2,ZERO);
    gf_copy(x3,x1);
    gf_copy(z3,ONE);

    int t;
    mask_t swap = 0;

    for (t = X_PRIVATE_BITS-1; t>=0; t--) {
        uint8_t sb = scalar[t/8];

        /* Scalar conditioning: clamp low byte to the cofactor and force
         * the top bit, without modifying the caller's buffer. */
        if (t/8==0) sb &= -(uint8_t)COFACTOR;
        else if (t == X_PRIVATE_BITS-1) sb = -1;

        mask_t k_t = (sb>>(t%8)) & 1;
        k_t = -k_t; /* set to all 0s or all 1s */

        /* Lazy conditional swap: only swap when the key bit changes. */
        swap ^= k_t;
        cond_swap(x2,x3,swap);
        cond_swap(z2,z3,swap);
        swap = k_t;

        /* One combined ladder step: differential add + double. */
        gf_add_nr(t1,x2,z2); /* A = x2 + z2 */
        gf_sub_nr(t2,x2,z2); /* B = x2 - z2 */
        gf_sub_nr(z2,x3,z3); /* D = x3 - z3 */
        gf_mul(x2,t1,z2); /* DA */
        gf_add_nr(z2,z3,x3); /* C = x3 + z3 */
        gf_mul(x3,t2,z2); /* CB */
        gf_sub_nr(z3,x2,x3); /* DA-CB */
        gf_sqr(z2,z3); /* (DA-CB)^2 */
        gf_mul(z3,x1,z2); /* z3 = x1(DA-CB)^2 */
        gf_add_nr(z2,x2,x3); /* (DA+CB) */
        gf_sqr(x3,z2); /* x3 = (DA+CB)^2 */
        gf_sqr(z2,t1); /* AA = A^2 */
        gf_sqr(t1,t2); /* BB = B^2 */
        gf_mul(x2,z2,t1); /* x2 = AA*BB */
        gf_sub_nr(t2,z2,t1); /* E = AA-BB */

        gf_mulw_sgn(t1,t2,-EDWARDS_D); /* E*-d = a24*E */
        gf_add_nr(t1,t1,z2); /* AA + a24*E */
        gf_mul(z2,t2,t1); /* z2 = E(AA+a24*E) */
    }

    /* Finish: undo the pending swap and project to affine. */
    cond_swap(x2,x3,swap);
    cond_swap(z2,z3,swap);
    gf_invert(z2,z2);
    gf_mul(x1,x2,z2);
    gf_serialize(out,x1);
    mask_t nz = ~gf_eq(x1,ZERO);

    decaf_bzero(x1,sizeof(x1));
    decaf_bzero(x2,sizeof(x2));
    decaf_bzero(z2,sizeof(z2));
    decaf_bzero(x3,sizeof(x3));
    decaf_bzero(z3,sizeof(z3));
    decaf_bzero(t1,sizeof(t1));
    decaf_bzero(t2,sizeof(t2));

    return decaf_succeed_if(mask_to_bool(nz));
}
/**
 * X-coordinate scalar multiplication of the fixed base point, computed via
 * the fast precomputed Edwards comb and then isogenized back to the
 * Montgomery curve's u-coordinate.
 *
 * @param out    [out] u-coordinate of scalar * base.
 * @param scalar [in]  private scalar bytes (clamped locally).
 */
void API_NS(x_base_scalarmul) (
    uint8_t out[X_PUBLIC_BYTES],
    const uint8_t scalar[X_PRIVATE_BYTES]
) {
    /* Scalar conditioning: clamp a local copy, as in x_direct_scalarmul. */
    uint8_t scalar2[X_PRIVATE_BYTES];
    memcpy(scalar2,scalar,sizeof(scalar2));
    scalar2[0] &= -(uint8_t)COFACTOR;

    scalar2[X_PRIVATE_BYTES-1] &= ~(-1<<((X_PRIVATE_BITS+7)%8));
    scalar2[X_PRIVATE_BYTES-1] |= 1<<((X_PRIVATE_BITS+7)%8);

    scalar_t the_scalar;
    API_NS(scalar_decode_long)(the_scalar,scalar2,sizeof(scalar2));

    /* We're gonna isogenize by 2, so divide by 2.
     *
     * Why by 2, even though it's a 4-isogeny?
     *
     * The isogeny map looks like
     * Montgomery <-2-> Jacobi <-2-> Edwards
     *
     * Since the Jacobi base point is the PREimage of the iso to
     * the Montgomery curve, and we're going
     * Jacobi -> Edwards -> Jacobi -> Montgomery,
     * we pick up only a factor of 2 over Jacobi -> Montgomery.
     */
    sc_halve(the_scalar,the_scalar,sc_p);
    #if COFACTOR==8
        /* If the base point isn't in the prime-order subgroup (PERF:
         * guarantee that it is?) then a 4-isogeny isn't necessarily
         * enough to clear the cofactor. So add another doubling.
         */
        sc_halve(the_scalar,the_scalar,sc_p);
    #endif
    point_t p;
    API_NS(precomputed_scalarmul)(p,API_NS(precomputed_base),the_scalar);
    #if COFACTOR==8
        API_NS(point_double)(p,p);
    #endif

    /* Isogenize to Montgomery curve: u = (y/x)^2 (negated under the
     * imaginary twist). */
    gf_invert(p->t,p->x); /* 1/x */
    gf_mul(p->z,p->t,p->y); /* y/x */
    gf_sqr(p->y,p->z); /* (y/x)^2 */
    #if IMAGINE_TWIST
        gf_sub(p->y,ZERO,p->y);
    #endif
    gf_serialize(out,p->y);

    decaf_bzero(scalar2,sizeof(scalar2));
    API_NS(scalar_destroy)(the_scalar);
    API_NS(point_destroy)(p);
}
/**
 * @cond internal
 * Control for variable-time scalar multiply algorithms.
 *
 * One entry of a signed-window (wNAF) recoding: the represented term is
 * addend * 2^power, with addend odd. A negative power marks the end of
 * the control list.
 */
struct smvt_control {
  int power, addend;
};
/**
 * Recode a scalar into signed-window (wNAF) form for variable-time
 * multiplication. Fills "control" front-aligned with (power, addend)
 * entries, highest power first, followed by a {-1, 0} terminator.
 * Digits (addends) are odd and fit the (tableBits+1)-bit signed window.
 *
 * @return the number of real (non-terminator) entries written.
 */
static int recode_wnaf (
    struct smvt_control *control, /* [nbits/(tableBits+1) + 3] */
    const scalar_t scalar,
    unsigned int tableBits
) {
    unsigned int table_size = SCALAR_BITS/(tableBits+1) + 3;
    int position = table_size - 1; /* at the end */

    /* place the end marker */
    control[position].power = -1;
    control[position].addend = 0;
    position--;

    /* PERF: Could negate scalar if it's large. But then would need more cases
     * in the actual code that uses it, all for an expected reduction of like 1/5 op.
     * Probably not worth it.
     */

    /* Stream the scalar 16 bits at a time through "current"; the extra
     * high bits hold carries from negative digits. */
    uint64_t current = scalar->limb[0] & 0xFFFF;
    uint32_t mask = (1<<(tableBits+1))-1;

    unsigned int w;
    const unsigned int B_OVER_16 = sizeof(scalar->limb[0]) / 2;
    for (w = 1; w<(SCALAR_BITS-1)/16+3; w++) {
        if (w < (SCALAR_BITS-1)/16+1) {
            /* Refill the 16 high bits of current */
            current += (uint32_t)((scalar->limb[w/B_OVER_16]>>(16*(w%B_OVER_16)))<<16);
        }

        while (current & 0xFFFF) {
            assert(position >= 0);
            /* Skip to the lowest set bit, then take a signed window digit
             * there; choose the negative digit when the bit just above the
             * window is set, so the subtraction's carry clears it. */
            uint32_t pos = __builtin_ctz((uint32_t)current), odd = (uint32_t)current >> pos;
            int32_t delta = odd & mask;
            if (odd & 1<<(tableBits+1)) delta -= (1<<(tableBits+1));
            current -= delta << pos;
            control[position].power = pos + 16*(w-1);
            control[position].addend = delta;
            position--;
        }
        current >>= 16;
    }
    assert(current==0);

    /* Shift the entries (built back-to-front) to the start of the array. */
    position++;
    unsigned int n = table_size - position;
    unsigned int i;
    for (i=0; i<n; i++) {
        control[i] = control[i+position];
    }
    return n-1;
}
/**
 * Build a table of odd multiples of "working" for wNAF multiplication:
 * output[i] = (2i+1) * working, for i in [0, 2^tbits).
 *
 * @param output  [out] 2^tbits pniels entries.
 * @param working [in]  the base point.
 * @param tbits   [in]  log2 of the table size.
 */
static void
prepare_wnaf_table(
    pniels_t *output,
    const point_t working,
    unsigned int tbits
) {
    point_t tmp;
    int i;
    pt_to_pniels(output[0], working);

    if (tbits == 0) return;

    /* tmp starts at 2P; each further entry adds 2P again. */
    API_NS(point_double)(tmp,working);
    pniels_t twop;
    pt_to_pniels(twop, tmp);

    add_pniels_to_pt(tmp, output[0],0);   /* tmp = 3P */
    pt_to_pniels(output[1], tmp);

    for (i=2; i < 1<<tbits; i++) {
        add_pniels_to_pt(tmp, twop,0);    /* tmp = (2i+1)P */
        pt_to_pniels(output[i], tmp);
    }

    API_NS(point_destroy)(tmp);
}
/* The fixed-base wNAF table is stored (and linked) as raw field elements;
 * view it as an array of normalized niels entries. */
extern const gf API_NS(precomputed_wnaf_as_fe)[];
static const niels_t *API_NS(wnaf_base) = (const niels_t *)API_NS(precomputed_wnaf_as_fe);

/* Byte size of the fixed wNAF table, exported for table-generation tooling. */
const size_t API_NS(sizeof_precomputed_wnafs) __attribute((visibility("hidden")))
    = sizeof(niels_t)<<DECAF_WNAF_FIXED_TABLE_BITS;

void API_NS(precompute_wnafs) (
    niels_t out[1<<DECAF_WNAF_FIXED_TABLE_BITS],
    const point_t base
) __attribute__ ((visibility ("hidden")));
/**
 * Generate the fixed-base wNAF table: odd multiples of base in normalized
 * (z = 1) niels form, suitable for baking into the binary.
 */
void API_NS(precompute_wnafs) (
    niels_t out[1<<DECAF_WNAF_FIXED_TABLE_BITS],
    const point_t base
) {
    pniels_t tmp[1<<DECAF_WNAF_FIXED_TABLE_BITS];
    gf zs[1<<DECAF_WNAF_FIXED_TABLE_BITS], zis[1<<DECAF_WNAF_FIXED_TABLE_BITS];
    int i;
    prepare_wnaf_table(tmp,base,DECAF_WNAF_FIXED_TABLE_BITS);
    /* Strip the z coordinates out of the projective entries... */
    for (i=0; i<1<<DECAF_WNAF_FIXED_TABLE_BITS; i++) {
        memcpy(out[i], tmp[i]->n, sizeof(niels_t));
        gf_copy(zs[i], tmp[i]->z);
    }
    /* ...and divide them out with one batched inversion. */
    batch_normalize_niels(out, (const gf *)zs, zis, 1<<DECAF_WNAF_FIXED_TABLE_BITS);

    decaf_bzero(tmp,sizeof(tmp));
    decaf_bzero(zs,sizeof(zs));
    decaf_bzero(zis,sizeof(zis));
}
/**
 * Variable-time double scalar multiplication for signature verification:
 * combo = scalar1 * base + scalar2 * base2, where "base" is the fixed base
 * point with a precomputed wNAF table. NOT constant-time -- both scalars
 * must be public (hence "non_secret").
 *
 * Interleaves two wNAF recodings: the fixed-base one against the baked-in
 * table, the variable-base one against a freshly built table for base2.
 */
void API_NS(base_double_scalarmul_non_secret) (
    point_t combo,
    const scalar_t scalar1,
    const point_t base2,
    const scalar_t scalar2
) {
    const int table_bits_var = DECAF_WNAF_VAR_TABLE_BITS,
        table_bits_pre = DECAF_WNAF_FIXED_TABLE_BITS;
    struct smvt_control control_var[SCALAR_BITS/(table_bits_var+1)+3];
    struct smvt_control control_pre[SCALAR_BITS/(table_bits_pre+1)+3];

    int ncb_pre = recode_wnaf(control_pre, scalar1, table_bits_pre);
    int ncb_var = recode_wnaf(control_var, scalar2, table_bits_var);

    pniels_t precmp_var[1<<table_bits_var];
    prepare_wnaf_table(precmp_var, base2, table_bits_var);

    /* Initialize combo from whichever recoding has the higher top power
     * (or both, if equal); i tracks the current bit position. */
    int contp=0, contv=0, i = control_var[0].power;

    if (i < 0) {
        /* scalar2 == 0; note combo is still only scalar2's part here if
         * scalar1 != 0 -- NOTE(review): this early path ignores scalar1's
         * recoding entirely, presumably because control_pre[0].power is
         * also < 0 whenever this fires; confirm against recode_wnaf. */
        API_NS(point_copy)(combo, API_NS(point_identity));
        return;
    } else if (i > control_pre[0].power) {
        pniels_to_pt(combo, precmp_var[control_var[0].addend >> 1]);
        contv++;
    } else if (i == control_pre[0].power && i >=0 ) {
        pniels_to_pt(combo, precmp_var[control_var[0].addend >> 1]);
        add_niels_to_pt(combo, API_NS(wnaf_base)[control_pre[0].addend >> 1], i);
        contv++; contp++;
    } else {
        i = control_pre[0].power;
        niels_to_pt(combo, API_NS(wnaf_base)[control_pre[0].addend >> 1]);
        contp++;
    }

    /* Main loop: double once per bit, folding in a table entry whenever
     * either control list has an addend at this power. addend>>1 maps the
     * odd digit 2k+1 to table index k; negative digits subtract. */
    for (i--; i >= 0; i--) {
        int cv = (i==control_var[contv].power), cp = (i==control_pre[contp].power);
        point_double_internal(combo,combo,i && !(cv||cp));

        if (cv) {
            assert(control_var[contv].addend);

            if (control_var[contv].addend > 0) {
                add_pniels_to_pt(combo, precmp_var[control_var[contv].addend >> 1], i&&!cp);
            } else {
                sub_pniels_from_pt(combo, precmp_var[(-control_var[contv].addend) >> 1], i&&!cp);
            }
            contv++;
        }

        if (cp) {
            assert(control_pre[contp].addend);

            if (control_pre[contp].addend > 0) {
                add_niels_to_pt(combo, API_NS(wnaf_base)[control_pre[contp].addend >> 1], i);
            } else {
                sub_niels_from_pt(combo, API_NS(wnaf_base)[(-control_pre[contp].addend) >> 1], i);
            }
            contp++;
        }
    }

    /* This function is non-secret, but whatever this is cheap. */
    decaf_bzero(control_var,sizeof(control_var));
    decaf_bzero(control_pre,sizeof(control_pre));
    decaf_bzero(precmp_var,sizeof(precmp_var));

    assert(contv == ncb_var); (void)ncb_var;
    assert(contp == ncb_pre); (void)ncb_pre;
}
/** Securely erase a point (zeroization resistant to dead-store elimination
 * via decaf_bzero). */
void API_NS(point_destroy) (
    point_t point
) {
    decaf_bzero(point, sizeof(point_t));
}
/** Securely erase a precomputed table (its full size is tracked by
 * API_NS(sizeof_precomputed_s)). */
void API_NS(precomputed_destroy) (
    precomputed_s *pre
) {
    decaf_bzero(pre, API_NS(sizeof_precomputed_s));
}