/*
 * Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright (c) 2014, Intel Corporation. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 *
 * Originally written by Shay Gueron (1, 2), and Vlad Krasnov (1)
 * (1) Intel Corporation, Israel Development Center, Haifa, Israel
 * (2) University of Haifa, Israel
 *
 * Reference:
 * S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with
 * 256 Bit Primes"
 */

#include <ring-core/base.h>

#include "../../limbs/limbs.inl"

#include <stdint.h>

#include "p256-x86_64.h"

#if defined(OPENSSL_USE_NISTZ256)

typedef P256_POINT_AFFINE PRECOMP256_ROW[64];

// One converted into the Montgomery domain
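// (i.e. 2^256 mod p, since the Montgomery representation of a value a used
// throughout this file is a * 2^256 mod p).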
static const BN_ULONG ONE[P256_LIMBS] = {
    TOBN(0x00000000, 0x00000001), TOBN(0xffffffff, 0x00000000),
    TOBN(0xffffffff, 0xffffffff), TOBN(0x00000000, 0xfffffffe),
};

// Precomputed tables for the default generator
#include "p256-x86_64-table.h"

// Recode window to a signed digit, see |nistp_recode_scalar_bits| in
// util.c for details
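//
// The input is a w-bit window plus the top bit of the window below it. For
// illustration with w = 5: in = 0b100110 has window bits 10011 (19) and a
// carry-in bit of 0; since 19 exceeds 2^4 = 16, it is recoded as the digit
// -13 with a carry of +1 into the next-higher window, and the function
// returns (13 << 1) | 1 = 27, i.e. the digit magnitude in the upper bits and
// the sign in bit 0.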
static crypto_word booth_recode_w5(crypto_word in) {
  crypto_word s, d;

  s = ~((in >> 5) - 1);
  d = (1 << 6) - in - 1;
  d = (d & s) | (in & ~s);
  d = (d >> 1) + (d & 1);

  return (d << 1) + (s & 1);
}

static crypto_word booth_recode_w7(crypto_word in) {
  crypto_word s, d;

  s = ~((in >> 7) - 1);
  d = (1 << 8) - in - 1;
  d = (d & s) | (in & ~s);
  d = (d >> 1) + (d & 1);

  return (d << 1) + (s & 1);
}

// copy_conditional copies |src| to |dst| if |move| is one and leaves it as-is
// if |move| is zero.
//
// WARNING: this breaks the usual convention of constant-time functions
// returning masks.
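//
// For instance, with |move| == 1 the mask below is all-ones and every limb of
// |dst| is replaced by the corresponding limb of |src|; with |move| == 0 the
// mask is zero and |dst| is left unchanged. |move| is assumed to be exactly
// zero or one.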
static void copy_conditional(BN_ULONG dst[P256_LIMBS],
                             const BN_ULONG src[P256_LIMBS], BN_ULONG move) {
  BN_ULONG mask1 = ((BN_ULONG)0) - move;
  BN_ULONG mask2 = ~mask1;

  dst[0] = (src[0] & mask1) ^ (dst[0] & mask2);
  dst[1] = (src[1] & mask1) ^ (dst[1] & mask2);
  dst[2] = (src[2] & mask1) ^ (dst[2] & mask2);
  dst[3] = (src[3] & mask1) ^ (dst[3] & mask2);
  if (P256_LIMBS == 8) {
    dst[4] = (src[4] & mask1) ^ (dst[4] & mask2);
    dst[5] = (src[5] & mask1) ^ (dst[5] & mask2);
    dst[6] = (src[6] & mask1) ^ (dst[6] & mask2);
    dst[7] = (src[7] & mask1) ^ (dst[7] & mask2);
  }
}

// is_not_zero returns one iff in != 0 and zero otherwise.
//
// WARNING: this breaks the usual convention of constant-time functions
// returning masks.
//
// (define-fun is_not_zero ((in (_ BitVec 64))) (_ BitVec 64)
//   (bvlshr (bvor in (bvsub #x0000000000000000 in)) #x000000000000003f)
// )
//
// (declare-fun x () (_ BitVec 64))
//
// (assert (and (= x #x0000000000000000) (= (is_not_zero x) #x0000000000000001)))
// (check-sat)
//
// (assert (and (not (= x #x0000000000000000)) (= (is_not_zero x) #x0000000000000000)))
// (check-sat)
//
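// As a concrete example: for in == 5, |in | (0 - in)| has its top bit set
// (the two's-complement negation carries the sign bit), so the shift by
// BN_BITS2 - 1 yields 1; for in == 0 both terms are zero and the result is 0.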
static BN_ULONG is_not_zero(BN_ULONG in) {
  in |= (0 - in);
  in >>= BN_BITS2 - 1;
  return in;
}


// r = p * p_scalar
static void ecp_nistz256_windowed_mul(P256_POINT *r,
                                      const BN_ULONG p_scalar[P256_LIMBS],
                                      const BN_ULONG p_x[P256_LIMBS],
                                      const BN_ULONG p_y[P256_LIMBS]) {
  debug_assert_nonsecret(r != NULL);
  debug_assert_nonsecret(p_scalar != NULL);
  debug_assert_nonsecret(p_x != NULL);
  debug_assert_nonsecret(p_y != NULL);

  static const size_t kWindowSize = 5;
  static const crypto_word kMask = (1 << (5 /* kWindowSize */ + 1)) - 1;

  // A |P256_POINT| is (3 * 32) = 96 bytes, and the 64-byte alignment should
  // add no more than 63 bytes of overhead. Thus, |table| should require
  // ~1599 ((96 * 16) + 63) bytes of stack space.
  alignas(64) P256_POINT table[16];
  P256_SCALAR_BYTES p_str;
  p256_scalar_bytes_from_limbs(p_str, p_scalar);

  // table[0] is implicitly (0,0,0) (the point at infinity), therefore it is
  // not stored. All other values are actually stored with an offset of -1 in
  // table.
  P256_POINT *row = table;

  limbs_copy(row[1 - 1].X, p_x, P256_LIMBS);
  limbs_copy(row[1 - 1].Y, p_y, P256_LIMBS);
  limbs_copy(row[1 - 1].Z, ONE, P256_LIMBS);

  ecp_nistz256_point_double(&row[2 - 1], &row[1 - 1]);
  ecp_nistz256_point_add(&row[3 - 1], &row[2 - 1], &row[1 - 1]);
  ecp_nistz256_point_double(&row[4 - 1], &row[2 - 1]);
  ecp_nistz256_point_double(&row[6 - 1], &row[3 - 1]);
  ecp_nistz256_point_double(&row[8 - 1], &row[4 - 1]);
  ecp_nistz256_point_double(&row[12 - 1], &row[6 - 1]);
  ecp_nistz256_point_add(&row[5 - 1], &row[4 - 1], &row[1 - 1]);
  ecp_nistz256_point_add(&row[7 - 1], &row[6 - 1], &row[1 - 1]);
  ecp_nistz256_point_add(&row[9 - 1], &row[8 - 1], &row[1 - 1]);
  ecp_nistz256_point_add(&row[13 - 1], &row[12 - 1], &row[1 - 1]);
  ecp_nistz256_point_double(&row[14 - 1], &row[7 - 1]);
  ecp_nistz256_point_double(&row[10 - 1], &row[5 - 1]);
  ecp_nistz256_point_add(&row[15 - 1], &row[14 - 1], &row[1 - 1]);
  ecp_nistz256_point_add(&row[11 - 1], &row[10 - 1], &row[1 - 1]);
  ecp_nistz256_point_double(&row[16 - 1], &row[8 - 1]);
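
  // At this point row[i - 1] holds i * P in Jacobian coordinates for
  // i = 1..16; e.g. row[11 - 1] above is computed as 2 * (5 * P) + P.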

  BN_ULONG tmp[P256_LIMBS];
  alignas(32) P256_POINT h;
  size_t index = 255;
  crypto_word wvalue = p_str[(index - 1) / 8];
  wvalue = (wvalue >> ((index - 1) % 8)) & kMask;

  ecp_nistz256_select_w5(r, table, booth_recode_w5(wvalue) >> 1);

  while (index >= 5) {
    if (index != 255) {
      size_t off = (index - 1) / 8;

      wvalue = (crypto_word)p_str[off] | (crypto_word)p_str[off + 1] << 8;
      wvalue = (wvalue >> ((index - 1) % 8)) & kMask;

      wvalue = booth_recode_w5(wvalue);

      ecp_nistz256_select_w5(&h, table, wvalue >> 1);

      ecp_nistz256_neg(tmp, h.Y);
      copy_conditional(h.Y, tmp, (wvalue & 1));

      ecp_nistz256_point_add(r, r, &h);
    }

    index -= kWindowSize;

    ecp_nistz256_point_double(r, r);
    ecp_nistz256_point_double(r, r);
    ecp_nistz256_point_double(r, r);
    ecp_nistz256_point_double(r, r);
    ecp_nistz256_point_double(r, r);
  }

  // Final window
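  //
  // The lowest window has no neighbor below it, so a zero carry-in bit is
  // shifted in before recoding (hence the |<< 1|).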
  wvalue = p_str[0];
  wvalue = (wvalue << 1) & kMask;

  wvalue = booth_recode_w5(wvalue);

  ecp_nistz256_select_w5(&h, table, wvalue >> 1);

  ecp_nistz256_neg(tmp, h.Y);
  copy_conditional(h.Y, tmp, wvalue & 1);

  ecp_nistz256_point_add(r, r, &h);
}

typedef union {
  P256_POINT p;
  P256_POINT_AFFINE a;
} p256_point_union_t;

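// calc_first_wvalue and calc_wvalue extract successive 7-bit Booth windows
// from the little-endian byte string |p_str|. Each window shares one bit with
// the window below it; the first window has no lower neighbor, so a zero bit
// is shifted in below it instead.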
static crypto_word calc_first_wvalue(size_t *index, const uint8_t p_str[33]) {
  static const size_t kWindowSize = 7;
  static const crypto_word kMask = (1 << (7 /* kWindowSize */ + 1)) - 1;
  *index = kWindowSize;

  crypto_word wvalue = ((crypto_word)p_str[0] << 1) & kMask;
  return booth_recode_w7(wvalue);
}

static crypto_word calc_wvalue(size_t *index, const uint8_t p_str[33]) {
  static const size_t kWindowSize = 7;
  static const crypto_word kMask = (1 << (7 /* kWindowSize */ + 1)) - 1;

  const size_t off = (*index - 1) / 8;
  crypto_word wvalue =
      (crypto_word)p_str[off] | (crypto_word)p_str[off + 1] << 8;
  wvalue = (wvalue >> ((*index - 1) % 8)) & kMask;
  *index += kWindowSize;

  return booth_recode_w7(wvalue);
}

void p256_point_mul(P256_POINT *r, const Limb p_scalar[P256_LIMBS],
                    const Limb p_x[P256_LIMBS],
                    const Limb p_y[P256_LIMBS]) {
  alignas(32) P256_POINT out;
  ecp_nistz256_windowed_mul(&out, p_scalar, p_x, p_y);

  limbs_copy(r->X, out.X, P256_LIMBS);
  limbs_copy(r->Y, out.Y, P256_LIMBS);
  limbs_copy(r->Z, out.Z, P256_LIMBS);
}

void p256_point_mul_base(P256_POINT *r, const Limb scalar[P256_LIMBS]) {
  alignas(32) p256_point_union_t t, p;

  P256_SCALAR_BYTES p_str;
  p256_scalar_bytes_from_limbs(p_str, scalar);

  // First window
  size_t index = 0;
  crypto_word wvalue = calc_first_wvalue(&index, p_str);

  ecp_nistz256_select_w7(&p.a, ecp_nistz256_precomputed[0], wvalue >> 1);
  ecp_nistz256_neg(p.p.Z, p.p.Y);
  copy_conditional(p.p.Y, p.p.Z, wvalue & 1);

  // Convert |p| from affine to Jacobian coordinates. We set Z to zero if |p|
  // is infinity and |ONE| otherwise. |p| was computed from the table, so it
  // is infinity iff |wvalue >> 1| is zero.
  OPENSSL_memset(p.p.Z, 0, sizeof(p.p.Z));
  copy_conditional(p.p.Z, ONE, is_not_zero(wvalue >> 1));

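  // The remaining 36 windows each add one signed 7-bit digit. Each row i of
  // |ecp_nistz256_precomputed| presumably holds the multiples {1..64} of
  // 2^(7 * i) * G in affine, Montgomery form, so the 37 rows together cover
  // the whole 256-bit scalar (37 * 7 = 259 bits).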
  for (int i = 1; i < 37; i++) {
    wvalue = calc_wvalue(&index, p_str);

    ecp_nistz256_select_w7(&t.a, ecp_nistz256_precomputed[i], wvalue >> 1);

    ecp_nistz256_neg(t.p.Z, t.a.Y);
    copy_conditional(t.a.Y, t.p.Z, wvalue & 1);

    // Note |ecp_nistz256_point_add_affine| does not work if |p.p| and |t.a|
    // are the same non-infinity point.
    ecp_nistz256_point_add_affine(&p.p, &p.p, &t.a);
  }

  limbs_copy(r->X, p.p.X, P256_LIMBS);
  limbs_copy(r->Y, p.p.Y, P256_LIMBS);
  limbs_copy(r->Z, p.p.Z, P256_LIMBS);
}

#endif /* defined(OPENSSL_USE_NISTZ256) */