/*############################################################################
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############################################################################*/
/*
===============================================================================

Copyright (c) 2013, Kenneth MacKay
All rights reserved.
https://github.com/kmackay/micro-ecc

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
 * Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

===============================================================================
*/
/// Implementation of Large Integer math

#include "epid/member/tiny/math/vli.h"
#include "epid/member/tiny/math/mathtypes.h"
#include "epid/member/tiny/math/serialize.h"

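/* Computes result = left + right, returning the carry-out (0 or 1).
 * Can modify in place (result may alias left or right). */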
uint32_t VliAdd(VeryLargeInt* result, VeryLargeInt const* left,
                VeryLargeInt const* right) {
  uint32_t carry = 0;
  uint32_t i;
  for (i = 0; i < NUM_ECC_DIGITS; ++i) {
    uint32_t sum = left->word[i] + right->word[i] + carry;
    carry = (sum < left->word[i]) | ((sum == left->word[i]) && carry);
    result->word[i] = sum;
  }
  return carry;
}

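/* Computes the full 2 * NUM_ECC_DIGITS-word product result = left * right
 * using schoolbook column multiplication with a 64+32-bit accumulator. */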
void VliMul(VeryLargeIntProduct* result, VeryLargeInt const* left,
            VeryLargeInt const* right) {
  uint64_t tmp_r1 = 0;
  uint32_t tmp_r2 = 0;
  uint32_t i, k;
  /* Compute each digit of result in sequence, maintaining the carries. */
  for (k = 0; k < (uint32_t)(NUM_ECC_DIGITS * 2 - 1); ++k) {
    uint32_t min_idx =
        (k < (uint32_t)NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
    for (i = min_idx; i <= k && i < (uint32_t)NUM_ECC_DIGITS; ++i) {
      uint64_t product = (uint64_t)left->word[i] * right->word[k - i];
      tmp_r1 += product;
      tmp_r2 += (tmp_r1 < product);
    }
    result->word[k] = (uint32_t)tmp_r1;
    tmp_r1 = (tmp_r1 >> 32) | (((uint64_t)tmp_r2) << 32);
    tmp_r2 = 0;
  }
  result->word[NUM_ECC_DIGITS * 2 - 1] = (uint32_t)tmp_r1;
}

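/* Computes result = in >> shift. Can modify in place. Assumes
 * 0 < shift < 32; the (32 - shift) sub-shift is undefined otherwise. */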
void VliRShift(VeryLargeInt* result, VeryLargeInt const* in, uint32_t shift) {
  uint32_t i;
  for (i = 0; i < NUM_ECC_DIGITS - 1; i++) {
    result->word[i] = (in->word[i] >> shift) | in->word[i + 1] << (32 - shift);
  }
  result->word[NUM_ECC_DIGITS - 1] = in->word[NUM_ECC_DIGITS - 1] >> shift;
}

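/* Computes result = left - right, returning the borrow-out (0 or 1).
 * Can modify in place. */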
uint32_t VliSub(VeryLargeInt* result, VeryLargeInt const* left,
                VeryLargeInt const* right) {
  uint32_t borrow = 0;
  int i;
  for (i = 0; i < NUM_ECC_DIGITS; ++i) {
    uint32_t diff = left->word[i] - right->word[i] - borrow;
    borrow = (diff > left->word[i]) | ((diff == left->word[i]) && borrow);
    result->word[i] = diff;
  }
  return borrow;
}

void VliSet(VeryLargeInt* result, VeryLargeInt const* in) {
  uint32_t i;
  for (i = 0; i < NUM_ECC_DIGITS; ++i) {
    result->word[i] = in->word[i];
  }
}

void VliClear(VeryLargeInt* result) {
  uint32_t i;
  for (i = 0; i < NUM_ECC_DIGITS; ++i) {
    result->word[i] = 0;
  }
}

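/* Returns 1 if in is zero, 0 otherwise. Scans every word rather than
 * exiting early, so the running time does not depend on the value. */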
int VliIsZero(VeryLargeInt const* in) {
  uint32_t i, acc = 0;
  for (i = 0; i < NUM_ECC_DIGITS; ++i) {
    acc += (in->word[i] != 0);
  }
  return (!acc);
}

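/* Branchless select: result = truth_val ? true_val : false_val. */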
void VliCondSet(VeryLargeInt* result, VeryLargeInt const* true_val,
                VeryLargeInt const* false_val, int truth_val) {
  int i;
  for (i = 0; i < NUM_ECC_DIGITS; i++)
    result->word[i] = (!truth_val) * false_val->word[i] +
                      (truth_val != 0) * true_val->word[i];
}

uint32_t VliTestBit(VeryLargeInt const* in, uint32_t bit) {
  return ((in->word[bit >> 5] >> (bit & 31)) & 1);  // bit % 32 == bit & 31
}

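/* Fills result with random data from rnd_func. Returns 1 on success,
 * 0 if rnd_func reports failure. */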
int VliRand(VeryLargeInt* result, BitSupplier rnd_func, void* rnd_param) {
  uint32_t t[NUM_ECC_DIGITS] = {0};
  if (rnd_func(t, sizeof(VeryLargeInt) * 8, rnd_param)) {
    return 0;
  }
  VliDeserialize(result, (BigNumStr const*)t);
  return 1;
}

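/* Compares left and right, returning -1, 0 or 1 (less, equal, greater).
 * Always scans all words, from most to least significant. */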
int VliCmp(VeryLargeInt const* left, VeryLargeInt const* right) {
  int i, cmp = 0;
  for (i = NUM_ECC_DIGITS - 1; i >= 0; --i) {
    cmp |=
        ((left->word[i] > right->word[i]) - (left->word[i] < right->word[i])) *
        (!cmp);
  }
  return cmp;
}

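/* Computes result = (left + right) mod mod. Assumes left and right are
 * already fully reduced modulo mod. */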
void VliModAdd(VeryLargeInt* result, VeryLargeInt const* left,
               VeryLargeInt const* right, VeryLargeInt const* mod) {
  VeryLargeInt tmp;
  uint32_t carry = VliAdd(result, left, right);
  carry = VliSub(&tmp, result, mod) - carry;
  VliCondSet(result, result, &tmp, carry);
}

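/* Computes result = (left - right) mod mod. Assumes left and right are
 * already fully reduced modulo mod. */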
void VliModSub(VeryLargeInt* result, VeryLargeInt const* left,
               VeryLargeInt const* right, VeryLargeInt const* mod) {
  VeryLargeInt tmp;
  uint32_t borrow = VliSub(result, left, right);
  VliAdd(&tmp, result, mod);
  VliCondSet(result, &tmp, result, borrow);
}

void VliModMul(VeryLargeInt* result, VeryLargeInt const* left,
               VeryLargeInt const* right, VeryLargeInt const* mod) {
  VeryLargeIntProduct product;
  VliMul(&product, left, right);
  VliModBarrett(result, &product, mod);
}

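/* Computes the full 2 * NUM_ECC_DIGITS-word square p_result = p_left^2,
 * doubling each cross product so only about half the partial products of
 * a general multiply are needed. */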
static void vliSquare(VeryLargeIntProduct* p_result,
                      VeryLargeInt const* p_left) {
  uint64_t tmp_r1 = 0;
  uint32_t tmp_r2 = 0;
  uint32_t i, k;
  for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; ++k) {
    uint32_t min_idx = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
    for (i = min_idx; i <= k && i <= k - i; ++i) {
      uint64_t l_product = (uint64_t)p_left->word[i] * p_left->word[k - i];
      if (i < k - i) {
        tmp_r2 += l_product >> 63;
        l_product *= 2;
      }
      tmp_r1 += l_product;
      tmp_r2 += (tmp_r1 < l_product);
    }
    p_result->word[k] = (uint32_t)tmp_r1;
    tmp_r1 = (tmp_r1 >> 32) | (((uint64_t)tmp_r2) << 32);
    tmp_r2 = 0;
  }

  p_result->word[NUM_ECC_DIGITS * 2 - 1] = (uint32_t)tmp_r1;
}

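/* Computes result = base^exp mod mod by left-to-right binary
 * exponentiation. Every bit performs both a square and a multiply and
 * keeps the multiply only when the exponent bit is set, so the operation
 * sequence does not depend on the exponent. */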
void VliModExp(VeryLargeInt* result, VeryLargeInt const* base,
               VeryLargeInt const* exp, VeryLargeInt const* mod) {
  VeryLargeInt acc, tmp;
  VeryLargeIntProduct product;
  uint32_t j;
  int i;
  VliClear(&acc);
  acc.word[0] = 1;
  for (i = NUM_ECC_DIGITS - 1; i >= 0; i--) {
    for (j = 1U << 31; j > 0; j = j >> 1) {
      vliSquare(&product, &acc);
      VliModBarrett(&acc, &product, mod);
      VliMul(&product, &acc, base);
      VliModBarrett(&tmp, &product, mod);
      VliCondSet(&acc, &tmp, &acc, j & (exp->word[i]));
    }
  }
  VliSet(result, &acc);
}

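/* Computes result = input^-1 mod mod via Fermat's little theorem
 * (input^(mod - 2) mod mod). Requires mod to be prime; the in-place
 * decrement assumes mod->word[0] >= 2 so that no borrow propagates. */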
void VliModInv(VeryLargeInt* result, VeryLargeInt const* input,
               VeryLargeInt const* mod) {
  VeryLargeInt power;
  VliSet(&power, mod);
  power.word[0] -= 2;
  VliModExp(result, input, &power, mod);
}

void VliModSquare(VeryLargeInt* result, VeryLargeInt const* input,
                  VeryLargeInt const* mod) {
  VeryLargeIntProduct product;
  vliSquare(&product, input);
  VliModBarrett(result, &product, mod);
}

/* Computes p_result = p_in << p_shift, returning the carry.
 * Can modify in place (if p_result == p_in). Assumes 0 < p_shift < 32. */
static uint32_t vliLShift(VeryLargeIntProduct* p_result,
                          VeryLargeIntProduct const* p_in, uint32_t p_shift) {
  int i;
  uint32_t carry = p_in->word[NUM_ECC_DIGITS * 2 - 1];
  for (i = NUM_ECC_DIGITS * 2 - 1; i > 0; --i)
    p_result->word[i] =
        ((p_in->word[i] << p_shift) | (p_in->word[i - 1] >> (32 - p_shift)));
  p_result->word[0] = p_in->word[0] << p_shift;
  return carry >> (32 - p_shift);
}

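/* Multiplies the low NUM_ECC_DIGITS - 1 words of p_left by the 32-bit
 * scalar p_right; the final carry word lands in
 * p_result->word[NUM_ECC_DIGITS - 1], so the product fits exactly. */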
static void vliScalarMult(VeryLargeInt* p_result, VeryLargeInt const* p_left,
                          uint32_t p_right) {
  int i;
  uint64_t tmpresult;
  uint32_t left = 0, right;
  for (i = 0; i < NUM_ECC_DIGITS - 1; i++) {
    tmpresult = p_left->word[i] * ((uint64_t)p_right);
    right = left + ((uint32_t)tmpresult);
    left = (right < left) + ((uint32_t)(tmpresult >> 32));
    p_result->word[i] = right;
  }
  p_result->word[NUM_ECC_DIGITS - 1] = left;
}

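/* Computes result = in >> shift over the first len words. Like VliRShift,
 * assumes 0 < shift < 32. */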
static void vliProdRShift(VeryLargeIntProduct* result,
                          VeryLargeIntProduct const* in, uint32_t shift,
                          uint32_t len) {
  uint32_t i;
  for (i = 0; i < len - 1; i++) {
    result->word[i] = (in->word[i] >> shift) | in->word[i + 1] << (32 - shift);
  }
  result->word[len - 1] = in->word[len - 1] >> shift;
}

/* WARNING: THIS METHOD MAKES STRONG ASSUMPTIONS ABOUT THE INVOLVED PRIMES.
 * All primes used for computations in Intel(R) EPID 2.0 begin with 32 one
 * bits, so this method assumes that 2^256 - mod begins with 32 zero bits
 * and is tuned to that assumption; violating it will cause the reduction
 * to produce wrong results. It also assumes that mod does not end with
 * 32 zero bits. */
void VliModBarrett(VeryLargeInt* result, VeryLargeIntProduct const* input,
                   VeryLargeInt const* mod) {
  int i;
  VeryLargeIntProduct tmpprod;
  VeryLargeInt negprime, linear;
  uint32_t carry = 0;
  VliSet((VeryLargeInt*)&tmpprod.word[0], (VeryLargeInt const*)&input->word[0]);
  VliSet((VeryLargeInt*)&tmpprod.word[NUM_ECC_DIGITS],
         (VeryLargeInt const*)&input->word[NUM_ECC_DIGITS]);
  // The negated prime is ~mod + 1; store it in negprime. Only the low
  // NUM_ECC_DIGITS - 1 words are needed, since vliScalarMult never reads
  // the top word of its multiplicand.
  for (i = 0; i < NUM_ECC_DIGITS - 1; i++) negprime.word[i] = ~mod->word[i];
  negprime.word[0]++;  // no carry-out: mod does not end with 32 zero bits
  for (i = 0; i < NUM_ECC_DIGITS; i++) {
    vliScalarMult(&linear, &negprime, tmpprod.word[2 * NUM_ECC_DIGITS - 1]);
    tmpprod.word[2 * NUM_ECC_DIGITS - 1] = 0;
    tmpprod.word[2 * NUM_ECC_DIGITS - 1] =
        VliAdd((VeryLargeInt*)&tmpprod.word[NUM_ECC_DIGITS - 1],
               (VeryLargeInt const*)&tmpprod.word[NUM_ECC_DIGITS - 1], &linear);
    vliLShift(&tmpprod, &tmpprod, 31);
  }
  // Shift the value, now held in the top NUM_ECC_DIGITS + 1 words, back to
  // the word base: the window offset of NUM_ECC_DIGITS - 1 words plus the
  // (31 * NUM_ECC_DIGITS) % 32 bit shift undoes the NUM_ECC_DIGITS shifts
  // by 31 performed above.
  vliProdRShift(&tmpprod,
                (VeryLargeIntProduct const*)&tmpprod.word[NUM_ECC_DIGITS - 1],
                (31 * NUM_ECC_DIGITS) % 32, NUM_ECC_DIGITS + 1);
  vliScalarMult(&linear, &negprime, tmpprod.word[NUM_ECC_DIGITS]);
  carry = VliAdd((VeryLargeInt*)&tmpprod.word[0],
                 (VeryLargeInt const*)&tmpprod.word[0],
                 (VeryLargeInt const*)&linear);
  carry |= (-1 < VliCmp((VeryLargeInt const*)&tmpprod.word[0], mod));
  vliScalarMult(&linear, &negprime, carry);
  VliAdd(result, (VeryLargeInt const*)&tmpprod.word[0], &linear);
}

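/* A minimal sanity sketch for the reduction path above (not part of the
 * library build; the VLI_BARRETT_SANITY guard and the helper name are
 * hypothetical). Given a modulus that satisfies the stated assumptions, it
 * squares 1 and checks that VliModBarrett hands back exactly 1. */
#ifdef VLI_BARRETT_SANITY
static int VliBarrettSanityCheck(VeryLargeInt const* mod) {
  VeryLargeInt one, r;
  VliClear(&one);
  one.word[0] = 1;
  VliModSquare(&r, &one, mod); /* squares, then reduces via VliModBarrett */
  return VliCmp(&r, &one) == 0;
}
#endif /* VLI_BARRETT_SANITY */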