/* SHA-256 and SHA-512 implementation based on code by Olivier Gay
 * <olivier.gay@a3.epfl.ch> under a BSD-style license. See below.
 */

/*
 * FIPS 180-2 SHA-224/256/384/512 implementation
 * Last update: 02/02/2007
 * Issue date:  04/30/2005
 *
 * Copyright (C) 2005, 2007 Olivier Gay <olivier.gay@a3.epfl.ch>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "sysincludes.h"

#include "cryptolib.h"
#include "utility.h"

#define SHFR(x, n) (x >> n)
#define ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n)))
#define ROTL(x, n) ((x << n) | (x >> ((sizeof(x) << 3) - n)))
#define CH(x, y, z) ((x & y) ^ (~x & z))
#define MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z))

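/* SHA-256 functions from FIPS 180-2, section 4.1.2: SHA256_F1 and
 * SHA256_F2 are the "big" Sigma0/Sigma1 functions used in each
 * compression round; SHA256_F3 and SHA256_F4 are the "small"
 * sigma0/sigma1 functions used in the message schedule. */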
#define SHA256_F1(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
#define SHA256_F2(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
#define SHA256_F3(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHFR(x, 3))
#define SHA256_F4(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHFR(x, 10))

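/* Big-endian (de)serialization of 32-bit words, as required by
 * FIPS 180-2: UNPACK32 writes x into str[0..3] most significant byte
 * first; PACK32 reads four bytes from str into *x. */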
#define UNPACK32(x, str)                  \
{                                         \
  *((str) + 3) = (uint8_t) ((x)      );   \
  *((str) + 2) = (uint8_t) ((x) >>  8);   \
  *((str) + 1) = (uint8_t) ((x) >> 16);   \
  *((str) + 0) = (uint8_t) ((x) >> 24);   \
}

#define PACK32(str, x)                    \
{                                         \
  *(x) = ((uint32_t) *((str) + 3)      )  \
       | ((uint32_t) *((str) + 2) <<  8)  \
       | ((uint32_t) *((str) + 1) << 16)  \
       | ((uint32_t) *((str) + 0) << 24); \
}

/* Macros used for loop unrolling */

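/* One step of the SHA-256 message schedule:
 * w[i] = sigma1(w[i-2]) + w[i-7] + sigma0(w[i-15]) + w[i-16]. */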
#define SHA256_SCR(i)                       \
{                                           \
  w[i] = SHA256_F4(w[i - 2]) + w[i - 7]     \
       + SHA256_F3(w[i - 15]) + w[i - 16];  \
}

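/* One SHA-256 compression round for the unrolled code path.  The working
 * variables a..h are passed as indices into wv[], so eight consecutive
 * invocations with rotated index lists replace the usual register
 * shuffle. */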
#define SHA256_EXP(a, b, c, d, e, f, g, h, j)              \
{                                                          \
  t1 = wv[h] + SHA256_F2(wv[e]) + CH(wv[e], wv[f], wv[g])  \
       + sha256_k[j] + w[j];                               \
  t2 = SHA256_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]);        \
  wv[d] += t1;                                             \
  wv[h] = t1 + t2;                                         \
}

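/* Initial hash value H(0): the first 32 bits of the fractional parts of
 * the square roots of the first eight primes (FIPS 180-2, 5.3.2). */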
static const uint32_t sha256_h0[8] = {
  0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
  0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19};

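/* Round constants K: the first 32 bits of the fractional parts of the
 * cube roots of the first 64 primes (FIPS 180-2, 4.2.2). */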
static const uint32_t sha256_k[64] = {
  0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
  0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
  0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
  0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
  0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
  0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
  0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
  0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
  0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
  0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
  0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
  0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
  0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
  0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
  0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
  0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2};


/* SHA-256 implementation */
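/* Reset the context: load the initial hash value and clear the
 * buffered-byte and total-byte counters. */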
void SHA256_init(VB_SHA256_CTX* ctx) {
#ifndef UNROLL_LOOPS
  int i;
  for (i = 0; i < 8; i++) {
    ctx->h[i] = sha256_h0[i];
  }
#else
  ctx->h[0] = sha256_h0[0]; ctx->h[1] = sha256_h0[1];
  ctx->h[2] = sha256_h0[2]; ctx->h[3] = sha256_h0[3];
  ctx->h[4] = sha256_h0[4]; ctx->h[5] = sha256_h0[5];
  ctx->h[6] = sha256_h0[6]; ctx->h[7] = sha256_h0[7];
#endif /* !UNROLL_LOOPS */

  ctx->len = 0;
  ctx->tot_len = 0;
}


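/* Core compression function: absorb block_nb consecutive 64-byte blocks
 * from message into the hash state ctx->h. */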
static void SHA256_transform(VB_SHA256_CTX* ctx, const uint8_t* message,
                             unsigned int block_nb) {
  uint32_t w[64];
  uint32_t wv[8];
  uint32_t t1, t2;
  const unsigned char *sub_block;
  int i;

#ifndef UNROLL_LOOPS
  int j;
#endif

  for (i = 0; i < (int) block_nb; i++) {
    sub_block = message + (i << 6);

#ifndef UNROLL_LOOPS
    /* Load the 16 message words, then expand the schedule to 64 words. */
    for (j = 0; j < 16; j++) {
      PACK32(&sub_block[j << 2], &w[j]);
    }

    for (j = 16; j < 64; j++) {
      SHA256_SCR(j);
    }

    /* Initialize the working variables a..h from the current state. */
    for (j = 0; j < 8; j++) {
      wv[j] = ctx->h[j];
    }

    /* 64 compression rounds. */
    for (j = 0; j < 64; j++) {
      t1 = wv[7] + SHA256_F2(wv[4]) + CH(wv[4], wv[5], wv[6])
           + sha256_k[j] + w[j];
      t2 = SHA256_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]);
      wv[7] = wv[6];
      wv[6] = wv[5];
      wv[5] = wv[4];
      wv[4] = wv[3] + t1;
      wv[3] = wv[2];
      wv[2] = wv[1];
      wv[1] = wv[0];
      wv[0] = t1 + t2;
    }

    /* Add the compressed block back into the state. */
    for (j = 0; j < 8; j++) {
      ctx->h[j] += wv[j];
    }
#else
    PACK32(&sub_block[ 0], &w[ 0]); PACK32(&sub_block[ 4], &w[ 1]);
    PACK32(&sub_block[ 8], &w[ 2]); PACK32(&sub_block[12], &w[ 3]);
    PACK32(&sub_block[16], &w[ 4]); PACK32(&sub_block[20], &w[ 5]);
    PACK32(&sub_block[24], &w[ 6]); PACK32(&sub_block[28], &w[ 7]);
    PACK32(&sub_block[32], &w[ 8]); PACK32(&sub_block[36], &w[ 9]);
    PACK32(&sub_block[40], &w[10]); PACK32(&sub_block[44], &w[11]);
    PACK32(&sub_block[48], &w[12]); PACK32(&sub_block[52], &w[13]);
    PACK32(&sub_block[56], &w[14]); PACK32(&sub_block[60], &w[15]);

    SHA256_SCR(16); SHA256_SCR(17); SHA256_SCR(18); SHA256_SCR(19);
    SHA256_SCR(20); SHA256_SCR(21); SHA256_SCR(22); SHA256_SCR(23);
    SHA256_SCR(24); SHA256_SCR(25); SHA256_SCR(26); SHA256_SCR(27);
    SHA256_SCR(28); SHA256_SCR(29); SHA256_SCR(30); SHA256_SCR(31);
    SHA256_SCR(32); SHA256_SCR(33); SHA256_SCR(34); SHA256_SCR(35);
    SHA256_SCR(36); SHA256_SCR(37); SHA256_SCR(38); SHA256_SCR(39);
    SHA256_SCR(40); SHA256_SCR(41); SHA256_SCR(42); SHA256_SCR(43);
    SHA256_SCR(44); SHA256_SCR(45); SHA256_SCR(46); SHA256_SCR(47);
    SHA256_SCR(48); SHA256_SCR(49); SHA256_SCR(50); SHA256_SCR(51);
    SHA256_SCR(52); SHA256_SCR(53); SHA256_SCR(54); SHA256_SCR(55);
    SHA256_SCR(56); SHA256_SCR(57); SHA256_SCR(58); SHA256_SCR(59);
    SHA256_SCR(60); SHA256_SCR(61); SHA256_SCR(62); SHA256_SCR(63);

    wv[0] = ctx->h[0]; wv[1] = ctx->h[1];
    wv[2] = ctx->h[2]; wv[3] = ctx->h[3];
    wv[4] = ctx->h[4]; wv[5] = ctx->h[5];
    wv[6] = ctx->h[6]; wv[7] = ctx->h[7];

    SHA256_EXP(0,1,2,3,4,5,6,7, 0); SHA256_EXP(7,0,1,2,3,4,5,6, 1);
    SHA256_EXP(6,7,0,1,2,3,4,5, 2); SHA256_EXP(5,6,7,0,1,2,3,4, 3);
    SHA256_EXP(4,5,6,7,0,1,2,3, 4); SHA256_EXP(3,4,5,6,7,0,1,2, 5);
    SHA256_EXP(2,3,4,5,6,7,0,1, 6); SHA256_EXP(1,2,3,4,5,6,7,0, 7);
    SHA256_EXP(0,1,2,3,4,5,6,7, 8); SHA256_EXP(7,0,1,2,3,4,5,6, 9);
    SHA256_EXP(6,7,0,1,2,3,4,5,10); SHA256_EXP(5,6,7,0,1,2,3,4,11);
    SHA256_EXP(4,5,6,7,0,1,2,3,12); SHA256_EXP(3,4,5,6,7,0,1,2,13);
    SHA256_EXP(2,3,4,5,6,7,0,1,14); SHA256_EXP(1,2,3,4,5,6,7,0,15);
    SHA256_EXP(0,1,2,3,4,5,6,7,16); SHA256_EXP(7,0,1,2,3,4,5,6,17);
    SHA256_EXP(6,7,0,1,2,3,4,5,18); SHA256_EXP(5,6,7,0,1,2,3,4,19);
    SHA256_EXP(4,5,6,7,0,1,2,3,20); SHA256_EXP(3,4,5,6,7,0,1,2,21);
    SHA256_EXP(2,3,4,5,6,7,0,1,22); SHA256_EXP(1,2,3,4,5,6,7,0,23);
    SHA256_EXP(0,1,2,3,4,5,6,7,24); SHA256_EXP(7,0,1,2,3,4,5,6,25);
    SHA256_EXP(6,7,0,1,2,3,4,5,26); SHA256_EXP(5,6,7,0,1,2,3,4,27);
    SHA256_EXP(4,5,6,7,0,1,2,3,28); SHA256_EXP(3,4,5,6,7,0,1,2,29);
    SHA256_EXP(2,3,4,5,6,7,0,1,30); SHA256_EXP(1,2,3,4,5,6,7,0,31);
    SHA256_EXP(0,1,2,3,4,5,6,7,32); SHA256_EXP(7,0,1,2,3,4,5,6,33);
    SHA256_EXP(6,7,0,1,2,3,4,5,34); SHA256_EXP(5,6,7,0,1,2,3,4,35);
    SHA256_EXP(4,5,6,7,0,1,2,3,36); SHA256_EXP(3,4,5,6,7,0,1,2,37);
    SHA256_EXP(2,3,4,5,6,7,0,1,38); SHA256_EXP(1,2,3,4,5,6,7,0,39);
    SHA256_EXP(0,1,2,3,4,5,6,7,40); SHA256_EXP(7,0,1,2,3,4,5,6,41);
    SHA256_EXP(6,7,0,1,2,3,4,5,42); SHA256_EXP(5,6,7,0,1,2,3,4,43);
    SHA256_EXP(4,5,6,7,0,1,2,3,44); SHA256_EXP(3,4,5,6,7,0,1,2,45);
    SHA256_EXP(2,3,4,5,6,7,0,1,46); SHA256_EXP(1,2,3,4,5,6,7,0,47);
    SHA256_EXP(0,1,2,3,4,5,6,7,48); SHA256_EXP(7,0,1,2,3,4,5,6,49);
    SHA256_EXP(6,7,0,1,2,3,4,5,50); SHA256_EXP(5,6,7,0,1,2,3,4,51);
    SHA256_EXP(4,5,6,7,0,1,2,3,52); SHA256_EXP(3,4,5,6,7,0,1,2,53);
    SHA256_EXP(2,3,4,5,6,7,0,1,54); SHA256_EXP(1,2,3,4,5,6,7,0,55);
    SHA256_EXP(0,1,2,3,4,5,6,7,56); SHA256_EXP(7,0,1,2,3,4,5,6,57);
    SHA256_EXP(6,7,0,1,2,3,4,5,58); SHA256_EXP(5,6,7,0,1,2,3,4,59);
    SHA256_EXP(4,5,6,7,0,1,2,3,60); SHA256_EXP(3,4,5,6,7,0,1,2,61);
    SHA256_EXP(2,3,4,5,6,7,0,1,62); SHA256_EXP(1,2,3,4,5,6,7,0,63);

    ctx->h[0] += wv[0]; ctx->h[1] += wv[1];
    ctx->h[2] += wv[2]; ctx->h[3] += wv[3];
    ctx->h[4] += wv[4]; ctx->h[5] += wv[5];
    ctx->h[6] += wv[6]; ctx->h[7] += wv[7];
#endif /* !UNROLL_LOOPS */
  }
}


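/* Absorb len bytes from data.  A partial block is buffered in ctx->block;
 * once 64 bytes are available they are compressed, and any trailing
 * partial block is kept for the next call.  ctx->len counts buffered
 * bytes, ctx->tot_len the bytes already compressed. */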
void SHA256_update(VB_SHA256_CTX* ctx, const uint8_t* data, uint32_t len) {
  unsigned int block_nb;
  unsigned int new_len, rem_len, tmp_len;
  const uint8_t *shifted_data;

  tmp_len = SHA256_BLOCK_SIZE - ctx->len;
  rem_len = len < tmp_len ? len : tmp_len;

  Memcpy(&ctx->block[ctx->len], data, rem_len);

  if (ctx->len + len < SHA256_BLOCK_SIZE) {
    ctx->len += len;
    return;
  }

  new_len = len - rem_len;
  block_nb = new_len / SHA256_BLOCK_SIZE;

  shifted_data = data + rem_len;

  SHA256_transform(ctx, ctx->block, 1);
  SHA256_transform(ctx, shifted_data, block_nb);

  rem_len = new_len % SHA256_BLOCK_SIZE;

  Memcpy(ctx->block, &shifted_data[block_nb << 6], rem_len);

  ctx->len = rem_len;
  ctx->tot_len += (block_nb + 1) << 6;
}

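/* Finish the hash: append the 0x80 padding byte, zero fill, and the
 * message length in bits in the last four bytes (the upper four length
 * bytes stay zero, which matches FIPS 180-2 padding for messages shorter
 * than 2^32 bits).  Returns a pointer to the big-endian digest stored in
 * ctx->buf. */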
uint8_t* SHA256_final(VB_SHA256_CTX* ctx) {
  unsigned int block_nb;
  unsigned int pm_len;
  unsigned int len_b;
#ifndef UNROLL_LOOPS
  int i;
#endif

  block_nb = (1 + ((SHA256_BLOCK_SIZE - 9)
                   < (ctx->len % SHA256_BLOCK_SIZE)));

  len_b = (ctx->tot_len + ctx->len) << 3;
  pm_len = block_nb << 6;

  Memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
  ctx->block[ctx->len] = 0x80;
  UNPACK32(len_b, ctx->block + pm_len - 4);

  SHA256_transform(ctx, ctx->block, block_nb);

#ifndef UNROLL_LOOPS
  for (i = 0; i < 8; i++) {
    UNPACK32(ctx->h[i], &ctx->buf[i << 2]);
  }
#else
  UNPACK32(ctx->h[0], &ctx->buf[ 0]);
  UNPACK32(ctx->h[1], &ctx->buf[ 4]);
  UNPACK32(ctx->h[2], &ctx->buf[ 8]);
  UNPACK32(ctx->h[3], &ctx->buf[12]);
  UNPACK32(ctx->h[4], &ctx->buf[16]);
  UNPACK32(ctx->h[5], &ctx->buf[20]);
  UNPACK32(ctx->h[6], &ctx->buf[24]);
  UNPACK32(ctx->h[7], &ctx->buf[28]);
#endif /* !UNROLL_LOOPS */

  return ctx->buf;
}
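
/*
 * Example of streaming use (a minimal sketch; msg_part1/msg_part2 and
 * their lengths are placeholders supplied by the caller):
 *
 *   VB_SHA256_CTX ctx;
 *   uint8_t digest[SHA256_DIGEST_SIZE];
 *
 *   SHA256_init(&ctx);
 *   SHA256_update(&ctx, msg_part1, msg_part1_len);
 *   SHA256_update(&ctx, msg_part2, msg_part2_len);
 *   Memcpy(digest, SHA256_final(&ctx), SHA256_DIGEST_SIZE);
 */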
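/* One-shot convenience wrapper: hash len bytes from data and copy the
 * SHA256_DIGEST_SIZE-byte result into digest, feeding SHA256_update()
 * at most UINT32_MAX bytes per call since its length parameter is
 * 32-bit. */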
uint8_t* internal_SHA256(const uint8_t* data, uint64_t len, uint8_t* digest) {
  const uint8_t* input_ptr;
  const uint8_t* result;
  uint64_t remaining_len;
  int i;
  VB_SHA256_CTX ctx;

  SHA256_init(&ctx);

  input_ptr = data;
  remaining_len = len;

  /* Process data in at most UINT32_MAX byte chunks at a time. */
  while (remaining_len) {
    uint32_t block_size;
    block_size = (uint32_t) ((remaining_len >= UINT32_MAX) ?
                             UINT32_MAX : remaining_len);
    SHA256_update(&ctx, input_ptr, block_size);
    remaining_len -= block_size;
    input_ptr += block_size;
  }

  result = SHA256_final(&ctx);
  for (i = 0; i < SHA256_DIGEST_SIZE; ++i) {
    digest[i] = *result++;
  }
  return digest;
}