/* SHA-256 and SHA-512 implementation based on code by Olivier Gay
 * <olivier.gay@a3.epfl.ch> under a BSD-style license. See below.
 */

/*
 * FIPS 180-2 SHA-224/256/384/512 implementation
 * Last update: 02/02/2007
 * Issue date: 04/30/2005
 *
 * Copyright (C) 2005, 2007 Olivier Gay <olivier.gay@a3.epfl.ch>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "avb_sha.h"

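/* Basic bit operations from FIPS 180-4: SHFR is a logical right shift,
 * ROTR/ROTL rotate right/left within the operand's bit width, CH ("choose")
 * picks each bit from y or z depending on the corresponding bit of x, and
 * MAJ ("majority") takes the majority vote of each bit position. */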
#define SHFR(x, n) ((x) >> (n))
#define ROTR(x, n) (((x) >> (n)) | ((x) << ((sizeof(x) << 3) - (n))))
#define ROTL(x, n) (((x) << (n)) | ((x) >> ((sizeof(x) << 3) - (n))))
#define CH(x, y, z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))

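/* The four SHA-256 sigma functions from FIPS 180-4: F1 and F2 are the
 * "big" Sigma0/Sigma1 used in each compression round; F3 and F4 are the
 * "small" sigma0/sigma1 used in the message schedule. */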
#define SHA256_F1(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
#define SHA256_F2(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
#define SHA256_F3(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHFR(x, 3))
#define SHA256_F4(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHFR(x, 10))

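/* SHA-256 is defined over big-endian words: UNPACK32/UNPACK64 serialize a
 * 32-/64-bit value to big-endian bytes, and PACK32 reads a big-endian
 * 32-bit word, independent of host byte order. */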
#define UNPACK32(x, str)                 \
  {                                      \
    *((str) + 3) = (uint8_t)((x));       \
    *((str) + 2) = (uint8_t)((x) >> 8);  \
    *((str) + 1) = (uint8_t)((x) >> 16); \
    *((str) + 0) = (uint8_t)((x) >> 24); \
  }

#define UNPACK64(x, str)                           \
  {                                                \
    *((str) + 7) = (uint8_t)(x);                   \
    *((str) + 6) = (uint8_t)((uint64_t)(x) >> 8);  \
    *((str) + 5) = (uint8_t)((uint64_t)(x) >> 16); \
    *((str) + 4) = (uint8_t)((uint64_t)(x) >> 24); \
    *((str) + 3) = (uint8_t)((uint64_t)(x) >> 32); \
    *((str) + 2) = (uint8_t)((uint64_t)(x) >> 40); \
    *((str) + 1) = (uint8_t)((uint64_t)(x) >> 48); \
    *((str) + 0) = (uint8_t)((uint64_t)(x) >> 56); \
  }

#define PACK32(str, x)                                                    \
  {                                                                       \
    *(x) = ((uint32_t) * ((str) + 3)) | ((uint32_t) * ((str) + 2) << 8) | \
           ((uint32_t) * ((str) + 1) << 16) |                             \
           ((uint32_t) * ((str) + 0) << 24);                              \
  }

/* Macros used for loop unrolling */

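/* SHA256_SCR expands one word of the message schedule,
 * w[i] = sigma1(w[i-2]) + w[i-7] + sigma0(w[i-15]) + w[i-16], and
 * SHA256_EXP performs one compression round; its a..h arguments rotate the
 * working-variable indices so the eight registers need no per-round
 * shuffling in the unrolled code path. */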
#define SHA256_SCR(i) \
  { w[i] = SHA256_F4(w[i - 2]) + w[i - 7] + SHA256_F3(w[i - 15]) + w[i - 16]; }

#define SHA256_EXP(a, b, c, d, e, f, g, h, j)                               \
  {                                                                         \
    t1 = wv[h] + SHA256_F2(wv[e]) + CH(wv[e], wv[f], wv[g]) + sha256_k[j] + \
         w[j];                                                              \
    t2 = SHA256_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]);                       \
    wv[d] += t1;                                                            \
    wv[h] = t1 + t2;                                                        \
  }

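/* Initial hash value: the first 32 bits of the fractional parts of the
 * square roots of the first eight primes, as specified in FIPS 180-4. */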
static const uint32_t sha256_h0[8] = {0x6a09e667,
                                      0xbb67ae85,
                                      0x3c6ef372,
                                      0xa54ff53a,
                                      0x510e527f,
                                      0x9b05688c,
                                      0x1f83d9ab,
                                      0x5be0cd19};

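/* Round constants: the first 32 bits of the fractional parts of the cube
 * roots of the first 64 primes, as specified in FIPS 180-4. */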
static const uint32_t sha256_k[64] = {
    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
    0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
    0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786,
    0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
    0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
    0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b,
    0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
    0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
    0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2};

/* SHA-256 implementation */
void avb_sha256_init(AvbSHA256Ctx* ctx) {
#ifndef UNROLL_LOOPS
  int i;
  for (i = 0; i < 8; i++) {
    ctx->h[i] = sha256_h0[i];
  }
#else
  ctx->h[0] = sha256_h0[0];
  ctx->h[1] = sha256_h0[1];
  ctx->h[2] = sha256_h0[2];
  ctx->h[3] = sha256_h0[3];
  ctx->h[4] = sha256_h0[4];
  ctx->h[5] = sha256_h0[5];
  ctx->h[6] = sha256_h0[6];
  ctx->h[7] = sha256_h0[7];
#endif /* !UNROLL_LOOPS */

  ctx->len = 0;
  ctx->tot_len = 0;
}

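/* Compression function: processes block_nb consecutive 64-byte blocks of
 * |message|, updating the running hash state in ctx->h. The looped and
 * UNROLL_LOOPS variants compute the same result. */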
static void SHA256_transform(AvbSHA256Ctx* ctx,
                             const uint8_t* message,
                             size_t block_nb) {
  uint32_t w[64];
  uint32_t wv[8];
  uint32_t t1, t2;
  const unsigned char* sub_block;
  size_t i;

#ifndef UNROLL_LOOPS
  size_t j;
#endif

  for (i = 0; i < block_nb; i++) {
    sub_block = message + (i << 6);

#ifndef UNROLL_LOOPS
    for (j = 0; j < 16; j++) {
      PACK32(&sub_block[j << 2], &w[j]);
    }

    for (j = 16; j < 64; j++) {
      SHA256_SCR(j);
    }

    for (j = 0; j < 8; j++) {
      wv[j] = ctx->h[j];
    }

    for (j = 0; j < 64; j++) {
      t1 = wv[7] + SHA256_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) + sha256_k[j] +
           w[j];
      t2 = SHA256_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]);
      wv[7] = wv[6];
      wv[6] = wv[5];
      wv[5] = wv[4];
      wv[4] = wv[3] + t1;
      wv[3] = wv[2];
      wv[2] = wv[1];
      wv[1] = wv[0];
      wv[0] = t1 + t2;
    }

    for (j = 0; j < 8; j++) {
      ctx->h[j] += wv[j];
    }
#else
    PACK32(&sub_block[0], &w[0]);
    PACK32(&sub_block[4], &w[1]);
    PACK32(&sub_block[8], &w[2]);
    PACK32(&sub_block[12], &w[3]);
    PACK32(&sub_block[16], &w[4]);
    PACK32(&sub_block[20], &w[5]);
    PACK32(&sub_block[24], &w[6]);
    PACK32(&sub_block[28], &w[7]);
    PACK32(&sub_block[32], &w[8]);
    PACK32(&sub_block[36], &w[9]);
    PACK32(&sub_block[40], &w[10]);
    PACK32(&sub_block[44], &w[11]);
    PACK32(&sub_block[48], &w[12]);
    PACK32(&sub_block[52], &w[13]);
    PACK32(&sub_block[56], &w[14]);
    PACK32(&sub_block[60], &w[15]);

    SHA256_SCR(16);
    SHA256_SCR(17);
    SHA256_SCR(18);
    SHA256_SCR(19);
    SHA256_SCR(20);
    SHA256_SCR(21);
    SHA256_SCR(22);
    SHA256_SCR(23);
    SHA256_SCR(24);
    SHA256_SCR(25);
    SHA256_SCR(26);
    SHA256_SCR(27);
    SHA256_SCR(28);
    SHA256_SCR(29);
    SHA256_SCR(30);
    SHA256_SCR(31);
    SHA256_SCR(32);
    SHA256_SCR(33);
    SHA256_SCR(34);
    SHA256_SCR(35);
    SHA256_SCR(36);
    SHA256_SCR(37);
    SHA256_SCR(38);
    SHA256_SCR(39);
    SHA256_SCR(40);
    SHA256_SCR(41);
    SHA256_SCR(42);
    SHA256_SCR(43);
    SHA256_SCR(44);
    SHA256_SCR(45);
    SHA256_SCR(46);
    SHA256_SCR(47);
    SHA256_SCR(48);
    SHA256_SCR(49);
    SHA256_SCR(50);
    SHA256_SCR(51);
    SHA256_SCR(52);
    SHA256_SCR(53);
    SHA256_SCR(54);
    SHA256_SCR(55);
    SHA256_SCR(56);
    SHA256_SCR(57);
    SHA256_SCR(58);
    SHA256_SCR(59);
    SHA256_SCR(60);
    SHA256_SCR(61);
    SHA256_SCR(62);
    SHA256_SCR(63);

    wv[0] = ctx->h[0];
    wv[1] = ctx->h[1];
    wv[2] = ctx->h[2];
    wv[3] = ctx->h[3];
    wv[4] = ctx->h[4];
    wv[5] = ctx->h[5];
    wv[6] = ctx->h[6];
    wv[7] = ctx->h[7];

    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 0);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 1);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 2);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 3);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 4);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 5);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 6);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 7);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 8);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 9);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 10);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 11);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 12);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 13);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 14);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 15);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 16);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 17);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 18);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 19);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 20);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 21);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 22);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 23);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 24);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 25);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 26);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 27);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 28);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 29);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 30);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 31);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 32);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 33);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 34);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 35);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 36);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 37);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 38);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 39);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 40);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 41);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 42);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 43);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 44);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 45);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 46);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 47);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 48);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 49);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 50);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 51);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 52);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 53);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 54);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 55);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 56);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 57);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 58);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 59);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 60);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 61);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 62);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 63);

    ctx->h[0] += wv[0];
    ctx->h[1] += wv[1];
    ctx->h[2] += wv[2];
    ctx->h[3] += wv[3];
    ctx->h[4] += wv[4];
    ctx->h[5] += wv[5];
    ctx->h[6] += wv[6];
    ctx->h[7] += wv[7];
#endif /* !UNROLL_LOOPS */
  }
}

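/* Streaming update: buffers partial input in ctx->block and runs the
 * compression function on every complete 64-byte block. May be called any
 * number of times between init and final. */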
void avb_sha256_update(AvbSHA256Ctx* ctx, const uint8_t* data, size_t len) {
  size_t block_nb;
  size_t new_len, rem_len, tmp_len;
  const uint8_t* shifted_data;

  tmp_len = AVB_SHA256_BLOCK_SIZE - ctx->len;
  rem_len = len < tmp_len ? len : tmp_len;

  avb_memcpy(&ctx->block[ctx->len], data, rem_len);

  if (ctx->len + len < AVB_SHA256_BLOCK_SIZE) {
    ctx->len += len;
    return;
  }

  new_len = len - rem_len;
  block_nb = new_len / AVB_SHA256_BLOCK_SIZE;

  shifted_data = data + rem_len;

  SHA256_transform(ctx, ctx->block, 1);
  SHA256_transform(ctx, shifted_data, block_nb);

  rem_len = new_len % AVB_SHA256_BLOCK_SIZE;

  avb_memcpy(ctx->block, &shifted_data[block_nb << 6], rem_len);

  ctx->len = rem_len;
  ctx->tot_len += (block_nb + 1) << 6;
}

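/* Finalization: appends the 0x80 padding byte, zero fill, and the total
 * message length in bits as a big-endian 64-bit value (using one or two
 * final blocks as needed), then returns the 32-byte digest stored in
 * ctx->buf. */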
uint8_t* avb_sha256_final(AvbSHA256Ctx* ctx) {
  size_t block_nb;
  size_t pm_len;
  uint64_t len_b;
#ifndef UNROLL_LOOPS
  size_t i;
#endif

  block_nb =
      (1 + ((AVB_SHA256_BLOCK_SIZE - 9) < (ctx->len % AVB_SHA256_BLOCK_SIZE)));

  len_b = (ctx->tot_len + ctx->len) << 3;
  pm_len = block_nb << 6;

  avb_memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
  ctx->block[ctx->len] = 0x80;
  UNPACK64(len_b, ctx->block + pm_len - 8);

  SHA256_transform(ctx, ctx->block, block_nb);

#ifndef UNROLL_LOOPS
  for (i = 0; i < 8; i++) {
    UNPACK32(ctx->h[i], &ctx->buf[i << 2]);
  }
#else
  UNPACK32(ctx->h[0], &ctx->buf[0]);
  UNPACK32(ctx->h[1], &ctx->buf[4]);
  UNPACK32(ctx->h[2], &ctx->buf[8]);
  UNPACK32(ctx->h[3], &ctx->buf[12]);
  UNPACK32(ctx->h[4], &ctx->buf[16]);
  UNPACK32(ctx->h[5], &ctx->buf[20]);
  UNPACK32(ctx->h[6], &ctx->buf[24]);
  UNPACK32(ctx->h[7], &ctx->buf[28]);
#endif /* !UNROLL_LOOPS */

  return ctx->buf;
}
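
/*
 * Minimal usage sketch (illustrative, not part of the original source):
 * hash a buffer in one pass with the streaming API above. The
 * AVB_SHA256_SELFTEST guard and sha256_example() are hypothetical names
 * added here so the sketch does not affect normal builds; it assumes
 * avb_sha.h declares AVB_SHA256_DIGEST_SIZE (32) alongside
 * AVB_SHA256_BLOCK_SIZE.
 */
#ifdef AVB_SHA256_SELFTEST
static void sha256_example(const uint8_t* data,
                           size_t len,
                           uint8_t out[AVB_SHA256_DIGEST_SIZE]) {
  AvbSHA256Ctx ctx;
  uint8_t* digest;

  avb_sha256_init(&ctx);
  /* update() may be called repeatedly to hash data incrementally. */
  avb_sha256_update(&ctx, data, len);
  /* final() returns a pointer to the digest held inside the context. */
  digest = avb_sha256_final(&ctx);
  avb_memcpy(out, digest, AVB_SHA256_DIGEST_SIZE);
}
#endif /* AVB_SHA256_SELFTEST */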