/* SHA-256 and SHA-512 implementation based on code by Olivier Gay
 * <olivier.gay@a3.epfl.ch> under a BSD-style license. See below.
 */

/*
 * FIPS 180-2 SHA-224/256/384/512 implementation
 * Last update: 02/02/2007
 * Issue date: 04/30/2005
 *
 * Copyright (C) 2005, 2007 Olivier Gay <olivier.gay@a3.epfl.ch>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <libavb/avb_sha.h>

#include "avb_crypto_ops_impl.h"

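/* Basic bit operations used throughout: SHFR is a logical right shift,
 * ROTR/ROTL rotate by n bits (sizeof(x) << 3 is the operand width in
 * bits), and CH ("choose") and MAJ ("majority") are the boolean
 * functions defined in FIPS 180-4 section 4.1.2. */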
#define SHFR(x, n) ((x) >> (n))
#define ROTR(x, n) (((x) >> (n)) | ((x) << ((sizeof(x) << 3) - (n))))
#define ROTL(x, n) (((x) << (n)) | ((x) >> ((sizeof(x) << 3) - (n))))
#define CH(x, y, z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))

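/* The four SHA-256 sigma functions from FIPS 180-4 section 4.1.2:
 * F1/F2 are the "big" Sigma0/Sigma1 used in each compression round,
 * F3/F4 are the "small" sigma0/sigma1 used in the message schedule. */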
#define SHA256_F1(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
#define SHA256_F2(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
#define SHA256_F3(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHFR(x, 3))
#define SHA256_F4(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHFR(x, 10))

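/* Store a 32-bit word into four bytes, most significant byte first
 * (SHA-256 is defined over big-endian data). */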
#define UNPACK32(x, str)                 \
  {                                      \
    *((str) + 3) = (uint8_t)((x));       \
    *((str) + 2) = (uint8_t)((x) >> 8);  \
    *((str) + 1) = (uint8_t)((x) >> 16); \
    *((str) + 0) = (uint8_t)((x) >> 24); \
  }

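/* Store a 64-bit value into eight bytes, most significant byte first;
 * used to append the message length during final padding. */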
#define UNPACK64(x, str)                           \
  {                                                \
    *((str) + 7) = (uint8_t)(x);                   \
    *((str) + 6) = (uint8_t)((uint64_t)(x) >> 8);  \
    *((str) + 5) = (uint8_t)((uint64_t)(x) >> 16); \
    *((str) + 4) = (uint8_t)((uint64_t)(x) >> 24); \
    *((str) + 3) = (uint8_t)((uint64_t)(x) >> 32); \
    *((str) + 2) = (uint8_t)((uint64_t)(x) >> 40); \
    *((str) + 1) = (uint8_t)((uint64_t)(x) >> 48); \
    *((str) + 0) = (uint8_t)((uint64_t)(x) >> 56); \
  }

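/* Load four big-endian bytes into a 32-bit word. */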
#define PACK32(str, x)                                                \
  {                                                                   \
    *(x) = ((uint32_t)*((str) + 3)) | ((uint32_t)*((str) + 2) << 8) | \
           ((uint32_t)*((str) + 1) << 16) |                           \
           ((uint32_t)*((str) + 0) << 24);                            \
  }

/* Macros used for loop unrolling */

#define SHA256_SCR(i) \
  { w[i] = SHA256_F4(w[i - 2]) + w[i - 7] + SHA256_F3(w[i - 15]) + w[i - 16]; }

#define SHA256_EXP(a, b, c, d, e, f, g, h, j)                               \
  {                                                                         \
    t1 = wv[h] + SHA256_F2(wv[e]) + CH(wv[e], wv[f], wv[g]) + sha256_k[j] + \
         w[j];                                                              \
    t2 = SHA256_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]);                       \
    wv[d] += t1;                                                            \
    wv[h] = t1 + t2;                                                        \
  }

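/* Initial hash value H(0): the first 32 bits of the fractional parts of
 * the square roots of the first eight primes (FIPS 180-4 section 5.3.3). */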
static const uint32_t sha256_h0[8] = {0x6a09e667,
                                      0xbb67ae85,
                                      0x3c6ef372,
                                      0xa54ff53a,
                                      0x510e527f,
                                      0x9b05688c,
                                      0x1f83d9ab,
                                      0x5be0cd19};

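/* Round constants K: the first 32 bits of the fractional parts of the
 * cube roots of the first 64 primes (FIPS 180-4 section 4.2.2). */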
static const uint32_t sha256_k[64] = {
    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
    0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
    0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786,
    0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
    0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
    0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b,
    0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
    0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
    0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2};

/* SHA-256 implementation */
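
/* Typical usage, as a minimal sketch (AvbSHA256Ctx and the digest buffer
 * come from libavb/avb_sha.h; `data` and `data_len` are placeholders for
 * the caller's input):
 *
 *   AvbSHA256Ctx ctx;
 *   avb_sha256_init(&ctx);
 *   avb_sha256_update(&ctx, data, data_len);
 *   const uint8_t* digest = avb_sha256_final(&ctx);
 *
 * The returned pointer refers to storage inside `ctx` and remains valid
 * only as long as `ctx` is alive. */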
void avb_sha256_init(AvbSHA256Ctx* avb_ctx) {
  AvbSHA256ImplCtx* ctx = (AvbSHA256ImplCtx*)avb_ctx->reserved;
#ifndef UNROLL_LOOPS
  int i;
  for (i = 0; i < 8; i++) {
    ctx->h[i] = sha256_h0[i];
  }
#else
  ctx->h[0] = sha256_h0[0];
  ctx->h[1] = sha256_h0[1];
  ctx->h[2] = sha256_h0[2];
  ctx->h[3] = sha256_h0[3];
  ctx->h[4] = sha256_h0[4];
  ctx->h[5] = sha256_h0[5];
  ctx->h[6] = sha256_h0[6];
  ctx->h[7] = sha256_h0[7];
#endif /* !UNROLL_LOOPS */

  ctx->len = 0;
  ctx->tot_len = 0;
}

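/* Compression function: consumes block_nb consecutive 64-byte blocks
 * from `message` and folds them into the running hash state ctx->h. */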
static void SHA256_transform(AvbSHA256ImplCtx* ctx,
                             const uint8_t* message,
                             size_t block_nb) {
  uint32_t w[64];
  uint32_t wv[8];
  uint32_t t1, t2;
  const unsigned char* sub_block;
  size_t i;

#ifndef UNROLL_LOOPS
  size_t j;
#endif

  for (i = 0; i < block_nb; i++) {
    sub_block = message + (i << 6);

#ifndef UNROLL_LOOPS
    for (j = 0; j < 16; j++) {
      PACK32(&sub_block[j << 2], &w[j]);
    }

    for (j = 16; j < 64; j++) {
      SHA256_SCR(j);
    }

    for (j = 0; j < 8; j++) {
      wv[j] = ctx->h[j];
    }

    for (j = 0; j < 64; j++) {
      t1 = wv[7] + SHA256_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) + sha256_k[j] +
           w[j];
      t2 = SHA256_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]);
      wv[7] = wv[6];
      wv[6] = wv[5];
      wv[5] = wv[4];
      wv[4] = wv[3] + t1;
      wv[3] = wv[2];
      wv[2] = wv[1];
      wv[1] = wv[0];
      wv[0] = t1 + t2;
    }

    for (j = 0; j < 8; j++) {
      ctx->h[j] += wv[j];
    }
#else
    PACK32(&sub_block[0], &w[0]);
    PACK32(&sub_block[4], &w[1]);
    PACK32(&sub_block[8], &w[2]);
    PACK32(&sub_block[12], &w[3]);
    PACK32(&sub_block[16], &w[4]);
    PACK32(&sub_block[20], &w[5]);
    PACK32(&sub_block[24], &w[6]);
    PACK32(&sub_block[28], &w[7]);
    PACK32(&sub_block[32], &w[8]);
    PACK32(&sub_block[36], &w[9]);
    PACK32(&sub_block[40], &w[10]);
    PACK32(&sub_block[44], &w[11]);
    PACK32(&sub_block[48], &w[12]);
    PACK32(&sub_block[52], &w[13]);
    PACK32(&sub_block[56], &w[14]);
    PACK32(&sub_block[60], &w[15]);

    SHA256_SCR(16);
    SHA256_SCR(17);
    SHA256_SCR(18);
    SHA256_SCR(19);
    SHA256_SCR(20);
    SHA256_SCR(21);
    SHA256_SCR(22);
    SHA256_SCR(23);
    SHA256_SCR(24);
    SHA256_SCR(25);
    SHA256_SCR(26);
    SHA256_SCR(27);
    SHA256_SCR(28);
    SHA256_SCR(29);
    SHA256_SCR(30);
    SHA256_SCR(31);
    SHA256_SCR(32);
    SHA256_SCR(33);
    SHA256_SCR(34);
    SHA256_SCR(35);
    SHA256_SCR(36);
    SHA256_SCR(37);
    SHA256_SCR(38);
    SHA256_SCR(39);
    SHA256_SCR(40);
    SHA256_SCR(41);
    SHA256_SCR(42);
    SHA256_SCR(43);
    SHA256_SCR(44);
    SHA256_SCR(45);
    SHA256_SCR(46);
    SHA256_SCR(47);
    SHA256_SCR(48);
    SHA256_SCR(49);
    SHA256_SCR(50);
    SHA256_SCR(51);
    SHA256_SCR(52);
    SHA256_SCR(53);
    SHA256_SCR(54);
    SHA256_SCR(55);
    SHA256_SCR(56);
    SHA256_SCR(57);
    SHA256_SCR(58);
    SHA256_SCR(59);
    SHA256_SCR(60);
    SHA256_SCR(61);
    SHA256_SCR(62);
    SHA256_SCR(63);

    wv[0] = ctx->h[0];
    wv[1] = ctx->h[1];
    wv[2] = ctx->h[2];
    wv[3] = ctx->h[3];
    wv[4] = ctx->h[4];
    wv[5] = ctx->h[5];
    wv[6] = ctx->h[6];
    wv[7] = ctx->h[7];

    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 0);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 1);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 2);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 3);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 4);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 5);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 6);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 7);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 8);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 9);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 10);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 11);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 12);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 13);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 14);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 15);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 16);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 17);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 18);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 19);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 20);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 21);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 22);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 23);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 24);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 25);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 26);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 27);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 28);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 29);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 30);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 31);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 32);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 33);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 34);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 35);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 36);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 37);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 38);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 39);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 40);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 41);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 42);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 43);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 44);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 45);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 46);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 47);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 48);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 49);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 50);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 51);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 52);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 53);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 54);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 55);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 56);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 57);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 58);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 59);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 60);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 61);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 62);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 63);

    ctx->h[0] += wv[0];
    ctx->h[1] += wv[1];
    ctx->h[2] += wv[2];
    ctx->h[3] += wv[3];
    ctx->h[4] += wv[4];
    ctx->h[5] += wv[5];
    ctx->h[6] += wv[6];
    ctx->h[7] += wv[7];
#endif /* !UNROLL_LOOPS */
  }
}

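/* Absorbs `len` bytes of input: tops up any buffered partial block,
 * compresses every complete 64-byte block, and buffers the remainder. */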
void avb_sha256_update(AvbSHA256Ctx* avb_ctx, const uint8_t* data, size_t len) {
  AvbSHA256ImplCtx* ctx = (AvbSHA256ImplCtx*)avb_ctx->reserved;
  size_t block_nb;
  size_t new_len, rem_len, tmp_len;
  const uint8_t* shifted_data;

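  /* Copy as much input as fits into the buffered partial block. */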
  tmp_len = AVB_SHA256_BLOCK_SIZE - ctx->len;
  rem_len = len < tmp_len ? len : tmp_len;

  avb_memcpy(&ctx->block[ctx->len], data, rem_len);

  if (ctx->len + len < AVB_SHA256_BLOCK_SIZE) {
    ctx->len += len;
    return;
  }

  new_len = len - rem_len;
  block_nb = new_len / AVB_SHA256_BLOCK_SIZE;

  shifted_data = data + rem_len;

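  /* Compress the completed buffered block, then all remaining full
   * blocks straight from the caller's data. */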
  SHA256_transform(ctx, ctx->block, 1);
  SHA256_transform(ctx, shifted_data, block_nb);

  rem_len = new_len % AVB_SHA256_BLOCK_SIZE;

  avb_memcpy(ctx->block, &shifted_data[block_nb << 6], rem_len);

  ctx->len = rem_len;
  ctx->tot_len += (block_nb + 1) << 6;
}

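/* Applies FIPS 180-4 padding (0x80, zeros, then the 64-bit big-endian
 * message length in bits), runs the final compression, and writes the
 * digest to avb_ctx->buf. */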
uint8_t* avb_sha256_final(AvbSHA256Ctx* avb_ctx) {
  AvbSHA256ImplCtx* ctx = (AvbSHA256ImplCtx*)avb_ctx->reserved;
  size_t block_nb;
  size_t pm_len;
  uint64_t len_b;
#ifndef UNROLL_LOOPS
  size_t i;
#endif

  block_nb =
      (1 + ((AVB_SHA256_BLOCK_SIZE - 9) < (ctx->len % AVB_SHA256_BLOCK_SIZE)));

  len_b = (ctx->tot_len + ctx->len) << 3;
  pm_len = block_nb << 6;

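  /* Zero the padding area, set the leading 0x80 byte, and append the
   * total message length in bits as a big-endian 64-bit value. */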
  avb_memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
  ctx->block[ctx->len] = 0x80;
  UNPACK64(len_b, ctx->block + pm_len - 8);

  SHA256_transform(ctx, ctx->block, block_nb);

#ifndef UNROLL_LOOPS
  for (i = 0; i < 8; i++) {
    UNPACK32(ctx->h[i], &avb_ctx->buf[i << 2]);
  }
#else
  UNPACK32(ctx->h[0], &avb_ctx->buf[0]);
  UNPACK32(ctx->h[1], &avb_ctx->buf[4]);
  UNPACK32(ctx->h[2], &avb_ctx->buf[8]);
  UNPACK32(ctx->h[3], &avb_ctx->buf[12]);
  UNPACK32(ctx->h[4], &avb_ctx->buf[16]);
  UNPACK32(ctx->h[5], &avb_ctx->buf[20]);
  UNPACK32(ctx->h[6], &avb_ctx->buf[24]);
  UNPACK32(ctx->h[7], &avb_ctx->buf[28]);
#endif /* !UNROLL_LOOPS */

  return avb_ctx->buf;
}