1 /* SHA-256 and SHA-512 implementation based on code by Oliver Gay
2 * <olivier.gay@a3.epfl.ch> under a BSD-style license. See below.
3 */
4
5 /*
6 * FIPS 180-2 SHA-224/256/384/512 implementation
7 * Last update: 02/02/2007
8 * Issue date: 04/30/2005
9 *
10 * Copyright (C) 2005, 2007 Olivier Gay <olivier.gay@a3.epfl.ch>
11 * All rights reserved.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the project nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 */
37
38 #include <libavb/avb_sha.h>
39
40 #include "avb_crypto_ops_impl.h"
41
/* Bitwise helper macros. Every macro argument is fully parenthesized so
 * expansion is correct for compound-expression arguments (CERT PRE01-C),
 * and multi-statement macros use do { } while (0) so they act as a single
 * statement after an unbraced `if`/`for` (CERT PRE10-C). */
#define SHFR(x, n) ((x) >> (n))
#define ROTR(x, n) (((x) >> (n)) | ((x) << ((sizeof(x) << 3) - (n))))
#define ROTL(x, n) (((x) << (n)) | ((x) >> ((sizeof(x) << 3) - (n))))
#define CH(x, y, z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))

/* SHA-512 logical functions (FIPS 180-4 section 4.1.3): F1/F2 are the
 * "big sigma" round functions, F3/F4 the "small sigma" message-schedule
 * functions. */
#define SHA512_F1(x) (ROTR(x, 28) ^ ROTR(x, 34) ^ ROTR(x, 39))
#define SHA512_F2(x) (ROTR(x, 14) ^ ROTR(x, 18) ^ ROTR(x, 41))
#define SHA512_F3(x) (ROTR(x, 1) ^ ROTR(x, 8) ^ SHFR(x, 7))
#define SHA512_F4(x) (ROTR(x, 19) ^ ROTR(x, 61) ^ SHFR(x, 6))

/* Store the 32-bit value |x| into |str| as 4 big-endian bytes. */
#define UNPACK32(x, str)                 \
  do {                                   \
    *((str) + 3) = (uint8_t)((x));       \
    *((str) + 2) = (uint8_t)((x) >> 8);  \
    *((str) + 1) = (uint8_t)((x) >> 16); \
    *((str) + 0) = (uint8_t)((x) >> 24); \
  } while (0)

/* Store the 64-bit value |x| into |str| as 8 big-endian bytes. */
#define UNPACK64(x, str)                          \
  do {                                            \
    *((str) + 7) = (uint8_t)(x);                  \
    *((str) + 6) = (uint8_t)((uint64_t)(x) >> 8); \
    *((str) + 5) = (uint8_t)((uint64_t)(x) >> 16); \
    *((str) + 4) = (uint8_t)((uint64_t)(x) >> 24); \
    *((str) + 3) = (uint8_t)((uint64_t)(x) >> 32); \
    *((str) + 2) = (uint8_t)((uint64_t)(x) >> 40); \
    *((str) + 1) = (uint8_t)((uint64_t)(x) >> 48); \
    *((str) + 0) = (uint8_t)((uint64_t)(x) >> 56); \
  } while (0)

/* Load 8 big-endian bytes at |str| into the 64-bit value *|x|. */
#define PACK64(str, x)                                                     \
  do {                                                                     \
    *(x) =                                                                 \
        ((uint64_t) * ((str) + 7)) | ((uint64_t) * ((str) + 6) << 8) |     \
        ((uint64_t) * ((str) + 5) << 16) | ((uint64_t) * ((str) + 4) << 24) | \
        ((uint64_t) * ((str) + 3) << 32) | ((uint64_t) * ((str) + 2) << 40) | \
        ((uint64_t) * ((str) + 1) << 48) | ((uint64_t) * ((str) + 0) << 56); \
  } while (0)

/* Macros used for loops unrolling */

/* Message-schedule expansion for w[16..79] (FIPS 180-4 eq. for the
 * SHA-512 schedule); relies on locals w[] in the caller. */
#define SHA512_SCR(i)                                                  \
  do {                                                                 \
    w[(i)] = SHA512_F4(w[(i)-2]) + w[(i)-7] + SHA512_F3(w[(i)-15]) +   \
             w[(i)-16];                                                \
  } while (0)

/* One SHA-512 round with working-variable roles given by indices a..h;
 * relies on locals wv[], w[], t1, t2 and the sha512_k table. */
#define SHA512_EXP(a, b, c, d, e, f, g, h, j)                               \
  do {                                                                      \
    t1 = wv[(h)] + SHA512_F2(wv[(e)]) + CH(wv[(e)], wv[(f)], wv[(g)]) +     \
         sha512_k[(j)] + w[(j)];                                            \
    t2 = SHA512_F1(wv[(a)]) + MAJ(wv[(a)], wv[(b)], wv[(c)]);               \
    wv[(d)] += t1;                                                          \
    wv[(h)] = t1 + t2;                                                      \
  } while (0)
95
/* SHA-512 initial hash value H(0) (FIPS 180-4 section 5.3.5). */
static const uint64_t sha512_h0[8] = {
    0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL,
    0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
    0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL,
    0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL};
104
/* SHA-512 round constants K0..K79 (FIPS 180-4 section 4.2.3). */
static const uint64_t sha512_k[80] = {
    0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
    0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
    0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
    0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
    0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
    0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
    0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
    0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
    0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
    0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
    0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
    0xd192e819d6ef5218ULL, 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
    0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
    0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
    0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
    0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
    0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
    0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
    0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
    0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL};
133
134 /* SHA-512 implementation */
135
/* Resets |avb_ctx| so a fresh SHA-512 digest can be computed: loads the
 * FIPS 180-4 initial hash value into the implementation context stored
 * in avb_ctx->reserved and clears the buffered/total byte counts. */
void avb_sha512_init(AvbSHA512Ctx* avb_ctx) {
  AvbSHA512ImplCtx* ctx = (AvbSHA512ImplCtx*)avb_ctx->reserved;
  size_t word;

  /* The previously #ifdef'd unrolled form did exactly this; a plain
   * loop is used unconditionally and left to the compiler to unroll. */
  for (word = 0; word < 8; word++) {
    ctx->h[word] = sha512_h0[word];
  }

  ctx->len = 0;
  ctx->tot_len = 0;
}
157
/* SHA-512 compression function (FIPS 180-4 section 6.4): consumes
 * |block_nb| consecutive 128-byte blocks starting at |message| and folds
 * each one into the running hash state ctx->h. Callers only ever pass
 * whole blocks; partial input is staged in ctx->block by
 * avb_sha512_update/avb_sha512_final. */
static void SHA512_transform(AvbSHA512ImplCtx* ctx,
                             const uint8_t* message,
                             size_t block_nb) {
  uint64_t w[80];  /* Message schedule. */
  uint64_t wv[8];  /* Working variables (a..h in the spec). */
  uint64_t t1, t2; /* Per-round temporaries used by SHA512_EXP. */
  const uint8_t* sub_block;
  size_t i, j;

  for (i = 0; i < block_nb; i++) {
    sub_block = message + (i << 7); /* i * 128: start of the i-th block. */

#ifdef UNROLL_LOOPS_SHA512
    /* Load the block as 16 big-endian 64-bit words w[0..15]. */
    PACK64(&sub_block[0], &w[0]);
    PACK64(&sub_block[8], &w[1]);
    PACK64(&sub_block[16], &w[2]);
    PACK64(&sub_block[24], &w[3]);
    PACK64(&sub_block[32], &w[4]);
    PACK64(&sub_block[40], &w[5]);
    PACK64(&sub_block[48], &w[6]);
    PACK64(&sub_block[56], &w[7]);
    PACK64(&sub_block[64], &w[8]);
    PACK64(&sub_block[72], &w[9]);
    PACK64(&sub_block[80], &w[10]);
    PACK64(&sub_block[88], &w[11]);
    PACK64(&sub_block[96], &w[12]);
    PACK64(&sub_block[104], &w[13]);
    PACK64(&sub_block[112], &w[14]);
    PACK64(&sub_block[120], &w[15]);

    /* Expand the message schedule to w[16..79]. */
    SHA512_SCR(16);
    SHA512_SCR(17);
    SHA512_SCR(18);
    SHA512_SCR(19);
    SHA512_SCR(20);
    SHA512_SCR(21);
    SHA512_SCR(22);
    SHA512_SCR(23);
    SHA512_SCR(24);
    SHA512_SCR(25);
    SHA512_SCR(26);
    SHA512_SCR(27);
    SHA512_SCR(28);
    SHA512_SCR(29);
    SHA512_SCR(30);
    SHA512_SCR(31);
    SHA512_SCR(32);
    SHA512_SCR(33);
    SHA512_SCR(34);
    SHA512_SCR(35);
    SHA512_SCR(36);
    SHA512_SCR(37);
    SHA512_SCR(38);
    SHA512_SCR(39);
    SHA512_SCR(40);
    SHA512_SCR(41);
    SHA512_SCR(42);
    SHA512_SCR(43);
    SHA512_SCR(44);
    SHA512_SCR(45);
    SHA512_SCR(46);
    SHA512_SCR(47);
    SHA512_SCR(48);
    SHA512_SCR(49);
    SHA512_SCR(50);
    SHA512_SCR(51);
    SHA512_SCR(52);
    SHA512_SCR(53);
    SHA512_SCR(54);
    SHA512_SCR(55);
    SHA512_SCR(56);
    SHA512_SCR(57);
    SHA512_SCR(58);
    SHA512_SCR(59);
    SHA512_SCR(60);
    SHA512_SCR(61);
    SHA512_SCR(62);
    SHA512_SCR(63);
    SHA512_SCR(64);
    SHA512_SCR(65);
    SHA512_SCR(66);
    SHA512_SCR(67);
    SHA512_SCR(68);
    SHA512_SCR(69);
    SHA512_SCR(70);
    SHA512_SCR(71);
    SHA512_SCR(72);
    SHA512_SCR(73);
    SHA512_SCR(74);
    SHA512_SCR(75);
    SHA512_SCR(76);
    SHA512_SCR(77);
    SHA512_SCR(78);
    SHA512_SCR(79);

    /* Initialize working variables from the current hash value. */
    wv[0] = ctx->h[0];
    wv[1] = ctx->h[1];
    wv[2] = ctx->h[2];
    wv[3] = ctx->h[3];
    wv[4] = ctx->h[4];
    wv[5] = ctx->h[5];
    wv[6] = ctx->h[6];
    wv[7] = ctx->h[7];

    j = 0;

    /* 80 rounds, eight per iteration with the working-variable roles
     * rotated instead of shifting wv[] every round. */
    do {
      SHA512_EXP(0, 1, 2, 3, 4, 5, 6, 7, j);
      j++;
      SHA512_EXP(7, 0, 1, 2, 3, 4, 5, 6, j);
      j++;
      SHA512_EXP(6, 7, 0, 1, 2, 3, 4, 5, j);
      j++;
      SHA512_EXP(5, 6, 7, 0, 1, 2, 3, 4, j);
      j++;
      SHA512_EXP(4, 5, 6, 7, 0, 1, 2, 3, j);
      j++;
      SHA512_EXP(3, 4, 5, 6, 7, 0, 1, 2, j);
      j++;
      SHA512_EXP(2, 3, 4, 5, 6, 7, 0, 1, j);
      j++;
      SHA512_EXP(1, 2, 3, 4, 5, 6, 7, 0, j);
      j++;
    } while (j < 80);

    /* Add the compressed chunk back into the hash state. */
    ctx->h[0] += wv[0];
    ctx->h[1] += wv[1];
    ctx->h[2] += wv[2];
    ctx->h[3] += wv[3];
    ctx->h[4] += wv[4];
    ctx->h[5] += wv[5];
    ctx->h[6] += wv[6];
    ctx->h[7] += wv[7];
#else
    /* Load the block as 16 big-endian 64-bit words w[0..15]. */
    for (j = 0; j < 16; j++) {
      PACK64(&sub_block[j << 3], &w[j]);
    }

    /* Expand the message schedule to w[16..79]. */
    for (j = 16; j < 80; j++) {
      SHA512_SCR(j);
    }

    /* Initialize working variables from the current hash value. */
    for (j = 0; j < 8; j++) {
      wv[j] = ctx->h[j];
    }

    /* The 80 compression rounds (FIPS 180-4 section 6.4.2 step 3). */
    for (j = 0; j < 80; j++) {
      t1 = wv[7] + SHA512_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) + sha512_k[j] +
           w[j];
      t2 = SHA512_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]);
      wv[7] = wv[6];
      wv[6] = wv[5];
      wv[5] = wv[4];
      wv[4] = wv[3] + t1;
      wv[3] = wv[2];
      wv[2] = wv[1];
      wv[1] = wv[0];
      wv[0] = t1 + t2;
    }

    /* Add the compressed chunk back into the hash state. */
    for (j = 0; j < 8; j++)
      ctx->h[j] += wv[j];
#endif /* UNROLL_LOOPS_SHA512 */
  }
}
323
/* Feeds |len| bytes at |data| into the hash. Bytes are staged in
 * ctx->block until a full 128-byte block is available; full blocks are
 * compressed immediately and any leftover tail is kept for later calls. */
void avb_sha512_update(AvbSHA512Ctx* avb_ctx, const uint8_t* data, size_t len) {
  AvbSHA512ImplCtx* ctx = (AvbSHA512ImplCtx*)avb_ctx->reserved;
  size_t space_left;
  size_t head_len;
  size_t remaining;
  size_t whole_blocks;
  const uint8_t* cursor;

  /* Copy as much of |data| as fits into the staging buffer. */
  space_left = AVB_SHA512_BLOCK_SIZE - ctx->len;
  head_len = (len < space_left) ? len : space_left;
  avb_memcpy(&ctx->block[ctx->len], data, head_len);

  /* Still less than one full block buffered: just accumulate. */
  if (ctx->len + len < AVB_SHA512_BLOCK_SIZE) {
    ctx->len += len;
    return;
  }

  remaining = len - head_len;
  whole_blocks = remaining / AVB_SHA512_BLOCK_SIZE;
  cursor = data + head_len;

  /* Compress the now-full staging buffer, then every complete block
   * straight from the caller's data. */
  SHA512_transform(ctx, ctx->block, 1);
  SHA512_transform(ctx, cursor, whole_blocks);

  /* Stash the tail that didn't fill a block. */
  ctx->len = remaining % AVB_SHA512_BLOCK_SIZE;
  avb_memcpy(ctx->block, &cursor[whole_blocks * AVB_SHA512_BLOCK_SIZE], ctx->len);

  ctx->tot_len += (whole_blocks + 1) * AVB_SHA512_BLOCK_SIZE;
}
355
avb_sha512_final(AvbSHA512Ctx * avb_ctx)356 uint8_t* avb_sha512_final(AvbSHA512Ctx* avb_ctx) {
357 AvbSHA512ImplCtx* ctx = (AvbSHA512ImplCtx*)avb_ctx->reserved;
358 size_t block_nb;
359 size_t pm_len;
360 uint64_t len_b;
361
362 #ifndef UNROLL_LOOPS_SHA512
363 size_t i;
364 #endif
365
366 block_nb =
367 1 + ((AVB_SHA512_BLOCK_SIZE - 17) < (ctx->len % AVB_SHA512_BLOCK_SIZE));
368
369 len_b = (ctx->tot_len + ctx->len) << 3;
370 pm_len = block_nb << 7;
371
372 avb_memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
373 ctx->block[ctx->len] = 0x80;
374 UNPACK64(len_b, ctx->block + pm_len - 8);
375
376 SHA512_transform(ctx, ctx->block, block_nb);
377
378 #ifdef UNROLL_LOOPS_SHA512
379 UNPACK64(ctx->h[0], &avb_ctx->buf[0]);
380 UNPACK64(ctx->h[1], &avb_ctx->buf[8]);
381 UNPACK64(ctx->h[2], &avb_ctx->buf[16]);
382 UNPACK64(ctx->h[3], &avb_ctx->buf[24]);
383 UNPACK64(ctx->h[4], &avb_ctx->buf[32]);
384 UNPACK64(ctx->h[5], &avb_ctx->buf[40]);
385 UNPACK64(ctx->h[6], &avb_ctx->buf[48]);
386 UNPACK64(ctx->h[7], &avb_ctx->buf[56]);
387 #else
388 for (i = 0; i < 8; i++)
389 UNPACK64(ctx->h[i], &avb_ctx->buf[i << 3]);
390 #endif /* UNROLL_LOOPS_SHA512 */
391
392 return avb_ctx->buf;
393 }
394