• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*############################################################################
2 # Copyright 2017 Intel Corporation
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 #     http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 ############################################################################*/
16 
17 /*
18  *  Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
19  *
20  *  Redistribution and use in source and binary forms, with or without
21  *  modification, are permitted provided that the following conditions are met:
22  *
23  *    - Redistributions of source code must retain the above copyright notice,
24  *     this list of conditions and the following disclaimer.
25  *
26  *    - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in the
28  *    documentation and/or other materials provided with the distribution.
29  *
30  *    - Neither the name of Intel Corporation nor the names of its contributors
31  *    may be used to endorse or promote products derived from this software
32  *    without specific prior written permission.
33  *
34  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
35  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
38  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
39  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
40  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
41  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
42  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
43  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
44  *  POSSIBILITY OF SUCH DAMAGE.
45  */
46 /// A SHA-256 implementation.
47 /*! \file */
48 
49 #include "epid/member/tiny/math/sha256.h"
50 
51 #include "epid/member/tiny/stdlib/tiny_stdlib.h"
52 
53 static void sha256_compress(unsigned int* iv, const uint8_t* data);
54 
/*
 * Prepare a hash state for a new message.
 *
 * The whole state is zeroed, then the chaining value is set to the
 * SHA-256 initial hash value: the first 32 bits of the fractional
 * parts of the square roots of the first eight primes, 2 through 19
 * (FIPS 180-4 section 5.3.3).
 */
void tc_sha256_init(sha256_state* s) {
  static const unsigned int sha256_h0[8] = {
      0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
      0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19};
  unsigned int i;

  (void)memset((uint8_t*)s, 0x00, sizeof(*s));
  for (i = 0; i < 8; ++i) {
    s->iv[i] = sha256_h0[i];
  }
}
72 
/*
 * Absorb datalen bytes of message data into the hash state.
 *
 * Bytes accumulate in the leftover buffer; each time a full 64-byte
 * block is collected it is run through the compression function and
 * the processed-bit counter is advanced.
 * NOTE(review): bits_hashed is not checked for overflow here - callers
 * hashing astronomically long inputs would wrap silently; confirm the
 * counter width in sha256.h.
 */
void tc_sha256_update(sha256_state* s, const uint8_t* data, size_t datalen) {
  size_t i;

  for (i = 0; i < datalen; ++i) {
    s->leftover[s->leftover_offset] = data[i];
    if (++s->leftover_offset == SHA256_BLOCK_SIZE) {
      sha256_compress(s->iv, s->leftover);
      s->leftover_offset = 0;
      s->bits_hashed += (SHA256_BLOCK_SIZE << 3);
    }
  }
}
83 
/*
 * Finish the hash and emit the 32-byte digest.
 *
 * Appends the mandatory 0x80 terminator byte, zero padding and the
 * total message length in bits as a 64-bit big-endian value
 * (FIPS 180-4 section 5.1.1), runs the final compression, then
 * serializes the state words most-significant-byte first into digest.
 * The state is cleared afterwards so message-dependent data does not
 * linger.
 *
 * digest must point to at least 32 writable bytes; s must have been
 * initialized with tc_sha256_init.
 */
void tc_sha256_final(uint8_t* digest, sha256_state* s) {
  unsigned int i;

  s->bits_hashed += (s->leftover_offset << 3);

  s->leftover[s->leftover_offset++] = 0x80; /* always room for one byte */
  if (s->leftover_offset > (sizeof(s->leftover) - 8)) {
    /* no room left for the 8-byte length field in this block:
     * zero-fill the remainder and compress it first */
    (void)memset(s->leftover + s->leftover_offset, 0x00,
                 sizeof(s->leftover) - s->leftover_offset);
    sha256_compress(s->iv, s->leftover);
    s->leftover_offset = 0;
  }

  /* add the padding and the length in big-Endian format;
   * NOTE(review): the shifts up to 56 assume bits_hashed is a 64-bit
   * counter - confirm against its declaration in sha256.h */
  (void)memset(s->leftover + s->leftover_offset, 0x00,
               sizeof(s->leftover) - 8 - s->leftover_offset);
  s->leftover[sizeof(s->leftover) - 1] = (uint8_t)(s->bits_hashed);
  s->leftover[sizeof(s->leftover) - 2] = (uint8_t)(s->bits_hashed >> 8);
  s->leftover[sizeof(s->leftover) - 3] = (uint8_t)(s->bits_hashed >> 16);
  s->leftover[sizeof(s->leftover) - 4] = (uint8_t)(s->bits_hashed >> 24);
  s->leftover[sizeof(s->leftover) - 5] = (uint8_t)(s->bits_hashed >> 32);
  s->leftover[sizeof(s->leftover) - 6] = (uint8_t)(s->bits_hashed >> 40);
  s->leftover[sizeof(s->leftover) - 7] = (uint8_t)(s->bits_hashed >> 48);
  s->leftover[sizeof(s->leftover) - 8] = (uint8_t)(s->bits_hashed >> 56);

  /* hash the padding and length */
  sha256_compress(s->iv, s->leftover);

  /* copy the iv out to digest, big-endian; iv is already an array of
   * unsigned int, so read each word directly instead of re-reading the
   * same location through a pointer cast as the old code did */
  for (i = 0; i < SHA256_STATE_BLOCKS; ++i) {
    unsigned int t = s->iv[i];
    *digest++ = (uint8_t)(t >> 24);
    *digest++ = (uint8_t)(t >> 16);
    *digest++ = (uint8_t)(t >> 8);
    *digest++ = (uint8_t)(t);
  }

  /* destroy the current state;
   * NOTE(review): a plain memset used only for scrubbing may be elided
   * by the optimizer - a secure-wipe primitive would be more robust */
  (void)memset(s, 0, sizeof(*s));
}
125 
/*
 * SHA-256 round constants K (FIPS 180-4 section 4.2.2).
 * Each entry is the first 32 bits of the fractional part of the cube
 * root of one of the first 64 primes (2 through 311), in order.
 */
static const unsigned int k256[64] = {
    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
    0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
    0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786,
    0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
    0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
    0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b,
    0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
    0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
    0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2};
143 
/* Rotate the 32-bit value a right by n bit positions (1 <= n <= 31). */
static unsigned int ROTR(unsigned int a, unsigned int n) {
  unsigned int lo = a >> n;
  unsigned int hi = a << (32 - n);
  return lo | hi;
}
147 
/* FIPS 180-4 section 4.1.2 logical functions. */
/* Big-sigma functions, used in the per-round state update. */
#define Sigma0(a) (ROTR((a), 2) ^ ROTR((a), 13) ^ ROTR((a), 22))
#define Sigma1(a) (ROTR((a), 6) ^ ROTR((a), 11) ^ ROTR((a), 25))
/* Small-sigma functions, used in the message schedule expansion. */
#define sigma0(a) (ROTR((a), 7) ^ ROTR((a), 18) ^ ((a) >> 3))
#define sigma1(a) (ROTR((a), 17) ^ ROTR((a), 19) ^ ((a) >> 10))

/* Ch: bitwise choice of b or c selected by a; Maj: bitwise majority. */
#define Ch(a, b, c) (((a) & (b)) ^ ((~(a)) & (c)))
#define Maj(a, b, c) (((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))
155 
/*
 * Read a 32-bit big-endian word from *c and advance *c past the four
 * bytes consumed.
 */
static unsigned int BigEndian(const uint8_t** c) {
  const uint8_t* p = *c;
  unsigned int n = ((unsigned int)p[0] << 24) | ((unsigned int)p[1] << 16) |
                   ((unsigned int)p[2] << 8) | (unsigned int)p[3];
  *c = p + 4;
  return n;
}
165 
/*
 * The SHA-256 compression function (FIPS 180-4 section 6.2.2): mixes
 * one 64-byte message block (data) into the eight-word chaining state
 * (iv), in place.
 */
static void sha256_compress(unsigned int* iv, const uint8_t* data) {
  unsigned int a, b, c, d, e, f, g, h; /* working variables */
  unsigned int s0, s1;                 /* schedule temporaries */
  unsigned int t1, t2;                 /* round temporaries */
  unsigned int work_space[16];         /* message schedule ring buffer */
  unsigned int n;
  unsigned int i;

  /* load the current chaining value into the working variables */
  a = iv[0];
  b = iv[1];
  c = iv[2];
  d = iv[3];
  e = iv[4];
  f = iv[5];
  g = iv[6];
  h = iv[7];

  /* rounds 0-15: the schedule words are the block itself, read as
   * big-endian 32-bit words and cached in work_space for later reuse */
  for (i = 0; i < 16; ++i) {
    n = BigEndian(&data);
    t1 = work_space[i] = n;
    t1 += h + Sigma1(e) + Ch(e, f, g) + k256[i];
    t2 = Sigma0(a) + Maj(a, b, c);
    h = g;
    g = f;
    f = e;
    e = d + t1;
    d = c;
    c = b;
    b = a;
    a = t1 + t2;
  }

  /* rounds 16-63: each schedule word W[i] is derived from W[i-16],
   * W[i-15], W[i-7] and W[i-2]; the "& 0x0f" indexing keeps only the
   * last 16 words alive, updating work_space in place as a ring
   * instead of holding the full 64-word schedule */
  for (; i < 64; ++i) {
    s0 = work_space[(i + 1) & 0x0f];
    s0 = sigma0(s0);
    s1 = work_space[(i + 14) & 0x0f];
    s1 = sigma1(s1);

    t1 = work_space[i & 0xf] += s0 + s1 + work_space[(i + 9) & 0xf];
    t1 += h + Sigma1(e) + Ch(e, f, g) + k256[i];
    t2 = Sigma0(a) + Maj(a, b, c);
    h = g;
    g = f;
    f = e;
    e = d + t1;
    d = c;
    c = b;
    b = a;
    a = t1 + t2;
  }

  /* fold the compressed block back into the chaining value */
  iv[0] += a;
  iv[1] += b;
  iv[2] += c;
  iv[3] += d;
  iv[4] += e;
  iv[5] += f;
  iv[6] += g;
  iv[7] += h;
}
226