1 // Copyright 2015-2016 Brian Smith.
2 //
3 // Permission to use, copy, modify, and/or distribute this software for any
4 // purpose with or without fee is hereby granted, provided that the above
5 // copyright notice and this permission notice appear in all copies.
6 //
7 // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
8 // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
10 // SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
12 // OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
13 // CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14
15 use super::{
16 aes::{self, Counter},
17 block::{Block, BLOCK_LEN},
18 gcm, shift, Aad, Nonce, Tag,
19 };
20 use crate::{aead, cpu, error, polyfill};
21 use core::ops::RangeFrom;
22
23 /// AES-128 in GCM mode with 128-bit tags and 96 bit nonces.
24 pub static AES_128_GCM: aead::Algorithm = aead::Algorithm {
25 key_len: 16,
26 init: init_128,
27 seal: aes_gcm_seal,
28 open: aes_gcm_open,
29 id: aead::AlgorithmID::AES_128_GCM,
30 max_input_len: AES_GCM_MAX_INPUT_LEN,
31 };
32
33 /// AES-256 in GCM mode with 128-bit tags and 96 bit nonces.
34 pub static AES_256_GCM: aead::Algorithm = aead::Algorithm {
35 key_len: 32,
36 init: init_256,
37 seal: aes_gcm_seal,
38 open: aes_gcm_open,
39 id: aead::AlgorithmID::AES_256_GCM,
40 max_input_len: AES_GCM_MAX_INPUT_LEN,
41 };
42
/// Per-key state shared by sealing and opening: the expanded AES key plus the
/// GHASH key derived from it in `init`.
pub struct Key {
    // First because it has a large alignment requirement.
    gcm_key: gcm::Key,
    aes_key: aes::Key,
}
47
init_128(key: &[u8], cpu_features: cpu::Features) -> Result<aead::KeyInner, error::Unspecified>48 fn init_128(key: &[u8], cpu_features: cpu::Features) -> Result<aead::KeyInner, error::Unspecified> {
49 init(key, aes::Variant::AES_128, cpu_features)
50 }
51
init_256(key: &[u8], cpu_features: cpu::Features) -> Result<aead::KeyInner, error::Unspecified>52 fn init_256(key: &[u8], cpu_features: cpu::Features) -> Result<aead::KeyInner, error::Unspecified> {
53 init(key, aes::Variant::AES_256, cpu_features)
54 }
55
init( key: &[u8], variant: aes::Variant, cpu_features: cpu::Features, ) -> Result<aead::KeyInner, error::Unspecified>56 fn init(
57 key: &[u8],
58 variant: aes::Variant,
59 cpu_features: cpu::Features,
60 ) -> Result<aead::KeyInner, error::Unspecified> {
61 let aes_key = aes::Key::new(key, variant, cpu_features)?;
62 let gcm_key = gcm::Key::new(aes_key.encrypt_block(Block::zero()), cpu_features);
63 Ok(aead::KeyInner::AesGcm(Key { aes_key, gcm_key }))
64 }
65
// Number of 16-byte blocks processed per chunk in the seal/open loops:
// 3 * 1024 / 16 = 192 blocks, i.e. 3 KiB of data per chunk.
const CHUNK_BLOCKS: usize = 3 * 1024 / 16;
67
/// Seals (encrypts and authenticates) `in_out` in place and returns the
/// 128-bit authentication tag.
fn aes_gcm_seal(key: &aead::KeyInner, nonce: Nonce, aad: Aad<&[u8]>, in_out: &mut [u8]) -> Tag {
    let Key { aes_key, gcm_key } = match key {
        aead::KeyInner::AesGcm(key) => key,
        // `aead::Algorithm` dispatch guarantees the key variant matches.
        _ => unreachable!(),
    };

    // The first counter block is reserved for computing the tag (it is XORed
    // in by `finish`); the keystream for the payload starts at the next one.
    let mut ctr = Counter::one(nonce);
    let tag_iv = ctr.increment();

    // Capture lengths before `in_out` is (possibly) re-sliced below; both are
    // needed for the final GHASH length block.
    let total_in_out_len = in_out.len();
    let aad_len = aad.0.len();
    let mut auth = gcm::Context::new(gcm_key, aad);

    // Fast path: on x86-64 with AES hardware support and AVX2, a fused
    // assembly routine encrypts and GHASHes together. It may handle only a
    // prefix of the input; the rest falls through to the generic code below.
    #[cfg(target_arch = "x86_64")]
    let in_out = {
        if !aes_key.is_aes_hw() || !auth.is_avx2() {
            in_out
        } else {
            use crate::c;
            prefixed_extern! {
                fn aesni_gcm_encrypt(
                    input: *const u8,
                    output: *mut u8,
                    len: c::size_t,
                    key: &aes::AES_KEY,
                    ivec: &mut Counter,
                    gcm: &mut gcm::ContextInner,
                ) -> c::size_t;
            }
            // SAFETY: input and output both point at `in_out` (in-place
            // encryption) with its exact length; the key, counter, and GCM
            // context are live for the duration of the call. The return value
            // is taken to be the number of bytes processed, at most
            // `in_out.len()` — TODO confirm against the assembly's contract.
            let processed = unsafe {
                aesni_gcm_encrypt(
                    in_out.as_ptr(),
                    in_out.as_mut_ptr(),
                    in_out.len(),
                    aes_key.inner_less_safe(),
                    &mut ctr,
                    auth.inner(),
                )
            };

            // Continue with whatever the assembly did not process.
            &mut in_out[processed..]
        }
    };

    // Split the remaining data into whole 16-byte blocks and a short tail.
    let (whole, remainder) = {
        let in_out_len = in_out.len();
        let whole_len = in_out_len - (in_out_len % BLOCK_LEN);
        in_out.split_at_mut(whole_len)
    };

    // Process whole blocks in `CHUNK_BLOCKS`-sized chunks. Note the order:
    // when sealing, the *ciphertext* is what gets authenticated, so each
    // chunk is encrypted before it is fed to GHASH.
    for chunk in whole.chunks_mut(CHUNK_BLOCKS * BLOCK_LEN) {
        aes_key.ctr32_encrypt_within(chunk, 0.., &mut ctr);
        auth.update_blocks(chunk);
    }

    // Final partial block, if any: pad to a full block, encrypt, then zero
    // the keystream bytes past the real input so only actual ciphertext is
    // authenticated, and copy the real bytes back out.
    if !remainder.is_empty() {
        let mut input = Block::zero();
        input.overwrite_part_at(0, remainder);
        let mut output = aes_key.encrypt_iv_xor_block(ctr.into(), input);
        output.zero_from(remainder.len());
        auth.update_block(output);
        remainder.copy_from_slice(&output.as_ref()[..remainder.len()]);
    }

    finish(aes_key, auth, tag_iv, aad_len, total_in_out_len)
}
134
/// Opens (decrypts and authenticates) a ciphertext and returns the
/// *calculated* tag; the caller is responsible for comparing it against the
/// received tag.
///
/// The ciphertext is read from `in_out[src.start..]` and the plaintext is
/// written starting at `in_out[0]`, i.e. shifted left by `src.start` bytes.
fn aes_gcm_open(
    key: &aead::KeyInner,
    nonce: Nonce,
    aad: Aad<&[u8]>,
    in_out: &mut [u8],
    src: RangeFrom<usize>,
) -> Tag {
    let Key { aes_key, gcm_key } = match key {
        aead::KeyInner::AesGcm(key) => key,
        // `aead::Algorithm` dispatch guarantees the key variant matches.
        _ => unreachable!(),
    };

    // The first counter block is reserved for computing the tag; the
    // keystream for the payload starts at the next one.
    let mut ctr = Counter::one(nonce);
    let tag_iv = ctr.increment();

    let aad_len = aad.0.len();
    let mut auth = gcm::Context::new(gcm_key, aad);

    // Offset of the ciphertext from the start of the output.
    let in_prefix_len = src.start;

    // Length of the actual ciphertext/plaintext, for the final length block.
    let total_in_out_len = in_out.len() - in_prefix_len;

    // Fast path: fused decrypt+GHASH assembly, mirroring `aes_gcm_seal`. It
    // may process only a prefix; the generic code below handles the rest.
    #[cfg(target_arch = "x86_64")]
    let in_out = {
        if !aes_key.is_aes_hw() || !auth.is_avx2() {
            in_out
        } else {
            use crate::c;

            prefixed_extern! {
                fn aesni_gcm_decrypt(
                    input: *const u8,
                    output: *mut u8,
                    len: c::size_t,
                    key: &aes::AES_KEY,
                    ivec: &mut Counter,
                    gcm: &mut gcm::ContextInner,
                ) -> c::size_t;
            }

            // SAFETY: input points at the ciphertext portion and output at
            // the start of the same buffer; since output trails input by
            // `src.start` bytes, the in-place overlap is assumed to be
            // tolerated by the assembly — TODO confirm against the
            // aesni-gcm contract. Key, counter, and GCM context are live for
            // the duration of the call.
            let processed = unsafe {
                aesni_gcm_decrypt(
                    in_out[src.clone()].as_ptr(),
                    in_out.as_mut_ptr(),
                    in_out.len() - src.start,
                    aes_key.inner_less_safe(),
                    &mut ctr,
                    auth.inner(),
                )
            };
            // Skip the plaintext already written; the remaining view keeps
            // the same `in_prefix_len` gap between plaintext written so far
            // and ciphertext yet to be read.
            &mut in_out[processed..]
        }
    };

    // Number of remaining ciphertext bytes that form whole 16-byte blocks.
    let whole_len = {
        let in_out_len = in_out.len() - in_prefix_len;
        in_out_len - (in_out_len % BLOCK_LEN)
    };
    {
        // Process whole blocks chunk by chunk. Note the order: when opening,
        // the *ciphertext* is authenticated first, then decrypted in place
        // shifted left by `in_prefix_len`.
        let mut chunk_len = CHUNK_BLOCKS * BLOCK_LEN;
        let mut output = 0;
        let mut input = in_prefix_len;
        loop {
            // Shrink the last chunk to whatever remains.
            if whole_len - output < chunk_len {
                chunk_len = whole_len - output;
            }
            if chunk_len == 0 {
                break;
            }

            auth.update_blocks(&in_out[input..][..chunk_len]);
            aes_key.ctr32_encrypt_within(
                &mut in_out[output..][..(chunk_len + in_prefix_len)],
                in_prefix_len..,
                &mut ctr,
            );
            output += chunk_len;
            input += chunk_len;
        }
    }

    // Final partial block: authenticate the zero-padded ciphertext, decrypt
    // it, and let `shift_partial` move the plaintext into its final position.
    let remainder = &mut in_out[whole_len..];
    shift::shift_partial((in_prefix_len, remainder), |remainder| {
        let mut input = Block::zero();
        input.overwrite_part_at(0, remainder);
        auth.update_block(input);
        aes_key.encrypt_iv_xor_block(ctr.into(), input)
    });

    finish(aes_key, auth, tag_iv, aad_len, total_in_out_len)
}
226
finish( aes_key: &aes::Key, mut gcm_ctx: gcm::Context, tag_iv: aes::Iv, aad_len: usize, in_out_len: usize, ) -> Tag227 fn finish(
228 aes_key: &aes::Key,
229 mut gcm_ctx: gcm::Context,
230 tag_iv: aes::Iv,
231 aad_len: usize,
232 in_out_len: usize,
233 ) -> Tag {
234 // Authenticate the final block containing the input lengths.
235 let aad_bits = polyfill::u64_from_usize(aad_len) << 3;
236 let ciphertext_bits = polyfill::u64_from_usize(in_out_len) << 3;
237 gcm_ctx.update_block(Block::from([aad_bits, ciphertext_bits]));
238
239 // Finalize the tag and return it.
240 gcm_ctx.pre_finish(|pre_tag| {
241 let encrypted_iv = aes_key.encrypt_block(Block::from(tag_iv.as_bytes_less_safe()));
242 let tag = pre_tag ^ encrypted_iv;
243 Tag(*tag.as_ref())
244 })
245 }
246
// Maximum plaintext length in bytes. The test below verifies this equals NIST
// SP800-38D's limit of 2^39 - 256 bits; presumably the `2` accounts for
// counter blocks not available for encrypting input — TODO confirm against
// `max_input_len`'s definition.
const AES_GCM_MAX_INPUT_LEN: u64 = super::max_input_len(BLOCK_LEN, 2);
248
#[cfg(test)]
mod tests {
    #[test]
    fn max_input_len_test() {
        // [NIST SP800-38D] Section 5.2.1.1. Note that [RFC 5116 Section 5.1] and
        // [RFC 5116 Section 5.2] have an off-by-one error in `P_MAX`.
        //
        // [NIST SP800-38D]:
        //    http://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf
        // [RFC 5116 Section 5.1]: https://tools.ietf.org/html/rfc5116#section-5.1
        // [RFC 5116 Section 5.2]: https://tools.ietf.org/html/rfc5116#section-5.2
        const NIST_SP800_38D_MAX_BITS: u64 = (1u64 << 39) - 256;
        assert_eq!(NIST_SP800_38D_MAX_BITS, 549_755_813_632u64);

        // Both key sizes share the same plaintext limit.
        for alg in [&super::AES_128_GCM, &super::AES_256_GCM].iter() {
            assert_eq!(alg.max_input_len * 8, NIST_SP800_38D_MAX_BITS);
        }
    }
}
272