1 //! Provides the [GeneralPurpose] engine and associated config types.
2 use crate::{
3 alphabet,
4 alphabet::Alphabet,
5 engine::{Config, DecodePaddingMode},
6 DecodeError,
7 };
8 use core::convert::TryInto;
9
10 mod decode;
11 pub(crate) mod decode_suffix;
12 pub use decode::GeneralPurposeEstimate;
13
/// Marker stored in the decode table for bytes that are not members of the alphabet.
pub(crate) const INVALID_VALUE: u8 = 255;
15
/// A general-purpose base64 engine.
///
/// - It uses no vector CPU instructions, so it will work on any system.
/// - It is reasonably fast (~2-3GiB/s).
/// - It is not constant-time, though, so it is vulnerable to timing side-channel attacks. For loading cryptographic keys, etc, it is suggested to use the forthcoming constant-time implementation.
pub struct GeneralPurpose {
    /// Maps each 6-bit value to its alphabet byte (built by [encode_table]).
    encode_table: [u8; 64],
    /// Maps each possible input byte to its 6-bit value, or [INVALID_VALUE] for
    /// bytes outside the alphabet (built by [decode_table]).
    decode_table: [u8; 256],
    /// Encode/decode behavior settings.
    config: GeneralPurposeConfig,
}
26
27 impl GeneralPurpose {
28 /// Create a `GeneralPurpose` engine from an [Alphabet].
29 ///
30 /// While not very expensive to initialize, ideally these should be cached
31 /// if the engine will be used repeatedly.
new(alphabet: &Alphabet, config: GeneralPurposeConfig) -> Self32 pub const fn new(alphabet: &Alphabet, config: GeneralPurposeConfig) -> Self {
33 Self {
34 encode_table: encode_table(alphabet),
35 decode_table: decode_table(alphabet),
36 config,
37 }
38 }
39 }
40
impl super::Engine for GeneralPurpose {
    type Config = GeneralPurposeConfig;
    type DecodeEstimate = GeneralPurposeEstimate;

    /// Encodes `input` into `output`, returning the number of output bytes written.
    ///
    /// No `=` padding bytes are written here — only base64 symbols. The caller is
    /// responsible for providing an `output` slice large enough for the encoded data;
    /// slicing below will panic if it is too small.
    fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize {
        let mut input_index: usize = 0;

        const BLOCKS_PER_FAST_LOOP: usize = 4;
        const LOW_SIX_BITS: u64 = 0x3F;

        // we read 8 bytes at a time (u64) but only actually consume 6 of those bytes. Thus, we need
        // 2 trailing bytes to be available to read.
        let last_fast_index = input.len().saturating_sub(BLOCKS_PER_FAST_LOOP * 6 + 2);
        let mut output_index = 0;

        if last_fast_index > 0 {
            // Fast path: each iteration consumes 4 blocks of 6 input bytes (24 bytes)
            // and produces 32 output symbols.
            while input_index <= last_fast_index {
                // Major performance wins from letting the optimizer do the bounds check once, mostly
                // on the output side
                let input_chunk =
                    &input[input_index..(input_index + (BLOCKS_PER_FAST_LOOP * 6 + 2))];
                let output_chunk =
                    &mut output[output_index..(output_index + BLOCKS_PER_FAST_LOOP * 8)];

                // Hand-unrolling for 32 vs 16 or 8 bytes yields performance about equivalent
                // to unsafe pointer code on a Xeon E5-1650v3. 64 byte unrolling was slightly better for
                // large inputs but significantly worse for 50-byte input, unsurprisingly. I suspect
                // that it's a not uncommon use case to encode smallish chunks of data (e.g. a 64-byte
                // SHA-512 digest), so it would be nice if that fit in the unrolled loop at least once.
                // Plus, single-digit percentage performance differences might well be quite different
                // on different hardware.

                // The 6 payload bytes sit in the top 48 bits of the big-endian u64, so the
                // first symbol's 6 bits start at bit 58 and each later symbol is 6 bits lower,
                // down to bit 16 for the eighth symbol.
                let input_u64 = read_u64(&input_chunk[0..]);

                output_chunk[0] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
                output_chunk[1] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
                output_chunk[2] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
                output_chunk[3] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
                output_chunk[4] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
                output_chunk[5] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
                output_chunk[6] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
                output_chunk[7] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];

                // Subsequent reads overlap the previous one by 2 bytes: each starts 6 bytes later.
                let input_u64 = read_u64(&input_chunk[6..]);

                output_chunk[8] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
                output_chunk[9] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
                output_chunk[10] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
                output_chunk[11] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
                output_chunk[12] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
                output_chunk[13] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
                output_chunk[14] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
                output_chunk[15] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];

                let input_u64 = read_u64(&input_chunk[12..]);

                output_chunk[16] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
                output_chunk[17] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
                output_chunk[18] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
                output_chunk[19] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
                output_chunk[20] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
                output_chunk[21] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
                output_chunk[22] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
                output_chunk[23] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];

                let input_u64 = read_u64(&input_chunk[18..]);

                output_chunk[24] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
                output_chunk[25] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
                output_chunk[26] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
                output_chunk[27] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
                output_chunk[28] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
                output_chunk[29] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
                output_chunk[30] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
                output_chunk[31] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];

                output_index += BLOCKS_PER_FAST_LOOP * 8;
                input_index += BLOCKS_PER_FAST_LOOP * 6;
            }
        }

        // Encode what's left after the fast loop.

        const LOW_SIX_BITS_U8: u8 = 0x3F;

        let rem = input.len() % 3;
        let start_of_rem = input.len() - rem;

        // start at the first index not handled by fast loop, which may be 0.

        // Slow path: one full 3-byte block (4 symbols) per iteration, byte at a time.
        while input_index < start_of_rem {
            let input_chunk = &input[input_index..(input_index + 3)];
            let output_chunk = &mut output[output_index..(output_index + 4)];

            output_chunk[0] = self.encode_table[(input_chunk[0] >> 2) as usize];
            output_chunk[1] = self.encode_table
                [((input_chunk[0] << 4 | input_chunk[1] >> 4) & LOW_SIX_BITS_U8) as usize];
            output_chunk[2] = self.encode_table
                [((input_chunk[1] << 2 | input_chunk[2] >> 6) & LOW_SIX_BITS_U8) as usize];
            output_chunk[3] = self.encode_table[(input_chunk[2] & LOW_SIX_BITS_U8) as usize];

            input_index += 3;
            output_index += 4;
        }

        if rem == 2 {
            // 2 leftover input bytes produce 3 symbols (no padding written here).
            output[output_index] = self.encode_table[(input[start_of_rem] >> 2) as usize];
            output[output_index + 1] =
                self.encode_table[((input[start_of_rem] << 4 | input[start_of_rem + 1] >> 4)
                    & LOW_SIX_BITS_U8) as usize];
            output[output_index + 2] =
                self.encode_table[((input[start_of_rem + 1] << 2) & LOW_SIX_BITS_U8) as usize];
            output_index += 3;
        } else if rem == 1 {
            // 1 leftover input byte produces 2 symbols (no padding written here).
            output[output_index] = self.encode_table[(input[start_of_rem] >> 2) as usize];
            output[output_index + 1] =
                self.encode_table[((input[start_of_rem] << 4) & LOW_SIX_BITS_U8) as usize];
            output_index += 2;
        }

        output_index
    }

    /// Builds a [GeneralPurposeEstimate] for an input of `input_len` bytes.
    fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate {
        GeneralPurposeEstimate::new(input_len)
    }

    /// Decodes `input` into `output` by delegating to [decode::decode_helper] with
    /// this engine's decode table and its config's trailing-bits / padding settings.
    fn internal_decode(
        &self,
        input: &[u8],
        output: &mut [u8],
        estimate: Self::DecodeEstimate,
    ) -> Result<usize, DecodeError> {
        decode::decode_helper(
            input,
            estimate,
            output,
            &self.decode_table,
            self.config.decode_allow_trailing_bits,
            self.config.decode_padding_mode,
        )
    }

    /// Returns this engine's configuration.
    fn config(&self) -> &Self::Config {
        &self.config
    }
}
188
189 /// Returns a table mapping a 6-bit index to the ASCII byte encoding of the index
encode_table(alphabet: &Alphabet) -> [u8; 64]190 pub(crate) const fn encode_table(alphabet: &Alphabet) -> [u8; 64] {
191 // the encode table is just the alphabet:
192 // 6-bit index lookup -> printable byte
193 let mut encode_table = [0_u8; 64];
194 {
195 let mut index = 0;
196 while index < 64 {
197 encode_table[index] = alphabet.symbols[index];
198 index += 1;
199 }
200 }
201
202 encode_table
203 }
204
205 /// Returns a table mapping base64 bytes as the lookup index to either:
206 /// - [INVALID_VALUE] for bytes that aren't members of the alphabet
207 /// - a byte whose lower 6 bits are the value that was encoded into the index byte
decode_table(alphabet: &Alphabet) -> [u8; 256]208 pub(crate) const fn decode_table(alphabet: &Alphabet) -> [u8; 256] {
209 let mut decode_table = [INVALID_VALUE; 256];
210
211 // Since the table is full of `INVALID_VALUE` already, we only need to overwrite
212 // the parts that are valid.
213 let mut index = 0;
214 while index < 64 {
215 // The index in the alphabet is the 6-bit value we care about.
216 // Since the index is in 0-63, it is safe to cast to u8.
217 decode_table[alphabet.symbols[index] as usize] = index as u8;
218 index += 1;
219 }
220
221 decode_table
222 }
223
#[inline]
fn read_u64(s: &[u8]) -> u64 {
    // Interpret the first 8 bytes of `s` as a big-endian u64; panics if `s` is
    // shorter than 8 bytes (callers guarantee enough trailing bytes).
    let first_eight: [u8; 8] = s[..8].try_into().unwrap();
    u64::from_be_bytes(first_eight)
}
228
/// Contains configuration parameters for base64 encoding and decoding.
///
/// ```
/// # use base64::engine::GeneralPurposeConfig;
/// let config = GeneralPurposeConfig::new()
///     .with_encode_padding(false);
/// // further customize using `.with_*` methods as needed
/// ```
///
/// The constants [PAD] and [NO_PAD] cover most use cases.
///
/// To specify the characters used, see [Alphabet].
#[derive(Clone, Copy, Debug)]
pub struct GeneralPurposeConfig {
    /// Whether encoding appends `=` padding characters.
    encode_padding: bool,
    /// Whether nonzero bits in the unused space of the final symbol are tolerated
    /// when decoding instead of producing an error.
    decode_allow_trailing_bits: bool,
    /// How the presence/absence of padding is validated when decoding.
    decode_padding_mode: DecodePaddingMode,
}
247
248 impl GeneralPurposeConfig {
249 /// Create a new config with `padding` = `true`, `decode_allow_trailing_bits` = `false`, and
250 /// `decode_padding_mode = DecodePaddingMode::RequireCanonicalPadding`.
251 ///
252 /// This probably matches most people's expectations, but consider disabling padding to save
253 /// a few bytes unless you specifically need it for compatibility with some legacy system.
new() -> Self254 pub const fn new() -> Self {
255 Self {
256 // RFC states that padding must be applied by default
257 encode_padding: true,
258 decode_allow_trailing_bits: false,
259 decode_padding_mode: DecodePaddingMode::RequireCanonical,
260 }
261 }
262
263 /// Create a new config based on `self` with an updated `padding` setting.
264 ///
265 /// If `padding` is `true`, encoding will append either 1 or 2 `=` padding characters as needed
266 /// to produce an output whose length is a multiple of 4.
267 ///
268 /// Padding is not needed for correct decoding and only serves to waste bytes, but it's in the
269 /// [spec](https://datatracker.ietf.org/doc/html/rfc4648#section-3.2).
270 ///
271 /// For new applications, consider not using padding if the decoders you're using don't require
272 /// padding to be present.
with_encode_padding(self, padding: bool) -> Self273 pub const fn with_encode_padding(self, padding: bool) -> Self {
274 Self {
275 encode_padding: padding,
276 ..self
277 }
278 }
279
280 /// Create a new config based on `self` with an updated `decode_allow_trailing_bits` setting.
281 ///
282 /// Most users will not need to configure this. It's useful if you need to decode base64
283 /// produced by a buggy encoder that has bits set in the unused space on the last base64
284 /// character as per [forgiving-base64 decode](https://infra.spec.whatwg.org/#forgiving-base64-decode).
285 /// If invalid trailing bits are present and this is `true`, those bits will
286 /// be silently ignored, else `DecodeError::InvalidLastSymbol` will be emitted.
with_decode_allow_trailing_bits(self, allow: bool) -> Self287 pub const fn with_decode_allow_trailing_bits(self, allow: bool) -> Self {
288 Self {
289 decode_allow_trailing_bits: allow,
290 ..self
291 }
292 }
293
294 /// Create a new config based on `self` with an updated `decode_padding_mode` setting.
295 ///
296 /// Padding is not useful in terms of representing encoded data -- it makes no difference to
297 /// the decoder if padding is present or not, so if you have some un-padded input to decode, it
298 /// is perfectly fine to use `DecodePaddingMode::Indifferent` to prevent errors from being
299 /// emitted.
300 ///
301 /// However, since in practice
302 /// [people who learned nothing from BER vs DER seem to expect base64 to have one canonical encoding](https://eprint.iacr.org/2022/361),
303 /// the default setting is the stricter `DecodePaddingMode::RequireCanonicalPadding`.
304 ///
305 /// Or, if "canonical" in your circumstance means _no_ padding rather than padding to the
306 /// next multiple of four, there's `DecodePaddingMode::RequireNoPadding`.
with_decode_padding_mode(self, mode: DecodePaddingMode) -> Self307 pub const fn with_decode_padding_mode(self, mode: DecodePaddingMode) -> Self {
308 Self {
309 decode_padding_mode: mode,
310 ..self
311 }
312 }
313 }
314
315 impl Default for GeneralPurposeConfig {
316 /// Delegates to [GeneralPurposeConfig::new].
default() -> Self317 fn default() -> Self {
318 Self::new()
319 }
320 }
321
322 impl Config for GeneralPurposeConfig {
encode_padding(&self) -> bool323 fn encode_padding(&self) -> bool {
324 self.encode_padding
325 }
326 }
327
/// A [GeneralPurpose] engine using the [alphabet::STANDARD] base64 alphabet and [PAD] config.
pub const STANDARD: GeneralPurpose = GeneralPurpose::new(&alphabet::STANDARD, PAD);

/// A [GeneralPurpose] engine using the [alphabet::STANDARD] base64 alphabet and [NO_PAD] config.
pub const STANDARD_NO_PAD: GeneralPurpose = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD);

/// A [GeneralPurpose] engine using the [alphabet::URL_SAFE] base64 alphabet and [PAD] config.
pub const URL_SAFE: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, PAD);

/// A [GeneralPurpose] engine using the [alphabet::URL_SAFE] base64 alphabet and [NO_PAD] config.
pub const URL_SAFE_NO_PAD: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, NO_PAD);

/// Include padding bytes when encoding, and require that they be present when decoding.
///
/// Equivalent to the defaults of [GeneralPurposeConfig::new].
///
/// This is the standard per the base64 RFC, but consider using [NO_PAD] instead as padding serves
/// little purpose in practice.
pub const PAD: GeneralPurposeConfig = GeneralPurposeConfig::new();

/// Don't add padding when encoding, and require no padding when decoding.
pub const NO_PAD: GeneralPurposeConfig = GeneralPurposeConfig::new()
    .with_encode_padding(false)
    .with_decode_padding_mode(DecodePaddingMode::RequireNone);
350