1 /*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #ifndef NLP_SAFT_COMPONENTS_COMMON_MOBILE_LITE_BASE_FLOAT16_H_
18 #define NLP_SAFT_COMPONENTS_COMMON_MOBILE_LITE_BASE_FLOAT16_H_
19
20 #include "lang_id/common/lite_base/casts.h"
21 #include "lang_id/common/lite_base/integral-types.h"
22
23 namespace libtextclassifier3 {
24 namespace mobile {
25
26 // 16 bit encoding of a float. NOTE: can't be used directly for computation:
27 // one first needs to convert it to a normal float, using Float16To32.
28 //
29 // Compact 16-bit encoding of floating point numbers. This
30 // representation uses 1 bit for the sign, 8 bits for the exponent and
31 // 7 bits for the mantissa. It is assumed that floats are in IEEE 754
32 // format so a float16 is just bits 16-31 of a single precision float.
33 //
34 // NOTE: The IEEE floating point standard defines a float16 format that
35 // is different than this format (it has fewer bits of exponent and more
36 // bits of mantissa). We don't use that format here because conversion
37 // to/from 32-bit floats is more complex for that format, and the
38 // conversion for this format is very simple.
39 //
40 // <---------float16------------>
41 // s e e e e e e e e f f f f f f f f f f f f f f f f f f f f f f f
42 // <------------------------------float-------------------------->
43 // 3 3 2 2 1 1 0
44 // 1 0 3 2 5 4 0
45
46 typedef uint16 float16;
47
Float32To16(float f)48 static inline float16 Float32To16(float f) {
49 // Note that we just truncate the mantissa bits: we make no effort to
50 // do any smarter rounding.
51 return (bit_cast<uint32>(f) >> 16) & 0xffff;
52 }
53
Float16To32(float16 f)54 static inline float Float16To32(float16 f) {
55 // We fill in the new mantissa bits with 0, and don't do anything smarter.
56 return bit_cast<float>(f << 16);
57 }
58
}  // namespace mobile
}  // namespace libtextclassifier3
61
62 #endif // NLP_SAFT_COMPONENTS_COMMON_MOBILE_LITE_BASE_FLOAT16_H_
63