#include "QuantUtils.h"

#include <algorithm>
#include <cmath>
#include <cstring>
#include <limits>
#include <memory>

namespace android {
namespace nn {

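// Integer layer normalization over each row of a quantized int16 tensor.
// For every batch row this computes the mean and variance in fixed point,
// normalizes by the inverse standard deviation obtained from
// GetInvSqrtQuantizedMultiplierExp, applies the per-channel layer_norm_weights
// and bias, rescales with the (layer_norm_scale_a, layer_norm_scale_b)
// quantized multiplier, and saturates the result to int16.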
void ApplyLayerNorm(const int16_t* input, const int16_t* layer_norm_weights, const int32_t* bias,
                    int32_t layer_norm_scale_a, int32_t layer_norm_scale_b, int32_t variance_limit,
                    int n_batch, int n_input, int16_t* output) {
    static const int kOverflowGuard = 1 << 20;
    for (int i = 0; i < n_batch; ++i) {
        int64_t sum = 0;
        int64_t sum_sq = 0;
        for (int j = 0; j < n_input; ++j) {
            const int32_t index = i * n_input + j;
            int32_t val = static_cast<int32_t>(input[index]);
            sum += val;
            sum_sq += val * val;
        }
        int32_t mean = static_cast<int32_t>(static_cast<int64_t>(sum) * 1024 / n_input);
        // TODO(jianlijianli): Avoids overflow but only works for POT n_input.
        int32_t temp = kOverflowGuard / n_input;
        int64_t variance = sum_sq * temp - static_cast<int64_t>(mean) * static_cast<int64_t>(mean);
        int32_t variance2 = static_cast<int32_t>(variance / kOverflowGuard);
        if (variance2 < 1) {
            variance2 = variance_limit;
        }
        int32_t stddev_inverse_a;
        int stddev_inverse_b;
        GetInvSqrtQuantizedMultiplierExp(variance2, /*reverse_shift*/ -1, &stddev_inverse_a,
                                         &stddev_inverse_b);

        for (int j = 0; j < n_input; ++j) {
            const int32_t index = i * n_input + j;
            int32_t val = static_cast<int32_t>(input[index]);
            int32_t shifted = 1024 * val - mean;
            int32_t rescaled =
                    MultiplyByQuantizedMultiplier(shifted, stddev_inverse_a, stddev_inverse_b);
            // TODO(jianlijianli): Saturate this.
            // Widen before multiplying so the weight product cannot overflow int32.
            int64_t val3 =
                    static_cast<int64_t>(rescaled) * layer_norm_weights[j] + bias[j];
            int32_t val4 = static_cast<int32_t>((val3 > 0 ? val3 + 512 : val3 - 512) / 1024);
            int32_t val5 = MultiplyByQuantizedMultiplier(val4, layer_norm_scale_a,
                                                         layer_norm_scale_b + 12);
            val5 = std::min(std::max(INT16_MIN, val5), INT16_MAX);
            output[index] = static_cast<int16_t>(val5);
        }
    }
}

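// Accumulates scalar * (sum of each matrix row) into the corresponding output
// element: output[i] += scalar * sum(matrix[i][:]). Used below to fold a zero
// point into per-row bias terms.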
void MatrixScalarMultiplyAccumulate(const int8_t* matrix, int32_t scalar, int32_t n_row,
                                    int32_t n_col, int32_t* output) {
    for (int i = 0; i < n_row; ++i) {
        int32_t row_sum = 0;
        for (int j = 0; j < n_col; ++j) {
            row_sum += *matrix++;
        }
        output[i] += row_sum * scalar;
    }
}

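// Precomputes bias[i] + zero_point * sum(weight_tensor[i][:]) for every output
// row of a 2-D weight tensor, so an input zero point can be folded into the
// bias ahead of time. Returns true on success (and trivially when
// weight_tensor is null). A typical call might look like the sketch below;
// the tensor names are illustrative only:
//
//   std::unique_ptr<int32_t[]> precomputed;
//   NN_RET_CHECK(PrecomputeZeroPointTimesWeightWithBias(
//           inputZeroPoint, inputToForgetWeights, inputToForgetWeightsShape,
//           forgetGateBias, &precomputed));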
bool PrecomputeZeroPointTimesWeightWithBias(int32_t zero_point, const int8_t* weight_tensor,
                                            const Shape& weight_shape, const int32_t* bias_tensor,
                                            std::unique_ptr<int32_t[]>* output) {
    if (weight_tensor == nullptr) {
        return true;
    }

    NN_RET_CHECK_EQ(weight_shape.dimensions.size(), 2u);
    const int row = weight_shape.dimensions[0];
    const int col = weight_shape.dimensions[1];
    *output = std::make_unique<int32_t[]>(row);
    if (bias_tensor == nullptr) {
        memset(output->get(), 0, row * sizeof(int32_t));
    } else {
        memcpy(output->get(), bias_tensor, row * sizeof(int32_t));
    }
    if (zero_point != 0) {
        MatrixScalarMultiplyAccumulate(weight_tensor, zero_point, row, col, output->get());
    }
    return true;
}

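// Element-wise logistic (sigmoid) on a quantized int16 tensor using gemmlowp
// fixed-point math. The input is interpreted as FixedPoint<int16_t, 3>
// (3 integer bits, Q3.12) and the output is written as FixedPoint<int16_t, 0>
// (Q0.15).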
void ApplySigmoid(const int16_t* input, int32_t n_batch, int32_t n_input, int16_t* output) {
    for (int batch = 0; batch < n_batch; ++batch) {
        for (int c = 0; c < n_input; c++) {
            using F3 = gemmlowp::FixedPoint<std::int16_t, 3>;
            using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
            const int index = batch * n_input + c;
            F3 sigmoid_input = F3::FromRaw(input[index]);
            F0 sigmoid_output = gemmlowp::logistic(sigmoid_input);
            output[index] = sigmoid_output.raw();
        }
    }
}

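// Element-wise multiply of two int16 tensors. Each product is reduced with a
// rounding right shift by `shift` bits (gemmlowp::RoundingDivideByPOT) before
// being narrowed back to int16.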
void CwiseMul(const int16_t* input_1, const int16_t* input_2, int n_batch, int n_input, int shift,
              int16_t* output) {
    for (int batch = 0; batch < n_batch; ++batch) {
        for (int i = 0; i < n_input; ++i) {
            const int index = batch * n_input + i;
            const int16_t a = input_1[index];
            const int16_t b = input_2[index];
            const int32_t value = static_cast<int32_t>(a) * static_cast<int32_t>(b);
            output[index] = static_cast<int16_t>(gemmlowp::RoundingDivideByPOT(value, shift));
        }
    }
}

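// Element-wise multiply of two int16 tensors with requantization to int8: each
// product is rescaled by the (multiplier, shift) quantized multiplier, the
// output zero point is subtracted, and the result is clamped to [-128, 127].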
void CwiseMul(const int16_t* input_1, const int16_t* input_2, int32_t multiplier, int32_t shift,
              int32_t n_batch, int32_t n_input, int32_t output_zp, int8_t* output) {
    for (int batch = 0; batch < n_batch; ++batch) {
        for (int i = 0; i < n_input; ++i) {
            const int index = batch * n_input + i;
            const int16_t a = input_1[index];
            const int16_t b = input_2[index];
            int32_t value = static_cast<int32_t>(a) * static_cast<int32_t>(b);
            value = MultiplyByQuantizedMultiplier(value, multiplier, shift);
            value -= output_zp;
            value = std::min(std::max(-128, value), 127);

            output[index] = static_cast<int8_t>(value);
        }
    }
}

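// Computes log2(x). The rounded exponent is always written to *log2_result;
// the return value is true when log2(x) is within 1e-3 of an integer, i.e. x
// is effectively a power of two. For example, CheckedLog2(1024.0f, &e) sets e
// to 10 and returns true, while CheckedLog2(1000.0f, &e) returns false.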
bool CheckedLog2(const float x, int* log2_result) {
    const float x_log2 = std::log(x) * (1.0f / std::log(2.0f));
    const float x_log2_rounded = std::round(x_log2);
    const float x_log2_fracpart = x_log2 - x_log2_rounded;

    *log2_result = static_cast<int>(x_log2_rounded);
    return std::abs(x_log2_fracpart) < 1e-3;
}

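// Element-wise addition of two int16 tensors with saturation: each sum is
// clamped to [INT16_MIN, INT16_MAX] before being written to output.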
void CwiseAdd(const int16_t* input_1, const int16_t* input_2, int n_batch, int n_input,
              int16_t* output) {
    for (int batch = 0; batch < n_batch; ++batch) {
        for (int i = 0; i < n_input; ++i) {
            const int index = batch * n_input + i;
            int32_t sum = input_1[index] + input_2[index];
            const int32_t sum_clamped = std::min(INT16_MAX, std::max(INT16_MIN, sum));
            output[index] = static_cast<int16_t>(sum_clamped);
        }
    }
}

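// In-place symmetric clipping of an int16 tensor: every element is clamped to
// the range [-clipping_value, clipping_value].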
void CwiseClipping(int16_t* input, const int16_t clipping_value, int32_t n_batch, int32_t n_input) {
    for (int batch = 0; batch < n_batch; ++batch) {
        for (int i = 0; i < n_input; ++i) {
            const int index = batch * n_input + i;
            if (input[index] > clipping_value) {
                input[index] = clipping_value;
            }
            if (input[index] < -clipping_value) {
                input[index] = -clipping_value;
            }
        }
    }
}

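// Same as above, for an int8 tensor: clamps each element in place to
// [-clipping_value, clipping_value].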
void CwiseClipping(int8_t* input, const int8_t clipping_value, int32_t n_batch, int32_t n_input) {
    for (int batch = 0; batch < n_batch; ++batch) {
        for (int i = 0; i < n_input; ++i) {
            const int index = batch * n_input + i;
            if (input[index] > clipping_value) {
                input[index] = clipping_value;
            }
            if (input[index] < -clipping_value) {
                input[index] = -clipping_value;
            }
        }
    }
}

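// For every batch, multiplies `vector` element-wise with that batch's slice of
// `batch_vector`, rescales each product with the (multiplier, shift) quantized
// multiplier, and accumulates it into `result` with saturation to
// [-32768, 32767].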
void VectorBatchVectorCwiseProductAccumulate(const int16_t* vector, int v_size,
                                             const int16_t* batch_vector, int n_batch,
                                             int32_t multiplier, int shift, int16_t* result) {
    for (int b = 0; b < n_batch; b++) {
        for (int v = 0; v < v_size; v++) {
            int32_t prod = vector[v] * *batch_vector++;
            prod = MultiplyByQuantizedMultiplier(prod, multiplier, shift);
            int32_t output = prod + *result;
            output = std::max(std::min(32767, output), -32768);
            *result++ = output;
        }
    }
}

}  // namespace nn
}  // namespace android