/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Operations"

#include "LSHProjection.h"

#include <utils/hash/farmhash.h>

#include <cstring>
#include <memory>

#include "CpuExecutor.h"
#include "LegacyUtils.h"
#include "Tracing.h"
#include "nnapi/Types.h"

namespace android {
namespace nn {

LSHProjection::LSHProjection(const Operation& operation, RunTimeOperandInfo* operands) {
    input_ = GetInput(operation, operands, kInputTensor);
    weight_ = GetInput(operation, operands, kWeightTensor);
    hash_ = GetInput(operation, operands, kHashTensor);

    type_ = static_cast<LSHProjectionType>(
            getScalarData<int32_t>(*GetInput(operation, operands, kTypeParam)));

    output_ = GetOutput(operation, operands, kOutputTensor);
}

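// Validates the operands and computes the output shape. The SPARSE variants
// produce one int32 bucket index per hash function (shape [num_hash]), while
// DENSE produces one 0/1 bit per (hash function, bit) pair (shape
// [num_hash * num_bits]).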
bool LSHProjection::Prepare(const Operation& operation, RunTimeOperandInfo* operands,
                            Shape* outputShape) {
    // Check that none of the required inputs are omitted.
    constexpr int requiredInputs[] = {kHashTensor, kInputTensor, kTypeParam};
    for (const int requiredInput : requiredInputs) {
        NN_RET_CHECK(!IsNullInput(GetInput(operation, operands, requiredInput)))
                << "required input " << requiredInput << " is omitted";
    }
    NN_CHECK_EQ(NumOutputs(operation), 1);

    const RunTimeOperandInfo* hash = GetInput(operation, operands, kHashTensor);
    NN_CHECK_EQ(NumDimensions(hash), 2);
    // Support up to 32 bits.
    NN_CHECK(SizeOfDimension(hash, 1) <= 32);

    const RunTimeOperandInfo* input = GetInput(operation, operands, kInputTensor);
    NN_CHECK(NumDimensions(input) >= 1);

    const auto& typeOperand = operands[operation.inputs[kTypeParam]];
    NN_RET_CHECK(typeOperand.length >= sizeof(int32_t));
    auto type = static_cast<LSHProjectionType>(getScalarData<int32_t>(typeOperand));
    switch (type) {
        case LSHProjectionType_SPARSE:
        case LSHProjectionType_SPARSE_DEPRECATED:
            NN_CHECK(NumInputsWithValues(operation, operands) == 3);
            outputShape->dimensions = {SizeOfDimension(hash, 0)};
            break;
        case LSHProjectionType_DENSE: {
            RunTimeOperandInfo* weight = GetInput(operation, operands, kWeightTensor);
            NN_CHECK_EQ(NumInputsWithValues(operation, operands), 4);
            NN_CHECK_EQ(NumDimensions(weight), 1);
            NN_CHECK_EQ(SizeOfDimension(weight, 0), SizeOfDimension(input, 0));
            outputShape->dimensions = {SizeOfDimension(hash, 0) * SizeOfDimension(hash, 1)};
            break;
        }
        default:
            return false;
    }

    outputShape->type = OperandType::TENSOR_INT32;
    outputShape->offset = 0;
    outputShape->scale = 0.f;

    return true;
}

// Compute the sign bit of the dot product of hash(seed, input) and weight.
// NOTE: the seed is a float and is converted to double as a temporary
// solution to match the trained model. This will change once the new model
// is trained with an optimized method.
//
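// For each item along the first dimension of the input, the seed is
// concatenated with the item's raw bytes and hashed with
// farmhash::Fingerprint64. The signed fingerprint is accumulated as-is when
// the weight tensor is omitted, or scaled by the corresponding weight element
// otherwise. Returns 1 if the accumulated score is positive, 0 otherwise.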
template <typename T>
int runningSignBit(const RunTimeOperandInfo* input, const RunTimeOperandInfo* weight, float seed) {
    double score = 0.0;
    int input_item_bytes = nonExtensionOperandSizeOfData(input->type, input->dimensions) /
                           SizeOfDimension(input, 0);
    char* input_ptr = (char*)(input->buffer);

    const size_t seed_size = sizeof(seed);
    const size_t key_bytes = seed_size + input_item_bytes;
    std::unique_ptr<char[]> key(new char[key_bytes]);

    for (uint32_t i = 0; i < SizeOfDimension(input, 0); ++i) {
        // Create running hash id and value for current dimension.
        memcpy(key.get(), &seed, seed_size);
        memcpy(key.get() + seed_size, input_ptr, input_item_bytes);

        int64_t hash_signature = farmhash::Fingerprint64(key.get(), key_bytes);
        double running_value = static_cast<double>(hash_signature);
        input_ptr += input_item_bytes;
        if (weight->lifetime == Operand::LifeTime::NO_VALUE) {
            score += running_value;
        } else {
            score += static_cast<double>(reinterpret_cast<T*>(weight->buffer)[i]) * running_value;
        }
    }

    return (score > 0) ? 1 : 0;
}

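// Packs the per-bit sign results into one int32 signature per hash function.
// For the non-deprecated SPARSE type, the signature is offset by
// i * 2^num_bits so that buckets produced by different hash functions do not
// collide.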
template <typename T>
void SparseLshProjection(LSHProjectionType type, const RunTimeOperandInfo* hash,
                         const RunTimeOperandInfo* input, const RunTimeOperandInfo* weight,
                         int32_t* out_buf) {
    int num_hash = SizeOfDimension(hash, 0);
    int num_bits = SizeOfDimension(hash, 1);
    for (int i = 0; i < num_hash; i++) {
        int32_t hash_signature = 0;
        for (int j = 0; j < num_bits; j++) {
            T seed = reinterpret_cast<T*>(hash->buffer)[i * num_bits + j];
            int bit = runningSignBit<T>(input, weight, static_cast<float>(seed));
            hash_signature = (hash_signature << 1) | bit;
        }
        if (type == LSHProjectionType_SPARSE_DEPRECATED) {
            *out_buf++ = hash_signature;
        } else {
            *out_buf++ = hash_signature + i * (1 << num_bits);
        }
    }
}

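// Writes one 0/1 entry per (hash function, bit) pair, i.e. num_hash * num_bits
// values in row-major order.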
template <typename T>
void DenseLshProjection(const RunTimeOperandInfo* hash, const RunTimeOperandInfo* input,
                        const RunTimeOperandInfo* weight, int32_t* out_buf) {
    int num_hash = SizeOfDimension(hash, 0);
    int num_bits = SizeOfDimension(hash, 1);
    for (int i = 0; i < num_hash; i++) {
        for (int j = 0; j < num_bits; j++) {
            T seed = reinterpret_cast<T*>(hash->buffer)[i * num_bits + j];
            int bit = runningSignBit<T>(input, weight, static_cast<float>(seed));
            *out_buf++ = bit;
        }
    }
}

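// Dispatches to the dense or sparse kernel based on the projection type. T is
// the element type of the hash and weight tensors (float or _Float16).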
template <typename T>
bool LSHProjection::Eval() {
    NNTRACE_COMP("LSHProjection::Eval");

    int32_t* out_buf = reinterpret_cast<int32_t*>(output_->buffer);

    switch (type_) {
        case LSHProjectionType_DENSE:
            DenseLshProjection<T>(hash_, input_, weight_, out_buf);
            break;
        case LSHProjectionType_SPARSE:
        case LSHProjectionType_SPARSE_DEPRECATED:
            SparseLshProjection<T>(type_, hash_, input_, weight_, out_buf);
            break;
        default:
            return false;
    }
    return true;
}

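// Explicit instantiations for the element types supported by LSH_PROJECTION.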
template bool LSHProjection::Eval<float>();
template bool LSHProjection::Eval<_Float16>();

template int runningSignBit<float>(const RunTimeOperandInfo* input,
                                   const RunTimeOperandInfo* weight, float seed);
template int runningSignBit<_Float16>(const RunTimeOperandInfo* input,
                                      const RunTimeOperandInfo* weight, float seed);

template void SparseLshProjection<float>(LSHProjectionType type, const RunTimeOperandInfo* hash,
                                         const RunTimeOperandInfo* input,
                                         const RunTimeOperandInfo* weight, int32_t* outBuffer);
template void SparseLshProjection<_Float16>(LSHProjectionType type, const RunTimeOperandInfo* hash,
                                            const RunTimeOperandInfo* input,
                                            const RunTimeOperandInfo* weight, int32_t* outBuffer);

template void DenseLshProjection<float>(const RunTimeOperandInfo* hash,
                                        const RunTimeOperandInfo* input,
                                        const RunTimeOperandInfo* weight, int32_t* outBuffer);
template void DenseLshProjection<_Float16>(const RunTimeOperandInfo* hash,
                                           const RunTimeOperandInfo* input,
                                           const RunTimeOperandInfo* weight, int32_t* outBuffer);

}  // namespace nn
}  // namespace android