1 /**
2 * Copyright 2020 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "NN_RAND_MODEL"
18
19 #include <android-base/logging.h>
20 #include <jni.h>
21
22 #include <algorithm>
23 #include <fstream>
24 #include <memory>
25 #include <optional>
26 #include <random>
27 #include <set>
28 #include <sstream>
29 #include <string>
30 #include <vector>
31
32 #include "GeneratedTestUtils.h"
33 #include "fuzzing/OperationManager.h"
34 #include "fuzzing/RandomGraphGenerator.h"
35 #include "fuzzing/RandomGraphGeneratorUtils.h"
36
extern "C" JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void* reserved) {
  // Route all android-base logging to logd so test output shows up in logcat,
  // and drop messages below INFO.
  android::base::InitLogging(nullptr, android::base::LogdLogger());
  android::base::SetMinimumLogSeverity(android::base::INFO);
  // Minimum JNI version this native library requires.
  return JNI_VERSION_1_6;
}
42
// Outcome of one random-model attempt. The value is returned as a jint to the
// Java caller of runRandomModel.
enum RandomModelExecutionResult {
  kSuccess = 0,
  kFailedCompilation,       // Compilation preparation or finish() failed.
  kFailedExecution,         // Execution::compute() returned an error.
  kFailedOtherNnApiCall,    // A supporting NNAPI query failed.
  // The following conditions are for internal retry
  kInvalidModelGenerated,     // Generator produced no model or an invalid one.
  kUnsupportedModelGenerated  // Target device supports none of the model's ops.
};
52
53 class FuzzerLogRAII {
54 public:
FuzzerLogRAII(const std::string & nnapiLogPath)55 FuzzerLogRAII(const std::string& nnapiLogPath) {
56 using android::nn::fuzzing_test::alignedString;
57 using android::nn::fuzzing_test::Logger;
58 using android::nn::fuzzing_test::LoggerStream;
59
60 NN_FUZZER_LOG_WRITE_FATAL_TO_SYSLOG(LOG_TAG);
61
62 mFuzzerLogOpen = false;
63 if (!nnapiLogPath.empty()) {
64 // Checking if we can write to target file
65 std::ofstream os;
66 os.open(nnapiLogPath);
67
68 if (os.fail()) {
69 LOG(ERROR) << "Opening file " << nnapiLogPath << " failed";
70 } else {
71 NN_FUZZER_LOG_INIT(nnapiLogPath);
72 LOG(INFO) << "Logging NNAPI to file " << nnapiLogPath;
73 mFuzzerLogOpen = true;
74 }
75 }
76 }
~FuzzerLogRAII()77 ~FuzzerLogRAII() {
78 if (mFuzzerLogOpen) {
79 using android::nn::fuzzing_test::alignedString;
80 using android::nn::fuzzing_test::Logger;
81 using android::nn::fuzzing_test::LoggerStream;
82
83 NN_FUZZER_LOG_CLOSE;
84 }
85 }
86
87 private:
88 bool mFuzzerLogOpen;
89 };
90
getOperationsInModel(const test_helper::TestModel & testModel)91 std::vector<test_helper::TestOperationType> getOperationsInModel(
92 const test_helper::TestModel& testModel) {
93 std::vector<test_helper::TestOperationType> result;
94 testModel.forEachSubgraph(
95 [&result](const test_helper::TestSubgraph& subgraph) {
96 for (const auto& operation : subgraph.operations) {
97 result.push_back(operation.type);
98 }
99 });
100
101 return result;
102 }
103
findDeviceByName(const char * deviceName)104 const ANeuralNetworksDevice* findDeviceByName(const char* deviceName) {
105 if (!deviceName) return nullptr;
106
107 std::string deviceNameStr(deviceName);
108 uint32_t numDevices = 0;
109 ANeuralNetworks_getDeviceCount(&numDevices);
110
111 for (uint32_t i = 0; i < numDevices; i++) {
112 ANeuralNetworksDevice* device = nullptr;
113 const char* buffer = nullptr;
114 int getDeviceResult = ANeuralNetworks_getDevice(i, &device);
115 if (getDeviceResult != ANEURALNETWORKS_NO_ERROR) {
116 LOG(ERROR) << "Unable to get NNAPI device " << i << ": "
117 << getDeviceResult;
118 return nullptr;
119 }
120
121 int getDeviceNameResult = ANeuralNetworksDevice_getName(device, &buffer);
122 if (getDeviceNameResult != ANEURALNETWORKS_NO_ERROR) {
123 LOG(ERROR) << "Unable to get name of NNAPI device " << i << ": "
124 << getDeviceNameResult;
125 return nullptr;
126 }
127
128 if (deviceNameStr == buffer) {
129 return device;
130 }
131 }
132
133 LOG(ERROR) << "No device with name " << deviceNameStr;
134 return nullptr;
135 }
136
getNnApiReferenceDevice()137 const ANeuralNetworksDevice* getNnApiReferenceDevice() {
138 return findDeviceByName("nnapi-reference");
139 }
140
// Drives the NNAPI random-graph fuzzing flow against a single target device:
// init() narrows an operation filter to what the device appears to support,
// then runRandomModel() generates, compiles, and optionally executes one
// random model per call.
class RandomGraphGenerator {
 public:
  // |device| may be null, meaning "no target accelerator": capability
  // filtering is skipped and NNAPI picks where to run. |nnapiLogPath| and
  // |failedModelDumpPath| may be empty to disable NNAPI fuzzer logging and
  // failed-model dumping respectively.
  RandomGraphGenerator(const ANeuralNetworksDevice* device,
                       const std::string& deviceName,
                       const std::string& testName, uint32_t numOperations,
                       uint32_t dimensionRange, std::string nnapiLogPath,
                       std::string failedModelDumpPath)
      : mTestName(testName),
        mDevice(device),
        mDeviceName(deviceName),
        mNnApiReference(getNnApiReferenceDevice()),
        mSupportedOpsFilter(),
        mNumOperations(numOperations),
        mDimensionRange(dimensionRange),
        nnapiFuzzerLogRAII(nnapiLogPath),
        mFailedModelDumpPath(failedModelDumpPath) {}

  // Populates mSupportedOpsFilter from the target device's HAL versions,
  // operand types, and (sampled) supported operations. Must return kSuccess
  // before runRandomModel is used.
  RandomModelExecutionResult init() {
    // Limiting the ops in the generator to a subset we know the target device
    // supports to avoid failing the test because we are unable to find a
    // suitable model to compile.
    RandomModelExecutionResult filterInitResult;
    filterInitResult =
        HalVersionsSupportedByDevice(&mSupportedOpsFilter.versions);
    if (filterInitResult != kSuccess) return filterInitResult;

    filterInitResult =
        OperandTypesSupportedByDevice(&mSupportedOpsFilter.dataTypes);
    if (filterInitResult != kSuccess) return filterInitResult;

    return OperationsSupportedByDevice(mSupportedOpsFilter,
                                       &mSupportedOpsFilter.opcodes);
  }

  // Generates one random model and compiles it; also executes it unless
  // |compilationOnly| is set. Returns kInvalidModelGenerated or
  // kUnsupportedModelGenerated when the caller should simply retry with a new
  // random model; other failures dump the model (see dumpModel).
  RandomModelExecutionResult runRandomModel(bool compilationOnly) {
    using android::nn::generated_tests::createModel;
    using android::nn::generated_tests::createRequest;
    using android::nn::generated_tests::GeneratedModel;
    using android::nn::test_wrapper::Compilation;
    using android::nn::test_wrapper::Execution;
    using android::nn::wrapper::Result;

    std::optional<test_helper::TestModel> testModel =
        createRandomModel(mSupportedOpsFilter);
    if (!testModel) {
      LOG(ERROR) << mTestName << ": No model generated";
      return kInvalidModelGenerated;
    }

    GeneratedModel model;
    createModel(*testModel, &model);
    if (!model.isValid()) {
      LOG(ERROR) << mTestName << ": Randomly generated model is not valid";
      return kInvalidModelGenerated;
    }
    auto modelFinishResult = model.finish();
    if (modelFinishResult != Result::NO_ERROR) {
      LOG(ERROR) << mTestName << ": Failed to finish model, result is "
                 << static_cast<int>(modelFinishResult);
      return kInvalidModelGenerated;
    }

    bool fullySupportedModel = false;
    if (mDevice) {
      // NOTE(review): assumes the generated model contains exactly
      // mNumOperations operations so the flags array is fully sized — confirm
      // against RandomGraph::generate.
      std::unique_ptr<bool[]> opsSupportedFlags =
          std::make_unique<bool[]>(mNumOperations);
      std::fill(opsSupportedFlags.get(),
                opsSupportedFlags.get() + mNumOperations, false);
      // Check if the device fully supports the graph.
      int supportedOpResult =
          ANeuralNetworksModel_getSupportedOperationsForDevices(
              model.getHandle(), &mDevice, 1, opsSupportedFlags.get());
      if (supportedOpResult != ANEURALNETWORKS_NO_ERROR) {
        return kFailedOtherNnApiCall;
      }

      // accepting the model even if partially supported since we found that it
      // is extremely difficult to have fully supported models.
      // We could consider a minimum number (or percentage of total number) of
      // operations to be supported to consider the model acceptable. For the
      // moment we just accept any model that has any supported op.
      bool supported = std::any_of(opsSupportedFlags.get(),
                                   opsSupportedFlags.get() + mNumOperations,
                                   [](bool v) { return v; });
      if (!supported) {
        return kUnsupportedModelGenerated;
      }

      fullySupportedModel = std::all_of(
          opsSupportedFlags.get(), opsSupportedFlags.get() + mNumOperations,
          [](bool v) { return v; });
    }

    std::vector<const ANeuralNetworksDevice*> devices;
    if (mDevice) {
      devices.push_back(mDevice);
      if (!fullySupportedModel) {
        // If model is not fully supported we allow NNAPI to use reference
        // implementation. This is to avoid having this test constantly
        // nullified by the inability of finding a fully supported model.
        LOG(VERBOSE) << "Allowing model to be partially executed on NNAPI "
                        "reference device";
        devices.push_back(mNnApiReference);
      }
    }

    auto [compilationResult, compilation] = CreateCompilation(model, devices);
    if (compilationResult != Result::NO_ERROR) {
      LOG(WARNING) << mTestName
                   << ": Compilation preparation failed with result "
                   << static_cast<int>(compilationResult);

      dumpModel(*testModel);
      return kFailedCompilation;
    }
    compilationResult = compilation.finish();
    if (compilationResult != Result::NO_ERROR) {
      LOG(WARNING) << mTestName << ": Compilation failed with result "
                   << static_cast<int>(compilationResult);

      dumpModel(*testModel);
      return kFailedCompilation;
    }

    if (!compilationOnly) {
      Execution execution(&compilation);
      std::vector<test_helper::TestBuffer> outputs;
      createRequest(*testModel, &execution, &outputs);

      // Compute result.
      Result executeReturn = execution.compute();
      if (executeReturn != Result::NO_ERROR) {
        LOG(WARNING) << mTestName << ": Execution failed with result "
                     << static_cast<int>(executeReturn);

        dumpModel(*testModel);
        return kFailedExecution;
      }
    }

    return kSuccess;
  }

  // Public so the JNI entry points can include it in log messages.
  const std::string mTestName;

 private:
  android::nn::fuzzing_test::RandomGraph mRandomGraph;
  // Seeds each generated graph; non-deterministic by design.
  std::random_device mSeedGenerator;
  // Target accelerator; null means "no specific device".
  const ANeuralNetworksDevice* mDevice;
  // empty string if mDevice is null
  const std::string mDeviceName;
  // NNAPI reference implementation, used as fallback device for partially
  // supported models. May be null if lookup failed.
  const ANeuralNetworksDevice* mNnApiReference;
  // Filter applied to the random generator; populated by init().
  android::nn::fuzzing_test::OperationFilter mSupportedOpsFilter;
  const uint32_t mNumOperations;
  const uint32_t mDimensionRange;
  // Keeps the NNAPI fuzzer log open for the generator's lifetime.
  FuzzerLogRAII nnapiFuzzerLogRAII;
  const std::string mFailedModelDumpPath;

  // Generates a random TestModel honoring |opFilter|, or nullopt when the
  // random graph generator cannot produce one for the fresh seed.
  std::optional<test_helper::TestModel> createRandomModel(
      const android::nn::fuzzing_test::OperationFilter& opFilter) {
    android::nn::fuzzing_test::OperationManager::get()->applyFilter(opFilter);

    auto seed = mSeedGenerator();
    if (!mRandomGraph.generate(seed, mNumOperations, mDimensionRange)) {
      return std::nullopt;
    }

    return {mRandomGraph.createTestModel()};
  }

  // Maps the device's feature level to the HAL versions to generate for.
  // Feature levels other than 27/28/29 leave |result| untouched, i.e. no
  // version filter is applied.
  RandomModelExecutionResult HalVersionsSupportedByDevice(
      std::vector<test_helper::TestHalVersion>* result) {
    if (!mDevice) {
      return kSuccess;
    }

    int64_t featureLevel;
    auto getDeviceFeatureLevelResult =
        ANeuralNetworksDevice_getFeatureLevel(mDevice, &featureLevel);
    if (getDeviceFeatureLevelResult != ANEURALNETWORKS_NO_ERROR) {
      LOG(ERROR) << mTestName << ": Unable to query device feature level";
      return kFailedOtherNnApiCall;
    }

    if (featureLevel == 27) *result = {test_helper::TestHalVersion::V1_0};
    if (featureLevel == 28) *result = {test_helper::TestHalVersion::V1_1};
    if (featureLevel == 29) *result = {test_helper::TestHalVersion::V1_2};

    return kSuccess;
  }

  // Chooses an operand-type filter based on the device type: GPUs exclude
  // quantized types, "dsp"-named devices exclude float types; everything else
  // gets no filter.
  RandomModelExecutionResult OperandTypesSupportedByDevice(
      std::vector<test_helper::TestOperandType>* result) {
    if (!mDevice) {
      return kSuccess;
    }

    int32_t deviceType;
    auto getDeviceTypeResult =
        ANeuralNetworksDevice_getType(mDevice, &deviceType);
    if (getDeviceTypeResult != ANEURALNETWORKS_NO_ERROR) {
      LOG(ERROR) << mTestName << ": Unable to query device type";
      return kFailedOtherNnApiCall;
    }
    using test_helper::TestOperandType;
    switch (deviceType) {
      case ANEURALNETWORKS_DEVICE_GPU:
        // No quantized types
        *result = {
            TestOperandType::FLOAT32, TestOperandType::INT32,
            TestOperandType::UINT32, TestOperandType::TENSOR_FLOAT32,
            TestOperandType::TENSOR_INT32, TestOperandType::BOOL,
            TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_BOOL8,
            TestOperandType::FLOAT16};
        break;
      case ANEURALNETWORKS_DEVICE_CPU:
      case ANEURALNETWORKS_DEVICE_ACCELERATOR:
        result->clear();  // no filter
        break;
      case ANEURALNETWORKS_DEVICE_UNKNOWN:
      case ANEURALNETWORKS_DEVICE_OTHER:
        // Heuristic: devices with "dsp" in the name get integer/quantized
        // types only.
        if (mDeviceName.find("dsp") != std::string::npos) {
          *result = {TestOperandType::INT32,
                     TestOperandType::UINT32,
                     TestOperandType::TENSOR_INT32,
                     TestOperandType::BOOL,
                     TestOperandType::TENSOR_BOOL8,
                     TestOperandType::TENSOR_QUANT8_ASYMM,
                     TestOperandType::TENSOR_QUANT16_SYMM,
                     TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL,
                     TestOperandType::TENSOR_QUANT16_ASYMM,
                     TestOperandType::TENSOR_QUANT8_SYMM,
                     TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED};
          break;
        }
        FALLTHROUGH_INTENDED;
      default:
        result->clear();  // no filter
    }
    return kSuccess;
  }

  /// Finds some operations supported by the device
  // Samples kNumOfAttempts random models, asks the device which of their
  // operations it supports, and accumulates the union into |result|. An empty
  // |result| means "no filter". Only returns non-kSuccess on an NNAPI call
  // failure.
  RandomModelExecutionResult OperationsSupportedByDevice(
      const android::nn::fuzzing_test::OperationFilter& basicFilter,
      std::vector<test_helper::TestOperationType>* result) {
    if (!mDevice) {
      return kSuccess;
    }

    constexpr int kNumOfAttempts = 50;
    std::set<test_helper::TestOperationType> supportedOps;
    for (int i = 0; i < kNumOfAttempts; i++) {
      std::optional<test_helper::TestModel> testModel =
          createRandomModel(basicFilter);
      if (!testModel) {
        LOG(ERROR)
            << mTestName
            << ": Unable to generate a model trying to understand the ops "
               "supported by target device";
        continue;
      }

      android::nn::generated_tests::GeneratedModel model;
      createModel(*testModel, &model);
      if (!model.isValid()) {
        LOG(WARNING) << mTestName << ": Randomly generated model is not valid";
        continue;
      }
      auto modelFinishResult = model.finish();
      if (modelFinishResult != android::nn::wrapper::Result::NO_ERROR) {
        LOG(WARNING) << "Model::finish call failed, result is "
                     << static_cast<int>(modelFinishResult);
        continue;
      }

      std::unique_ptr<bool[]> opsSupportedFlags =
          std::make_unique<bool[]>(mNumOperations);
      std::fill(opsSupportedFlags.get(),
                opsSupportedFlags.get() + mNumOperations, false);

      // Check if the device fully supports the graph.
      int supportedOpResult =
          ANeuralNetworksModel_getSupportedOperationsForDevices(
              model.getHandle(), &mDevice, 1, opsSupportedFlags.get());
      if (supportedOpResult != ANEURALNETWORKS_NO_ERROR) {
        return kFailedOtherNnApiCall;
      }

      // NOTE(review): assumes opsInModel has at least mNumOperations entries —
      // confirm that the generator always emits exactly mNumOperations ops.
      std::vector<test_helper::TestOperationType> opsInModel =
          getOperationsInModel(*testModel);
      for (int opIndex = 0; opIndex < mNumOperations; opIndex++) {
        test_helper::TestOperationType currOp = opsInModel[opIndex];
        if (opsSupportedFlags[opIndex]) {
          supportedOps.insert(currOp);
        }
      }
    }
    std::copy(supportedOps.begin(), supportedOps.end(),
              std::back_inserter(*result));

    if (result->empty()) {
      LOG(WARNING)
          << mTestName
          << ": Could not find any operation supported by target device."
          << " Returning no filter.";
    } else {
      LOG(INFO) << mTestName << ": Filtering to " << result->size()
                << " supported operations";
    }

    return kSuccess;
  }

  // Dumps |testModel| as a generated-spec file to mFailedModelDumpPath so a
  // failing model can be reproduced offline. No-op when the path is empty.
  void dumpModel(const test_helper::TestModel& testModel) {
    if (mFailedModelDumpPath.empty()) return;

    LOG(INFO) << mTestName << ": Dumping model failing tests to "
              << mFailedModelDumpPath;

    std::ofstream os(mFailedModelDumpPath);
    // gtest macro: aborts this dump (and flags the test) if the file cannot
    // be opened.
    ASSERT_TRUE(os.is_open());
    os << "# Generated from " << mTestName << ". Do not edit.\n\n";
    test_helper::SpecDumper dumper(testModel, os);
    dumper.dumpTestModel();
  }

  // Builds a Compilation pinned to |devices| when non-empty; otherwise lets
  // NNAPI choose among all available devices.
  std::pair<android::nn::wrapper::Result,
            android::nn::test_wrapper::Compilation>
  CreateCompilation(const android::nn::generated_tests::GeneratedModel& model,
                    const std::vector<const ANeuralNetworksDevice*>& devices) {
    using android::nn::test_wrapper::Compilation;
    if (!devices.empty())
      return Compilation::createForDevices(&model, devices);
    else
      return {android::nn::wrapper::Result::NO_ERROR, Compilation(&model)};
  }
};
479
480 extern "C" JNIEXPORT jint JNICALL
Java_com_android_nn_crashtest_core_test_RandomGraphTest_runRandomModel(JNIEnv * env,jclass,jlong _generatorHandle,jboolean _compilationOnly,jlong _maxModelSearchTimeSeconds)481 Java_com_android_nn_crashtest_core_test_RandomGraphTest_runRandomModel(
482 JNIEnv* env, jclass /* static method */, jlong _generatorHandle,
483 jboolean _compilationOnly, jlong _maxModelSearchTimeSeconds) {
484 RandomGraphGenerator* graphGenerator =
485 reinterpret_cast<RandomGraphGenerator*>(_generatorHandle);
486
487 std::time_t startTime = std::time(nullptr);
488
489 int result = kSuccess;
490 int modelSearchAttempt = 0;
491 while (std::difftime(std::time(nullptr), startTime) <
492 _maxModelSearchTimeSeconds) {
493 modelSearchAttempt++;
494
495 result = graphGenerator->runRandomModel(_compilationOnly);
496
497 // if by chance we generated an invalid model or a model that couldn't run
498 // on the target accelerator we will try again.
499 if (result != kInvalidModelGenerated &&
500 result != kUnsupportedModelGenerated) {
501 break;
502 }
503 }
504
505 if (result == kInvalidModelGenerated ||
506 result == kUnsupportedModelGenerated) {
507 LOG(WARNING) << graphGenerator->mTestName
508 << ": Max time to search for a model of "
509 << static_cast<long>(_maxModelSearchTimeSeconds)
510 << "seconds reached. Aborting test at attempt "
511 << modelSearchAttempt;
512 }
513
514 return result;
515 }
516
517 extern "C" JNIEXPORT jlong JNICALL
Java_com_android_nn_crashtest_core_test_RandomGraphTest_createRandomGraphGenerator(JNIEnv * env,jclass,jstring _nnApiDeviceName,jint _numOperations,jint _dimensionRange,jstring _testName,jstring _nnapiLogPath,jstring _failedModelDumpPath)518 Java_com_android_nn_crashtest_core_test_RandomGraphTest_createRandomGraphGenerator(
519 JNIEnv* env, jclass /* static method */, jstring _nnApiDeviceName,
520 jint _numOperations, jint _dimensionRange, jstring _testName,
521 jstring _nnapiLogPath, jstring _failedModelDumpPath) {
522 const char* nnApiDeviceName =
523 _nnApiDeviceName ? env->GetStringUTFChars(_nnApiDeviceName, nullptr)
524 : nullptr;
525
526 std::string nnApiDeviceNameStr{nnApiDeviceName ? nnApiDeviceName : ""};
527 const ANeuralNetworksDevice* device = nullptr;
528 if (nnApiDeviceName) {
529 device = findDeviceByName(nnApiDeviceName);
530 if (!device) {
531 LOG(ERROR) << ": Unable to find accelerator " << nnApiDeviceName;
532 env->ReleaseStringUTFChars(_nnApiDeviceName, nnApiDeviceName);
533 return reinterpret_cast<jlong>(nullptr);
534 }
535 env->ReleaseStringUTFChars(_nnApiDeviceName, nnApiDeviceName);
536 }
537
538 std::string testName{"no-test-name"};
539 if (_testName) {
540 const char* testNameBuf = env->GetStringUTFChars(_testName, nullptr);
541 testName = testNameBuf;
542 env->ReleaseStringUTFChars(_testName, testNameBuf);
543 }
544
545 std::string nnapiLogPath;
546 if (_nnapiLogPath) {
547 const char* nnapiLogPathTmp =
548 env->GetStringUTFChars(_nnapiLogPath, nullptr);
549 nnapiLogPath = nnapiLogPathTmp;
550 env->ReleaseStringUTFChars(_nnapiLogPath, nnapiLogPathTmp);
551 }
552
553 std::string failedModelDumpPath;
554 if (_failedModelDumpPath) {
555 const char* failedModelDumpPathTmp =
556 env->GetStringUTFChars(_failedModelDumpPath, nullptr);
557 failedModelDumpPath = failedModelDumpPathTmp;
558 env->ReleaseStringUTFChars(_failedModelDumpPath, failedModelDumpPathTmp);
559 }
560
561 uint32_t numOperations = static_cast<uint32_t>(_numOperations);
562 uint32_t dimensionRange = static_cast<uint32_t>(_dimensionRange);
563
564 RandomGraphGenerator* result = new RandomGraphGenerator(
565 device, nnApiDeviceNameStr, testName, numOperations, dimensionRange,
566 nnapiLogPath, failedModelDumpPath);
567
568 if (result->init() != kSuccess) {
569 delete result;
570 return reinterpret_cast<jlong>(nullptr);
571 }
572
573 return reinterpret_cast<jlong>(result);
574 }
575
576 extern "C" JNIEXPORT void JNICALL
Java_com_android_nn_crashtest_core_test_RandomGraphTest_destroyRandomGraphGenerator(JNIEnv * env,jclass,jlong generatorHandle)577 Java_com_android_nn_crashtest_core_test_RandomGraphTest_destroyRandomGraphGenerator(
578 JNIEnv* env, jclass /* static method */, jlong generatorHandle) {
579 RandomGraphGenerator* graphGenerator =
580 reinterpret_cast<RandomGraphGenerator*>(generatorHandle);
581 delete graphGenerator;
582 }
583