/* Copyright 2016 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <vector>

#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/kernels/fuzzing/fuzz_session.h"

namespace tensorflow {
namespace fuzzing {

class FuzzScatterNd : public FuzzSession {
  void BuildGraph(const Scope& scope) override {
    auto indices =
        tensorflow::ops::Placeholder(scope.WithOpName("indices"), DT_INT32);
    auto updates =
        tensorflow::ops::Placeholder(scope.WithOpName("updates"), DT_INT32);
    auto shape =
        tensorflow::ops::Placeholder(scope.WithOpName("shape"), DT_INT32);
    (void)tensorflow::ops::ScatterNd(scope.WithOpName("output"), indices,
                                     updates, shape);
  }

  void FuzzImpl(const uint8_t* data, size_t size) override {
    // This op's runtime is heavily determined by the shape of the tensor
    // arguments and almost not at all by the values of those tensors. Hence,
    // the fuzzing data here is only used to determine the shape of the
    // arguments and the output, while the data of these tensors is just a
    // constant value. Furthermore, the shape of the updates_tensor is fully
    // determined by the contents of the shape_tensor and the shape of the
    // indices_tensor. Rather than using random values for updates_tensor's
    // shape and getting most of the fuzz runs stopped by the shape check,
    // it's better to just create a proper updates_tensor.
    if (size < 1) {
      return;
    }

    // The first element of the data buffer gives the number of dimensions of
    // the shape tensor.
    size_t i;
    size_t data_ix = 0;
    size_t shape_dims = 1 + (data[data_ix++] % kMaxShapeDims);
    Tensor shape_tensor(tensorflow::DT_INT32,
                        TensorShape({static_cast<int64>(shape_dims)}));

    // Check that we have enough elements left for the shape tensor.
    if (data_ix + shape_dims >= size) {
      return;  // not enough elements, no fuzz
    }

    // Subsequent elements give the contents of the shape tensor. To avoid
    // running out of memory, reduce all dimensions to at most kMaxDim.
    auto flat_shape = shape_tensor.flat<int32>();
    for (i = 0; i < shape_dims; i++) {
      flat_shape(i) = data[data_ix++] % kMaxDim;
    }
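
    // Illustrative walkthrough (added commentary, not part of the original
    // fuzzer logic), using the constants defined below (kMaxShapeDims = 5,
    // kMaxIndicesRank = 3, kMaxDim = 10): for a buffer that starts with
    // {2, 3, 4, 7, 1, 2, 2, ...} the decoding proceeds as
    //   - data[0] = 2          -> shape_dims   = 1 + 2 % 5 = 3
    //   - data[1..3] = 3, 4, 7 -> shape_tensor = [3, 4, 7]
    //   - data[4] = 1          -> indices_rank = 1 + 1 % 3 = 2
    //   - data[5..6] = 2, 2    -> indices_tensor has shape [2, 2]
    // and the updates_tensor shape computed further below becomes
    // indices.shape[:-1] + shape[indices.shape[-1]:] = [2] + [7] = [2, 7].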

    // Next, we have to fill in the indices tensor. Take the next element from
    // the buffer to represent the rank of this tensor.
    if (data_ix >= size) {
      return;
    }
    size_t indices_rank = 1 + (data[data_ix++] % kMaxIndicesRank);

    // Now, read the dimensions of the indices_tensor.
    if (data_ix + indices_rank >= size) {
      return;
    }
    std::vector<int64> indices_dims;
    size_t num_indices = 1;
    for (i = 0; i < indices_rank; i++) {
      // Modulo kMaxDim to not request too much memory.
      int64 dim = data[data_ix++] % kMaxDim;
      num_indices *= dim;
      indices_dims.push_back(dim);
    }
    Tensor indices_tensor(tensorflow::DT_INT32, TensorShape(indices_dims));

    // The rest of the buffer is used to fill in the indices_tensor.
    auto flat_indices = indices_tensor.flat<int32>();
    for (i = 0; i < num_indices && data_ix < size; i++) {
      flat_indices(i) = data[data_ix++];
    }
    for (; i < num_indices; i++) {
      flat_indices(i) = 0;  // ensure that indices_tensor has all values set
    }

    // Given the values in the shape_tensor and the dimensions of the
    // indices_tensor, the shape of updates_tensor is fixed.
    num_indices = 1;
    std::vector<int64> updates_dims;
    for (i = 0; i < indices_rank - 1; i++) {
      updates_dims.push_back(indices_dims[i]);
      num_indices *= indices_dims[i];
    }
    int64 last = indices_dims[indices_rank - 1];
    for (i = last; i < shape_dims; i++) {
      updates_dims.push_back(flat_shape(i));
      num_indices *= flat_shape(i);
    }
    Tensor updates_tensor(tensorflow::DT_INT32, TensorShape(updates_dims));

    // We don't care about the values in the updates_tensor; make them all 1.
    auto flat_updates = updates_tensor.flat<int32>();
    for (i = 0; i < num_indices; i++) {
      flat_updates(i) = 1;
    }

    RunInputs({{"indices", indices_tensor},
               {"updates", updates_tensor},
               {"shape", shape_tensor}});
  }

 private:
  const size_t kMaxShapeDims = 5;
  const size_t kMaxIndicesRank = 3;
  const size_t kMaxDim = 10;
};

STANDARD_TF_FUZZ_FUNCTION(FuzzScatterNd);

}  // end namespace fuzzing
}  // end namespace tensorflow
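
// Note (added commentary, not part of the original file): the
// STANDARD_TF_FUZZ_FUNCTION macro from fuzz_session.h is what turns the class
// above into a libFuzzer target. A rough sketch of the expected wiring,
// assuming the FuzzSession interface, is:
//
//   extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
//     static FuzzScatterNd* fuzzer = new FuzzScatterNd();
//     return fuzzer->Fuzz(data, size);  // builds the graph once via
//                                       // BuildGraph(), then calls FuzzImpl()
//   }
//
// The exact expansion lives in fuzz_session.h; the sketch only conveys that
// BuildGraph() runs once per process while FuzzImpl() runs once per
// fuzzer-generated input.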