/external/tensorflow/tensorflow/lite/delegates/gpu/common/memory_management/

greedy_in_order_assignment.h
    77: TensorSizeT tensor_size = usage_records[i].tensor_size;    variable
    82: size_t size_diff = AbsDiffInElements(pool_it->object_size, tensor_size);
    100: auto pool_it = pool.lower_bound({tensor_size, 0});
    104: size_diff = pool_it->object_size - tensor_size;
    111: tensor_size - pool_it->object_size < size_diff) {
    112: size_diff = tensor_size - pool_it->object_size;
    127: assignment->object_sizes.push_back(tensor_size);
    136: std::max(assignment->object_sizes[shared_id], tensor_size);
    171: const TensorSizeT& tensor_size = usage_records[i].tensor_size;    in GreedyInOrderAssignmentMultidimensional() local
    181: if (IsCoveringObject(shared_object_size, tensor_size)) {    in GreedyInOrderAssignmentMultidimensional()
    [all …]

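These matches come from TFLite GPU's greedy in-order assignment: free shared objects sit in a pool ordered by size, and a new tensor reuses the pool entry whose size is closest to its own (pool.lower_bound plus a look at the neighbouring element). A minimal Python sketch of that best-fit lookup; SharedPool and its methods are hypothetical names used for illustration, not the library's API:

    import bisect

    class SharedPool:
        """Toy best-fit pool: reuse the free object whose size is closest to the request."""

        def __init__(self):
            self._sizes = []   # sorted sizes of the currently free shared objects
            self._ids = []     # object ids, kept parallel to _sizes

        def add(self, size, obj_id):
            i = bisect.bisect_left(self._sizes, size)
            self._sizes.insert(i, size)
            self._ids.insert(i, obj_id)

        def take_best_fit(self, tensor_size):
            """Pop the object minimizing |object_size - tensor_size|, or None if the pool is empty."""
            if not self._sizes:
                return None
            i = bisect.bisect_left(self._sizes, tensor_size)   # analogue of pool.lower_bound
            # Candidates: the first object not smaller than tensor_size, and the one just below it.
            candidates = [j for j in (i, i - 1) if 0 <= j < len(self._sizes)]
            best = min(candidates, key=lambda j: abs(self._sizes[j] - tensor_size))
            return self._ids.pop(best), self._sizes.pop(best)

When a record gets a popped object, the object grows to max(object_size, tensor_size); when the pool is empty a fresh object of tensor_size is appended, which is what the push_back and std::max matches show.
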
equality_assignment.h
    57: const TensorSizeT tensor_size = usage_records[i].tensor_size;    in EqualityAssignmentWithHash() local
    58: auto pool_it = pool.find(tensor_size);    in EqualityAssignmentWithHash()
    63: assignment->object_sizes.push_back(tensor_size);    in EqualityAssignmentWithHash()
    91: const TensorSizeT tensor_size = usage_records[i].tensor_size;    in EqualityAssignment() local
    97: assignment->object_sizes[obj] == tensor_size) {    in EqualityAssignment()
    106: assignment->object_sizes.push_back(tensor_size);    in EqualityAssignment()

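equality_assignment.h only reuses a shared object whose size matches the new tensor exactly; the hash variant keeps a map from size to free objects (pool.find(tensor_size)). A rough Python sketch of that exact-size reuse, with the lifetime bookkeeping written out since the matches elide it; the record format is an assumption, not the actual data structure:

    from collections import defaultdict

    def equality_assignment(usage_records):
        """usage_records: list of (first_task, last_task, tensor_size), ordered by first_task.
        Reuse a shared object only when its size matches the request exactly."""
        free_by_size = defaultdict(list)   # size -> ids of currently free objects of that size
        in_use = []                        # (last_task, tensor_size, object_id)
        object_sizes, assignments = [], []
        for first_task, last_task, tensor_size in usage_records:
            # Objects whose owning tensor died before this record starts become free again.
            still_busy = []
            for lt, size, obj in in_use:
                if lt < first_task:
                    free_by_size[size].append(obj)
                else:
                    still_busy.append((lt, size, obj))
            in_use = still_busy
            if free_by_size[tensor_size]:
                obj = free_by_size[tensor_size].pop()   # exact-size hit, like pool.find(tensor_size)
            else:
                obj = len(object_sizes)
                object_sizes.append(tensor_size)        # object_sizes.push_back(tensor_size)
            assignments.append(obj)
            in_use.append((last_task, tensor_size, obj))
        return assignments, object_sizes
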
greedy_by_size_assignment.cc
    44: tensor_size > other.tensor_size)));    in operator >()
    60: size_t tensor_size;    member
    105: if (diff >= rec->tensor_size && diff < best_diff) {    in GreedyBySizeAssignment()
    112: AlignByN(cur_offset + usage_records[allocated_id].tensor_size,    in GreedyBySizeAssignment()
    140: std::max(assignment->total_size, best_offset + rec->tensor_size);    in GreedyBySizeAssignment()
    179: priority_info[rec_id].tensor_size = usage_records[rec_id].tensor_size;    in GreedyBySizeDistPriorityAssignment()
    188: positional_max[pos] >= priority_info[rec_id].tensor_size) {    in GreedyBySizeDistPriorityAssignment()
    227: usage_records[best_rec_id].tensor_size);    in GreedyBySizeDistPriorityAssignment()
    233: usage_records[best_rec_id].tensor_size);    in GreedyBySizeDistPriorityAssignment()

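greedy_by_size_assignment.cc assigns byte offsets inside a single arena instead of whole shared objects: records are visited from largest tensor_size to smallest, and each one takes the tightest gap between already-placed, lifetime-overlapping tensors that still fits (the diff >= rec->tensor_size && diff < best_diff check), with AlignByN rounding offsets. A simplified Python sketch of that search that ignores alignment; it illustrates the idea, it is not the TFLite code:

    def greedy_by_size_offsets(usage_records):
        """usage_records: list of (first_task, last_task, tensor_size) tuples.
        Returns a byte offset per record and the resulting arena size."""
        order = sorted(range(len(usage_records)),
                       key=lambda i: usage_records[i][2], reverse=True)
        offsets = [None] * len(usage_records)
        placed = []        # indices of records that already have an offset
        total_size = 0
        for i in order:
            first_i, last_i, size_i = usage_records[i]
            # Already-placed tensors alive at the same time as record i, ordered by offset.
            overlapping = sorted(
                (j for j in placed
                 if usage_records[j][0] <= last_i and first_i <= usage_records[j][1]),
                key=lambda j: offsets[j])
            best_offset, best_diff, prev_end = None, None, 0
            for j in overlapping:
                gap = offsets[j] - prev_end
                if gap >= size_i and (best_diff is None or gap < best_diff):
                    best_offset, best_diff = prev_end, gap   # tightest gap that still fits
                prev_end = max(prev_end, offsets[j] + usage_records[j][2])
            if best_offset is None:
                best_offset = prev_end      # no gap fits: place after the last overlapping tensor
            offsets[i] = best_offset
            total_size = max(total_size, best_offset + size_i)
            placed.append(i)
        return offsets, total_size

The total_size update mirrors the std::max(assignment->total_size, best_offset + rec->tensor_size) match above.
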
greedy_by_breadth_assignment.cc
    60: breadth += tensor_info.usage_record->tensor_size;    in GreedyByBreadthAssignment()
    87: if (best_size < rec.tensor_size) {    in GreedyByBreadthAssignment()
    93: } else if (cur_size < rec.tensor_size || cur_size >= best_size) {    in GreedyByBreadthAssignment()
    122: assignment->object_sizes.push_back(rec.tensor_size);    in GreedyByBreadthAssignment()
    129: std::max(assignment->object_sizes[best_object], rec.tensor_size);    in GreedyByBreadthAssignment()

min_cost_flow_assignment.cc
    61: AddEdge(source_, RightPartTwin(i), 1, usage_records[i].tensor_size);    in Build()
    70: if (usage_records[i].tensor_size >    in Build()
    71: usage_records[record_id].tensor_size) {    in Build()
    72: cost = usage_records[i].tensor_size -    in Build()
    73: usage_records[record_id].tensor_size;    in Build()
    185: size_t cost = (*usage_records_)[tensor_id].tensor_size;    in AssignTensorsToNewSharedObject()

internal.cc
    30: return first.usage_record->tensor_size > second.usage_record->tensor_size;    in CompareBySize()
    96: task_profile[i].usage_record->tensor_size);    in CalculatePositionalMaximums()
    101: positional_max.push_back(task_profile[i].usage_record->tensor_size);    in CalculatePositionalMaximums()

/external/tensorflow/tensorflow/core/distributed_runtime/

rpcbench_test.cc
    124: GraphDef CreateGraphDef(int num_stages, int width, int tensor_size,    in CreateGraphDef() argument
    133: Output x = Const(s.WithOpName("x"), 0.0f, {tensor_size, 1});    in CreateGraphDef()
    157: string DebugString(const Tensor& x, const Tensor& y, int tensor_size) {    in DebugString() argument
    158: CHECK_EQ(x.NumElements(), tensor_size);    in DebugString()
    159: CHECK_EQ(y.NumElements(), tensor_size);    in DebugString()
    163: CHECK_GE(tensor_size, 2);    in DebugString()
    170: int num_stages, int tensor_size,    in BM_Helper() argument
    176: GraphDef def = CreateGraphDef(num_stages, width, tensor_size,    in BM_Helper()
    183: Tensor x(DT_FLOAT, TensorShape({tensor_size, 1}));    in BM_Helper()
    188: "; tensor bytes/send: ", tensor_size * sizeof(float)));    in BM_Helper()
    [all …]

/external/XNNPACK/src/

memory-planner.c
    28: const size_t tensor_size_a = (*(struct xnn_value_usage *const*)a)->tensor_size;    in cmp_value_usage_tensor_size()
    29: const size_t tensor_size_b = (*(struct xnn_value_usage *const*)b)->tensor_size;    in cmp_value_usage_tensor_size()
    138: size_t tensor_size) {    in xnn_add_value_allocation_tracker() argument
    139: tracker->usage[value_id].tensor_size = tensor_size;    in xnn_add_value_allocation_tracker()
    163: if (info->tensor_size != 0) {    in xnn_plan_value_allocation_tracker()
    181: .end = allocated->alloc_offset + allocated->tensor_size,    in xnn_plan_value_allocation_tracker()
    185: …fset = find_value_alloc_offset(current_live_mem_blocks, num_live_mem_blocks, current->tensor_size);    in xnn_plan_value_allocation_tracker()
    186: if (mem_arena_size < current->alloc_offset + current->tensor_size) {    in xnn_plan_value_allocation_tracker()
    187: mem_arena_size = current->alloc_offset + current->tensor_size;    in xnn_plan_value_allocation_tracker()
    197: if (tracker->usage[i].tensor_size > 0) {    in xnn_plan_value_allocation_tracker()
    [all …]

/external/pytorch/torch/fx/experimental/migrate_gradual_types/

constraint.py
    217: def __init__(self, tensor_size, input_var, dim_replace, index, output):    argument
    232: self.tensor_size = tensor_size
    247: return self.tensor_size == other.tensor_size and \
    258: def __init__(self, tensor_size, input_var, index1, index2, output):    argument
    273: self.tensor_size = tensor_size
    288: return self.tensor_size == other.tensor_size and \
    299: def __init__(self, tensor_size, index, res, input_var):    argument
    310: self.tensor_size = tensor_size
    320: self.tensor_size == other.tensor_size and \
    328: def __init__(self, tensor_size, index_tuple, res, input_var):    argument
    [all …]

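The constraint.py matches are constructor/equality pairs: each constraint class stores tensor_size alongside its other fields and compares field-by-field in __eq__. A tiny sketch of that structural-equality pattern with a hypothetical constraint class (dataclasses generate exactly this kind of __eq__):

    from dataclasses import dataclass

    @dataclass(frozen=True)
    class IndexSelect:            # hypothetical stand-in for one of the constraint classes
        tensor_size: int          # number of dimensions the constraint ranges over
        input_var: str
        index: int
        output: str

    # Structural equality: equal iff every field, including tensor_size, matches.
    assert IndexSelect(4, "x", 2, "y") == IndexSelect(4, "x", 2, "y")
    assert IndexSelect(4, "x", 2, "y") != IndexSelect(3, "x", 2, "y")
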
/external/pytorch/binaries/

record_function_benchmark.cc
    35: float runTensorGEMMBench(int tensor_size, int iter) {    in runTensorGEMMBench() argument
    39: auto inp = torch::randn({tensor_size, tensor_size});    in runTensorGEMMBench()
    66: for (auto tensor_size : std::set<int>({kSmallTensorSize, kTensorSize})) {    in runBenchmark() local
    67: duration = runTensorGEMMBench(tensor_size, FLAGS_iter);    in runBenchmark()
    69: << tensor_size    in runBenchmark()
    71: << tensor_size    in runBenchmark()

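record_function_benchmark.cc times square GEMMs on random tensor_size x tensor_size inputs for a couple of preset sizes. A rough Python equivalent of that timing loop (assumes PyTorch is installed; the sizes and iteration count are made up, not the benchmark's flags):

    import time
    import torch

    def run_tensor_gemm_bench(tensor_size, iters):
        """Return average milliseconds per tensor_size x tensor_size matmul."""
        inp = torch.randn(tensor_size, tensor_size)
        start = time.perf_counter()
        for _ in range(iters):
            torch.mm(inp, inp)
        return (time.perf_counter() - start) * 1e3 / iters

    for tensor_size in (10, 100):      # loosely mirrors kSmallTensorSize / kTensorSize
        ms = run_tensor_gemm_bench(tensor_size, iters=100)
        print(f"{tensor_size}x{tensor_size} GEMM: {ms:.3f} ms/iter")
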
/external/tensorflow/tensorflow/lite/tools/optimize/calibration/

calibration_logger.cc
    26: TfLiteStatus MinMax::Update(const float* values, size_t tensor_size,    in Update() argument
    28: if (tensor_size <= 0) return kTfLiteOk;    in Update()
    31: for (size_t i = 0; i < tensor_size; ++i) {    in Update()
    42: const auto minmax = std::minmax_element(values, values + tensor_size);    in Update()

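calibration_logger.cc keeps a running min/max over every buffer a tensor produces during calibration, bailing out on empty buffers. A minimal Python sketch of the same running-statistics update (a hypothetical class for illustration, not the TFLite API):

    class MinMax:
        """Running min/max over successive value buffers, as used for calibration."""

        def __init__(self):
            self.min = None
            self.max = None

        def update(self, values):
            if len(values) == 0:                 # mirrors the tensor_size <= 0 early return
                return
            lo, hi = min(values), max(values)    # std::minmax_element over the buffer
            self.min = lo if self.min is None else min(self.min, lo)
            self.max = hi if self.max is None else max(self.max, hi)

    logger = MinMax()
    logger.update([0.5, -1.0, 2.0])
    logger.update([3.0])
    assert (logger.min, logger.max) == (-1.0, 3.0)
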
calibrator_test.cc
    74: const size_t tensor_size = 1 * 8 * 8 * 3;    in TEST() local
    76: std::vector<float> ones(tensor_size, 1.0f);    in TEST()
    83: ASSERT_EQ(tensor->bytes, tensor_size * sizeof(float));    in TEST()
    84: for (size_t j = 0; j < tensor_size; j++) {    in TEST()
    94: for (size_t i = 0; i < tensor_size; i++) {    in TEST()
    98: for (size_t i = 0; i < tensor_size; i++) {    in TEST()
    137: const size_t tensor_size = 1 * 8 * 8 * 3;    in TEST() local
    144: ASSERT_EQ(tensor->bytes, tensor_size * sizeof(float));    in TEST()
    145: for (size_t j = 0; j < tensor_size; j++) {    in TEST()
    218: const size_t tensor_size = 1 * 8 * 8 * 3;    in TEST() local
    [all …]

calibration_logger.h
    30: TfLiteStatus Update(const float* values, size_t tensor_size,
    53: const float* tensor_values, size_t tensor_size,    in LogTensorValue() argument
    56: return tensor_id_to_stats_map_[key].Update(tensor_values, tensor_size,    in LogTensorValue()

/external/executorch/examples/models/mobilenet_v2/

model.py
    28: tensor_size = (1, 3, 224, 224)
    29: return (torch.randn(tensor_size),)
    41: tensor_size = (1, 3, 224, 224)
    42: return (torch.randn(tensor_size),)

/external/pytorch/torch/nn/utils/

prune.py
    455: tensor_size = t.nelement()
    458: nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
    461: _validate_pruning_amount(nparams_toprune, tensor_size)
    512: tensor_size = t.nelement()
    515: nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
    518: _validate_pruning_amount(nparams_toprune, tensor_size)
    607: tensor_size = t.shape[self.dim]
    610: nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
    613: _validate_pruning_amount(nparams_toprune, tensor_size)
    637: mask = make_mask(t, self.dim, tensor_size, nparams_toprune)
    [all …]

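In torch.nn.utils.prune the pruning amount can be given either as a fraction or as an absolute count; _compute_nparams_toprune converts it against tensor_size (t.nelement() for unstructured methods, t.shape[dim] for structured ones) and _validate_pruning_amount checks it does not exceed the tensor. A hedged sketch of that conversion and check, not the exact library code:

    def compute_nparams_toprune(amount, tensor_size):
        """Fractional amounts are rounded against tensor_size; integral amounts pass through."""
        if isinstance(amount, float):        # e.g. amount=0.25 -> prune 25% of the entries
            return round(amount * tensor_size)
        return int(amount)                   # absolute number of entries to prune

    def validate_pruning_amount(nparams_toprune, tensor_size):
        if nparams_toprune > tensor_size:
            raise ValueError(
                f"amount={nparams_toprune} should be smaller than the number of "
                f"parameters to prune={tensor_size}")

    # tensor_size is t.nelement() for element-wise pruning, t.shape[dim] for structured pruning.
    nparams = compute_nparams_toprune(0.25, tensor_size=16)   # -> 4
    validate_pruning_amount(nparams, tensor_size=16)
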
/external/tensorflow/tensorflow/core/grappler/inputs/

trivial_test_graph_input_yielder.cc
    31: GraphDef CreateGraphDef(int num_stages, int width, int tensor_size,    in CreateGraphDef() argument
    39: const int batch_size = tensor_size < 0 ? 1 : tensor_size;    in CreateGraphDef()
    90: int num_stages, int width, int tensor_size, bool insert_queue,    in TrivialTestGraphInputYielder() argument
    94: tensor_size_(tensor_size),    in TrivialTestGraphInputYielder()

/external/pytorch/torch/csrc/jit/runtime/static/

memory_planner.cpp
    296: auto tensor_size = ms.first;    in allocateOutputTensors() local
    298: if (tensor_size == 0) {    in allocateOutputTensors()
    301: TORCH_DCHECK_LE(offset + tensor_size, output_buffer_bytes_);    in allocateOutputTensors()
    320: tensor->storage().set_nbytes(tensor_size);    in allocateOutputTensors()
    321: offset += tensor_size;    in allocateOutputTensors()
    411: auto tensor_size = storages_nbytes_[storages_idx];    in allocateManagedTensors() local
    412: if (tensor_size == 0) {    in allocateManagedTensors()
    417: TORCH_DCHECK_LE(offset + tensor_size, managed_bytes_);    in allocateManagedTensors()
    421: TORCH_DCHECK_EQ(tensor_size, managed_tensors_[group_idx].maxTensorSize());    in allocateManagedTensors()
    430: storageImpl->set_nbytes(tensor_size);    in allocateManagedTensors()
    [all …]

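The static-runtime memory planner carves tensors out of one preallocated buffer: a running offset is advanced by each tensor_size, zero-sized tensors are skipped, and a DCHECK guards against walking past the buffer end. A small Python sketch of that bump-style carving (plain offsets only; the storage binding and alignment are left out):

    def carve_offsets(tensor_sizes, buffer_bytes):
        """Assign each tensor a [offset, offset + size) slice of one flat buffer."""
        offsets = []
        offset = 0
        for tensor_size in tensor_sizes:
            if tensor_size == 0:
                offsets.append(None)        # zero-sized tensors get no storage
                continue
            # analogue of TORCH_DCHECK_LE(offset + tensor_size, buffer_bytes)
            assert offset + tensor_size <= buffer_bytes, "buffer too small"
            offsets.append(offset)
            offset += tensor_size
        return offsets

    print(carve_offsets([256, 0, 128], buffer_bytes=1024))   # [0, None, 256]
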
/external/pytorch/torch/csrc/utils/

tensor_flatten.cpp
    21: size_t tensor_size = 0;    in take_tensors() local
    25: tensor_size = indices.numel() * indices.element_size() +    in take_tensors()
    28: tensor_size = tensor.numel() * tensor.element_size();    in take_tensors()
    35: cur_group_size += tensor_size;    in take_tensors()
    47: type_group.size += tensor_size;    in take_tensors()

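take_tensors in tensor_flatten.cpp buckets tensors into groups bounded by a byte limit, counting a sparse tensor as its indices plus its values. A rough Python sketch of that bucketing with the byte sizes computed the same way (assumes PyTorch; simplified to ignore the per-dtype grouping the real function does):

    import torch

    def take_tensors(tensors, size_limit):
        """Split tensors into consecutive groups whose total byte size stays under size_limit."""
        groups, current, current_bytes = [], [], 0
        for tensor in tensors:
            if tensor.is_sparse:
                indices, values = tensor._indices(), tensor._values()
                tensor_size = (indices.numel() * indices.element_size()
                               + values.numel() * values.element_size())
            else:
                tensor_size = tensor.numel() * tensor.element_size()
            if current and current_bytes + tensor_size > size_limit:
                groups.append(current)
                current, current_bytes = [], 0
            current.append(tensor)
            current_bytes += tensor_size
        if current:
            groups.append(current)
        return groups

    groups = take_tensors([torch.randn(1000), torch.randn(1000)], size_limit=4096)
    print([len(g) for g in groups])   # two groups of one tensor each (4000 bytes apiece)
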
/external/pytorch/torch/utils/tensorboard/

_pytorch_graph.py
    42: tensor_size=None,    argument
    50: self.tensor_size = tensor_size
    95: tensor_size = node_cpp.type().sizes()
    97: tensor_size = [
    100: self.tensor_size = tensor_size
    225: outputsize=v.tensor_size,

/external/pytorch/torch/ao/pruning/_experimental/pruner/

FPGM_pruner.py
    89: tensor_size = tensor_weight.shape[0]  # prune filter (row)
    90: nparams_toprune = round(sparsity_level * tensor_size)
    92: max(nparams_toprune, 0), tensor_size

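The FPGM pruner works on whole filters: tensor_size is the number of output channels (rows), and the number of filters to prune is round(sparsity_level * tensor_size) clamped into [0, tensor_size]. As a one-function sketch of what those three lines compute (the function name is made up):

    def nparams_to_prune(sparsity_level, tensor_size):
        """Number of filters to prune, clamped to the valid range."""
        nparams_toprune = round(sparsity_level * tensor_size)
        return min(max(nparams_toprune, 0), tensor_size)

    assert nparams_to_prune(0.5, 10) == 5
    assert nparams_to_prune(1.3, 10) == 10   # clamped to tensor_size
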
/external/pytorch/test/distributed/checkpoint/

test_file_system_checkpoint_cpu.py
    379: tensor_size = 32
    407: shard_sizes=[tensor_size - 8],
    421: shard_sizes=[tensor_size - 10],
    431: "sharded": sharded_tensor.rand(save_spec, tensor_size),
    432: "replicated": torch.rand(tensor_size, device="cpu"),
    441: "sharded": torch.zeros(tensor_size, device="cpu"),
    442: "replicated": sharded_tensor.zeros(load_spec, tensor_size),

/external/pytorch/test/distributed/_tensor/

test_init.py
    44: tensor_size = [4, 8, 12]
    46: local_tensor_size = tensor_size.copy()
    52: tensor_size,
    62: tensor_size = [5, 10, 15]
    65: tensor_size,
    75: init_op(tensor_size, *args, **kwargs),
    83: exp_tensor = init_op(tensor_size, *args, **kwargs)

/external/executorch/examples/models/edsr/

model.py
    27: tensor_size = (1, 3, 224, 224)
    28: return (torch.randn(tensor_size),)

/external/executorch/examples/models/mobilenet_v3/

model.py
    29: tensor_size = (1, 3, 224, 224)
    30: return (torch.randn(tensor_size),)

/external/tensorflow/tensorflow/core/kernels/

unary_ops_composition_test.cc
    91: static Graph* UnaryOpsChain(int tensor_size, int repeat_graph,    in UnaryOpsChain() argument
    95: Tensor t(DT_FLOAT, TensorShape({tensor_size}));    in UnaryOpsChain()
    123: static Graph* UnaryOpsCompo(int tensor_size, int repeat_graph,    in UnaryOpsCompo() argument
    127: Tensor t(DT_FLOAT, TensorShape({tensor_size}));    in UnaryOpsCompo()