
Searched refs: all_tensors (Results 1 – 16 of 16) sorted by relevance

/external/tensorflow/tensorflow/lite/testing/op_tests/
pack.py 72 all_tensors = []
78 all_tensors.append(input_tensor)
79 out = tf.stack(all_tensors, parameters["axis"])
80 return all_tensors, [out]
concat.py 79 all_tensors = []
85 all_tensors.append(input_tensor)
86 out = tf.concat(all_tensors, parameters["axis"])
87 return all_tensors, [out]
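
Both Lite op-test generators above follow the same shape: build up a Python list of per-input tensors, then combine the whole list with one op along the parameterized axis (tf.stack adds a new axis, tf.concat joins along an existing one). A minimal standalone sketch of that pattern; the constants and axis below are illustrative, not taken from the generators:

import tensorflow as tf

# Collect inputs into one list, as pack.py/concat.py do per test case.
all_tensors = [tf.constant([[1.0, 2.0]]), tf.constant([[3.0, 4.0]])]

stacked = tf.stack(all_tensors, axis=0)  # pack.py path: new leading axis
joined = tf.concat(all_tensors, axis=0)  # concat.py path: existing axis

print(stacked.shape)  # (2, 1, 2)
print(joined.shape)   # (2, 2)
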
/external/pytorch/torch/testing/_internal/optests/
autograd_registration.py 76 all_tensors = [arg for arg in flat_args if isinstance(arg, torch.Tensor)]
77 if not any(t.requires_grad for t in all_tensors):
85 all_device_types = {arg.device.type for arg in all_tensors}
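
The autograd registration check above keeps only the tensor arguments from the flattened inputs, returns early when none of them require grad, and then gathers the set of device types those tensors live on. A rough self-contained sketch of those three steps; flat_args here is an invented example, not the optest harness input:

import torch

flat_args = [torch.randn(2, 3, requires_grad=True), 1.5, torch.zeros(4)]

all_tensors = [arg for arg in flat_args if isinstance(arg, torch.Tensor)]
if not any(t.requires_grad for t in all_tensors):
    # Nothing to verify: no gradient will flow through this call.
    pass

all_device_types = {arg.device.type for arg in all_tensors}
print(all_device_types)  # e.g. {'cpu'}
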
/external/tensorflow/tensorflow/python/ops/
custom_gradient.py 485 all_tensors = flat_result + args + variables
510 original_tensors = all_tensors
512 all_tensors = array_ops.identity_n(all_tensors)
519 all_tensors[i]._handle_data = t._handle_data # pylint: disable=protected-access
521 f.__name__, all_tensors, original_tensors, tape_grad_fn)
522 for ot, t in zip(original_tensors, all_tensors):
525 structure=result, flat_sequence=all_tensors[:flat_result_len])
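
custom_gradient.py flattens outputs, inputs, and captured variables into one list, passes it through identity_n so each tensor gets a fresh graph node the gradient tape can attach to, and keeps the originals around for bookkeeping. A hedged sketch of just the identity_n step, using the public tf.identity_n API and leaving out the tape and handle-data plumbing:

import tensorflow as tf

flat_result = [tf.constant(1.0)]
args = [tf.constant(2.0), tf.constant(3.0)]
variables = [tf.Variable(4.0)]

# One flat list, mirroring flat_result + args + variables above.
all_tensors = list(flat_result) + list(args) + [v.read_value() for v in variables]

original_tensors = all_tensors
# tf.identity_n returns one new tensor per input, in order.
all_tensors = tf.identity_n(all_tensors)

for ot, t in zip(original_tensors, all_tensors):
    assert ot.shape == t.shape
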
math_ops.py 219 all_tensors = (expanded_start, res, expanded_stop)
220 concatenated = array_ops.concat(all_tensors, axis=axis)
/external/tensorflow/tensorflow/python/tools/
inspect_checkpoint.py 54 def print_tensors_in_checkpoint_file(file_name, tensor_name, all_tensors, argument
73 if all_tensors or all_tensor_names:
78 if all_tensors:
159 FLAGS.all_tensors, FLAGS.all_tensor_names,
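
inspect_checkpoint.py wires the all_tensors flag straight into print_tensors_in_checkpoint_file: when it (or all_tensor_names) is set, every tensor in the checkpoint is printed instead of a single named one. A typical call; the checkpoint prefix model.ckpt is illustrative only:

from tensorflow.python.tools.inspect_checkpoint import (
    print_tensors_in_checkpoint_file,
)

# tensor_name is ignored when all_tensors=True; all tensors are dumped.
print_tensors_in_checkpoint_file(
    file_name="model.ckpt",   # illustrative checkpoint prefix
    tensor_name="",
    all_tensors=True,
    all_tensor_names=False,
)
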
/external/pytorch/test/fx/
quantization.py 40 self.all_tensors = True
45 self.all_tensors = False
76 if not self.all_tensors:
208 assert self.all_tensors
/external/pytorch/torch/testing/_internal/
jit_metaprogramming_utils.py 406 all_tensors: List[Any]
426 self.all_tensors = [*self.tensor_args, *[v for k, v in self.tensor_kwargs.items()]]
464 traced = torch.jit.trace(fn_tensors, split_inputs.all_tensors, check_trace=False)
465 self.assertExportImport(traced.graph, split_inputs.all_tensors)
466 output = traced(*split_inputs.all_tensors)
473 output = traced_fn.traced(*split_inputs.all_tensors)
476 … traced_fn.last_graph = traced.graph_for(*split_inputs.all_tensors) # type: ignore[attr-defined]
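
The JIT metaprogramming helpers gather every positional and keyword tensor into split_inputs.all_tensors, trace the function over that flat list with check_trace=False, and then re-run the traced artifact on the same inputs to compare outputs. A simplified, self-contained version of the trace-and-rerun step; fn and the inputs below are stand-ins, not the harness's real test cases:

import torch

def fn(a, b):
    return a * 2 + b

all_tensors = [torch.randn(3), torch.randn(3)]

# check_trace=False mirrors the harness: it validates outputs itself
# afterwards instead of relying on trace verification.
traced = torch.jit.trace(fn, tuple(all_tensors), check_trace=False)
output = traced(*all_tensors)
print(output.shape)  # torch.Size([3])
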
composite_compliance.py 254 all_tensors = all(isinstance(elt, torch.Tensor) for elt in lst)
255 if all_tensors:
/external/tensorflow/tensorflow/python/eager/
gradient_input_output_exclusions.py 315 all_tensors = set(range(num_values))
316 unused_tensors = all_tensors - used_tensors
/external/pytorch/torch/utils/model_dump/
code.js 537 let all_tensors = [];
540 all_tensors.push(...tensors.values());
543 for (const storage of all_tensors.values()) {
/external/tensorflow/tensorflow/python/training/
saver.py 169 all_tensors = []
176 all_tensors.extend(
178 return all_tensors
360 all_tensors = self.bulk_restore(filename_tensor, saveables, preferred_shard,
381 saveable_tensors = all_tensors[idx:idx + len(saveable.specs)]
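
saver.py builds one flat all_tensors list out of the bulk-restored values and then hands each saveable the slice that matches its number of specs. The same slicing, with plain lists standing in for the saveables and the restore ops (both invented for illustration):

# Flat list of restored values, as returned by bulk_restore.
all_tensors = ["t0", "t1", "t2", "t3", "t4"]
spec_counts = [2, 1, 2]  # len(saveable.specs) per saveable, made up here

idx = 0
for count in spec_counts:
    saveable_tensors = all_tensors[idx:idx + count]
    idx += count
    print(saveable_tensors)  # ['t0', 't1'], ['t2'], ['t3', 't4']
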
/external/ComputeLibrary/src/dynamic_fusion/sketch/utils/
DependencyGraph.h 280 std::vector<TensorId> all_tensors() const in all_tensors() function
467 for(auto t : all_tensors()) in remove_operator()
/external/pytorch/torch/distributed/optim/
zero_redundancy_optimizer.py 1415 all_tensors = True
1418 all_tensors &= isinstance(param, torch.Tensor)
1420 if not all_tensors and not all_dicts:
1425 if all_tensors:
/external/pytorch/torch/distributed/fsdp/
_optim_utils.py 1009 all_tensors = True
1012 all_tensors &= isinstance(param, torch.Tensor)
1014 if not all_tensors and not all_dicts:
1016 if all_tensors:
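
ZeroRedundancyOptimizer and the FSDP optim utils above share the same validation idiom: and-accumulate an all_tensors flag and an all_dicts flag over the values, then reject anything that is not homogeneous. A stripped-down sketch of that idiom; params is an invented example list:

import torch

params = [torch.randn(2), torch.randn(3)]  # could equally be a list of dicts

all_tensors = True
all_dicts = True
for param in params:
    all_tensors &= isinstance(param, torch.Tensor)
    all_dicts &= isinstance(param, dict)

if not all_tensors and not all_dicts:
    raise TypeError("values must be all tensors or all dicts")
if all_tensors:
    # Homogeneous tensor case: safe to index the list positionally.
    pass
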
/external/pytorch/torch/onnx/
symbolic_opset9.py 6229 all_tensors = symbolic_helper._unpack_list(self)
6230 t_with_final_shape = zeros_like(g, all_tensors[0])
6234 for t in all_tensors:
6237 t_list = [expand_as(g, t, t_with_final_shape) for t in all_tensors]