/external/executorch/backends/qualcomm/_passes/ |
D | recompose_rms_norm.py |
    29  def _get_gamma_node(self, output_node): argument
    31  for a in output_node.args:
    51  output_node = src_partition.output_nodes[0]
    53  gamma_node = self._get_gamma_node(output_node)
    55  with graph.inserting_before(output_node):
    68  users = output_node.users.copy()
    70  user.replace_input_with(output_node, rms_node)
    72  rms_node.meta = output_node.meta
|
D | convert_interpolate_with_upsample2d.py |
    28  output_node = src_partition.output_nodes[0]
    32  if output_node.target.__name__ == "aten.index.Tensor":
    35  output_size = list(output_node.meta["val"].shape)
    41  output_size = list(output_node.meta["val"].shape)
    48  users = output_node.users.copy()
    50  user.replace_input_with(output_node, upsample2d_node)
    52  upsample2d_node.meta = output_node.meta
|
D | convert_prelu.py |
    27  output_node = src_partition.output_nodes[0]
    36  users = output_node.users.copy()
    38  user.replace_input_with(output_node, prelu_node)
    40  prelu_node.meta = output_node.meta
|
D | fuse_consecutive_transpose.py |
    53  input_node, output_node = self.nodes[0].args[0], self.nodes[-1]
    64  users = output_node.users.copy()
    66  user.replace_input_with(output_node, permute_node)
    69  permute_node.meta = output_node.meta
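
The four QNN passes above share a single rewiring idiom: build the replacement node under graph.inserting_before(output_node), redirect every user with replace_input_with, and copy the old node's meta so shape/dtype information survives. A minimal sketch of that idiom against plain torch.fx (the swap_add_for_mul pass and its torch.add/torch.mul targets are illustrative, not the ExecuTorch ones):

import torch
from torch import fx

def swap_add_for_mul(gm: fx.GraphModule) -> fx.GraphModule:
    # Toy pass: replace every torch.add call with torch.mul using the same
    # insert -> rewire users -> copy meta -> erase sequence as the passes above.
    graph = gm.graph
    for old_node in list(graph.nodes):
        if old_node.op != "call_function" or old_node.target is not torch.add:
            continue
        with graph.inserting_before(old_node):
            new_node = graph.create_node(
                "call_function", torch.mul, args=old_node.args, kwargs=old_node.kwargs
            )
        for user in list(old_node.users):  # snapshot, mirroring users.copy() above
            user.replace_input_with(old_node, new_node)
        new_node.meta = old_node.meta      # keep recorded shape/dtype metadata attached
        graph.erase_node(old_node)
    gm.recompile()
    return gm

Applied to fx.symbolic_trace(model), each add call site is replaced in place while its recorded meta stays attached to the new node.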
|
/external/pytorch/torch/_export/passes/ |
D | replace_with_hop_pass_util.py |
    43  output_node = next(iter(reversed(sub_gm.graph.nodes)), None)
    48  if isinstance(output_node, torch.fx.Node) and output_node.op != "output":
    49  output_node = None
    50  if output_node is not None:
    51  assert len(output_node.args) == 1
    52  output_args = output_node.args[0]
    68  output_node = output_args[idx]
    69  get_item_node._rename(output_node.name)
    70  get_item_node.meta = output_node.meta
    81  output_node.args = ((output_args,),)
|
/external/executorch/exir/passes/ |
D | weights_to_outputs_pass.py |
    49  output_node = None
    52  output_node = node
    54  assert output_node is not None
    71  new_output_nodes.extend(output_node.args[0])
    75  output_node.replace_all_uses_with(new_output)
    76  gm.graph.erase_node(output_node)
|
D | insert_write_back_for_buffers_pass.py |
    31  output_node = None
    34  output_node = node
    36  assert output_node is not None
    37  outputs = pytree.tree_flatten(output_node.args)[0]
    57  with gm.graph.inserting_before(output_node):
    64  with gm.graph.inserting_before(output_node):
    68  output_node.replace_all_uses_with(new_output)
    69  gm.graph.erase_node(output_node)
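
Both exir passes locate the trailing output node by scanning graph.nodes, then rebuild it so the graph returns extra values (weight gradients in one case, written-back buffer states in the other). A rough, self-contained sketch of that append-outputs step, assuming a torch.fx GraphModule and with extra_nodes standing in for whatever nodes the real passes collect:

from torch import fx

def append_outputs(gm: fx.GraphModule, extra_nodes: list) -> None:
    graph = gm.graph
    output_node = None
    for node in graph.nodes:
        if node.op == "output":
            output_node = node
    assert output_node is not None
    # Existing outputs live in a single tuple at args[0]; extend it.
    new_outputs = tuple(output_node.args[0]) + tuple(extra_nodes)
    with graph.inserting_before(output_node):
        new_output = graph.output(new_outputs)
    new_output.meta = output_node.meta.copy()
    output_node.replace_all_uses_with(new_output)
    graph.erase_node(output_node)
    gm.recompile()

After recompile(), calling the module returns the original outputs followed by the appended values.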
|
/external/tensorflow/tensorflow/python/tools/ |
D | strip_unused_test.py |
    45  output_node = math_ops.multiply(
    47  math_ops.add(output_node, 2.0, name="later_node")
    49  output = self.evaluate(output_node)
    94  output_node = sess.graph.get_tensor_by_name("output_node:0")
    95  output = sess.run(output_node, feed_dict={input_node: [10.0]})
    108  output_node = math_ops.multiply(
    110  math_ops.add(output_node, 2.0, name="later_node")
    112  output = self.evaluate(output_node)
    152  output_node = sess.graph.get_tensor_by_name("output_node:0")
    153  output = sess.run(output_node,
|
D | freeze_graph_test.py |
    60  output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
    64  output = sess.run(output_node)
    114  output_node = sess.graph.get_tensor_by_name("output_node:0")
    115  output = sess.run(output_node)
    176  output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
    180  output = sess.run(output_node)
    216  output_node = sess.graph.get_tensor_by_name("output_node:0")
    217  output = sess.run(output_node)
    270  output_node = sess.graph.get_tensor_by_name("output_node:0")
    271  output = sess.run(output_node, feed_dict={input_node: [example]})
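
Both test files drive the same TF1-style round trip: give the multiply op the name output_node when the graph is built, then fetch it back as the tensor "output_node:0" and run it under a session. A condensed sketch using public tf.compat.v1 APIs rather than the test-harness helpers above:

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    input_node = tf.compat.v1.placeholder(tf.float32, shape=[1], name="input_node")
    tf.multiply(input_node, 2.0, name="output_node")

with tf.compat.v1.Session(graph=graph) as sess:
    # "<op_name>:0" addresses the op's first output tensor.
    output_node = sess.graph.get_tensor_by_name("output_node:0")
    print(sess.run(output_node, feed_dict={input_node: [10.0]}))  # [20.]

This name-based lookup is what lets freeze_graph and strip_unused operate on a GraphDef without holding Python references to the original tensors.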
|
/external/executorch/backends/xnnpack/operators/ |
D | op_multiply.py |
    61  output_node = get_relu_fused_node(node) or node
    64  if output_node.target == exir_ops.edge.aten.relu.default:
    65  output_node.meta["XNNPACK_FUSED"] = True
    69  output_node,
    72  quant_params=QuantParams.from_outputs(output_node),
    75  output_id = vals_to_ids[output_node]
|
/external/executorch/exir/emit/ |
D | _emit_program.py |
    53  output_node = None
    56  output_node = node
    57  assert output_node is not None
    63  outputs = pytree.tree_flatten(output_node.args)[0]
    71  with gm.graph.inserting_before(output_node):
    74  new_output.meta = output_node.meta.copy()
    75  output_node.replace_all_uses_with(new_output)
    76  gm.graph.erase_node(output_node)
|
/external/tensorflow/tensorflow/core/data/ |
D | rewrite_utils.cc |
    105  const std::string& output_node = tensor.substr(0, tensor.find(':'));  in RemoveFakeSinks() local
    106  if (identity_map.find(output_node) != identity_map.end()) {  in RemoveFakeSinks()
    108  identity_map.at(output_node);  in RemoveFakeSinks()
    176  string output_node;  in RewriteDataset() local
    178  AsGraphDefForRewrite(ctx, input, &input_list, &graph_def, &output_node));  in RewriteDataset()
    182  ApplyRewrites(ctx, config_factory, &graph_def, &output_node));  in RewriteDataset()
    203  graph_runner.Run(&graph, flr, input_list, {output_node}, &outputs));  in RewriteDataset()
    214  output_node = std::move(output_node)]() {  in RewriteDataset()
    218  if (node.name() == output_node) {  in RewriteDataset()
    224  VLOG(3) << "Failed to find node: " << output_node;  in RewriteDataset()
|
D | serialization_utils.cc |
    59  const string& output_node, Tensor* result) {  in FromGraphDef() argument
    70  {output_node}, &outputs));  in FromGraphDef()
    278  tstring output_node, serialized_graph_def;  in ReadDatasetInternal() local
    280  ReadScalar(n, strings::StrCat(key, kOutputNode), &output_node));  in ReadDatasetInternal()
    285  TF_RETURN_IF_ERROR(FromGraphDef(flr, graph_def, {}, output_node, val));  in ReadDatasetInternal()
    404  string output_node;  in WriteDatasetInternal() local
    407  output_node = node.input(0);  in WriteDatasetInternal()
    415  WriteScalar(n, strings::StrCat(key, kOutputNode), output_node));  in WriteDatasetInternal()
    461  Node* output_node = nullptr;  in AsGraphDef() local
    463  db.AddInputDataset(&serialization_ctx, dataset, &output_node));  in AsGraphDef()
    [all …]
|
/external/executorch/exir/tests/ |
D | test_joint_graph.py |
    44  output_node = None
    47  output_node = node
    50  orig_outputs = len(output_node.args[0])
    60  output_node = None
    63  output_node = node
    66  weight_outputs = len(output_node.args[0])
|
/external/autotest/client/site_tests/audio_Aplay/ |
D | audio_Aplay.py |
    132  output_node = audio_spec.get_headphone_node(utils.get_board())
    135  output_node = "INTERNAL_SPEAKER"
    139  logging.debug("Test output device %s", output_node)
    141  cras_utils.set_single_selected_output_node(output_node)
    145  if cras_device_type != output_node:
|
/external/tensorflow/tensorflow/core/graph/ |
D | benchmark_testlib.h |
    168  NodeDef output_node = create_node(/*name=*/absl::StrFormat("out%05d", i));  in CreateFaninFanoutNodeGraph() local
    171  output_node.add_input(input_node_index);  in CreateFaninFanoutNodeGraph()
    172  *graph.add_node() = std::move(output_node);  in CreateFaninFanoutNodeGraph()
    177  NodeDef output_node =  in CreateFaninFanoutNodeGraph() local
    179  output_node.add_input(controlled_fanout_input);  in CreateFaninFanoutNodeGraph()
    180  *graph.add_node() = std::move(output_node);  in CreateFaninFanoutNodeGraph()
|
/external/pytorch/torch/_inductor/codegen/rocm/ |
D | rocm_template.py |
    44  self.output_node: Buffer = Buffer("buf_out", layout)
    65  V.graph, "get_dtype", self._fake_get_dtype(self.output_node)
    86  expected_args.extend([self.output_node.get_name()])
    101  output_tensor_meta=TensorMeta.from_irnodes(self.output_node),
    126  self.output_node.get_layout(),
|
/external/pytorch/torch/export/ |
D | _unlift.py |
    80  output_node = None
    83  output_node = node
    85  assert output_node is not None
    86  outputs = pytree.tree_flatten(output_node.args)[0]
    105  with gm.graph.inserting_before(output_node):
    115  with gm.graph.inserting_before(output_node):
    118  new_output.meta.update(output_node.meta)
    119  output_node.replace_all_uses_with(new_output)
    120  gm.graph.erase_node(output_node)
|
D | _remove_effect_tokens_pass.py |
    24  output_node = None
    29  output_node = next(reversed(ep.graph_module.graph.find_nodes(op="output")))
    41  assert output_node is not None
    42  output_args = output_node.args[0]
    45  output_node.args = (tuple(output_args[num_tokens:]),)
|
/external/pytorch/torch/_inductor/codegen/ |
D | cpp_template.py |
    36  self.output_node: ir.Buffer = ir.Buffer("buf_out", layout)
    44  V.graph, "get_dtype", self._fake_get_dtype(self.output_node)
    60  expected_args.extend([self.output_node.get_name()])
    78  output_tensor_meta=TensorMeta.from_irnodes(self.output_node),
    105  self.output_node.get_layout(),
|
/external/pytorch/torch/_inductor/codegen/cuda/ |
D | cuda_template.py |
    46  self.output_node: Buffer = Buffer("buf_out", layout)
    66  V.graph, "get_dtype", self._fake_get_dtype(self.output_node)
    87  expected_args.extend([self.output_node.get_name()])
    102  output_tensor_meta=TensorMeta.from_irnodes(self.output_node),
    127  self.output_node.get_layout(),
|
/external/pytorch/torch/fx/passes/ |
D | split_utils.py |
    157  output_node: Optional[torch.fx.Node] = None
    168  if output_node is not None:
    170  output_node = node
    236  if output_node is None:
    239  for x in flatten(output_node.args[0]):
    289  main_g.output(map_arg(output_node.args[0], main_remapping.__getitem__))
    295  for x in flatten(output_node.args[0]):
|
/external/executorch/backends/cadence/aot/ |
D | compiler_utils.py |
    22  output_node = next(iter(reversed(graph.nodes)))
    24  output_node and output_node.op == "output" and len(output_node.args) == 1
    26  return output_node
    31  output_node = get_output_node(graph)
    32  return node in tree_flatten(output_node.args[0])[0]
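
For reference, the two Cadence helpers matched above fit together roughly like this (a sketch reconstructed from the matched lines, not a verbatim copy of compiler_utils.py):

from torch import fx
from torch.utils._pytree import tree_flatten

def get_output_node(graph: fx.Graph) -> fx.Node:
    # The output node is the last node of an fx.Graph.
    output_node = next(iter(reversed(graph.nodes)))
    assert (
        output_node and output_node.op == "output" and len(output_node.args) == 1
    )
    return output_node

def is_node_in_output(graph: fx.Graph, node: fx.Node) -> bool:
    # A node feeds the graph output if it appears anywhere in the flattened
    # output structure (tuple/list/dict of nodes).
    output_node = get_output_node(graph)
    return node in tree_flatten(output_node.args[0])[0]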
|
/external/executorch/backends/arm/quantizer/quantization_annotation/ |
D | max_pool2d_annotator.py |
    37  output_node = maxpool_partition.output_nodes[0]
    46  … if arm_quantizer_utils.are_annotated([output_node, maxpool_node]):  # type: ignore[list-item]
    68  output_node.meta["quantization_annotation"] = QuantizationAnnotation(
|
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/ |
D | feed-control-dep.pbtxt |
    1  …-tf-input-arrays=input -tf-input-data-types=DT_FLOAT -tf-output-arrays=output_node -o - | FileChec…
    46  name: "output_node"
    66  # CHECK-SAME: outputs = "output_node"
|