/external/tensorflow/tensorflow/python/ops/

rnn.py
    218  def _copy_one_through(output, new_output):   [argument]
    221  return new_output
    223  return new_output
    225  with ops.colocate_with(new_output):
    226  return array_ops.where(copy_cond, output, new_output)
    233  _copy_one_through(zero_output, new_output)
    234  for zero_output, new_output in zip(flat_zero_output, flat_new_output)
    244  new_output, new_state = call_cell()
    246  nest.assert_same_structure(zero_output, new_output)
    250  flat_new_output = nest.flatten(new_output)
    [all …]
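
The rnn.py hits are the copy-through helper used by dynamic_rnn: once a batch entry's sequence has ended, the zero (or previous) output is kept and the cell's fresh output is discarded, selected elementwise with array_ops.where. Below is a minimal standalone sketch of that selection using the public tf.where API; the names, shapes, and explicit tiling are illustrative, and the real helper additionally handles nested state structures and colocation.

    import tensorflow as tf

    def copy_one_through(copy_cond, prev_output, new_output):
        # Elementwise select: where the sequence has already finished, keep
        # the previous (or zero) output; elsewhere take the cell's output.
        return tf.where(copy_cond, prev_output, new_output)

    # Tiny demo: batch of 3 sequences with lengths [1, 3, 2], feature size 2,
    # evaluated at time step t = 2 (0-based).
    sequence_length = tf.constant([1, 3, 2])
    t = 2
    finished = tf.greater_equal(t, sequence_length)  # shape [3], bool
    copy_cond = tf.tile(finished[:, None], [1, 2])   # match the [3, 2] output

    zero_output = tf.zeros([3, 2])
    new_output = tf.fill([3, 2], 7.0)                # stand-in for cell output
    print(copy_one_through(copy_cond, zero_output, new_output))
    # Rows 0 and 2 stay zero (their sequences ended); row 1 takes new_output.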

while_v2_indexed_slices_rewriter.py
    100  new_output = ops.convert_to_tensor_v2(grad_output_slices)
    104  body_grad_graph.structured_outputs[idx] = new_output
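
The indexed-slices rewriter densifies a sparse gradient before storing it as a while-loop output: ops.convert_to_tensor_v2 turns the IndexedSlices into an ordinary tensor, which then replaces the corresponding structured output. A small sketch of what that conversion amounts to at the public-API level; eager tensors here stand in for the graph tensors the rewriter actually manipulates.

    import tensorflow as tf

    # Gathering rows of a variable yields an IndexedSlices gradient rather
    # than a dense tensor.
    params = tf.Variable(tf.ones([5, 3]))
    with tf.GradientTape() as tape:
        y = tf.reduce_sum(tf.gather(params, [0, 2]))
    grad = tape.gradient(y, params)
    print(type(grad).__name__)        # IndexedSlices

    # Densify it, as the rewriter does before recording the loop output.
    dense_grad = tf.convert_to_tensor(grad)
    print(dense_grad.shape)           # (5, 3)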

while_v2.py
    665  new_output = grad_func_graph.internal_capture_to_output[ops.tensor_id(
    672  grad_func_graph.outputs.append(new_output)
    673  grad_func_graph.structured_outputs.append(new_output)

/external/tensorflow/tensorflow/compiler/jit/

build_xla_ops_pass.cc  (all matches in MergeOutgoingDataEdges())
    133  Output new_output(new_node, oidx);   [local]
    139  new_output, {new_output},
    145  new_output = print_op;
    149  DataTypeIsFloating(new_output.type())) {
    154  new_output,
    156  new_output.name(), ") from cluster ", cluster_name));
    157  new_output = check_numerics_op;
    161  Output(old_node, oidx), new_output);
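
Here new_output walks through the post-cluster instrumentation: the pass can splice in a Print node and, for floating-point outputs, a CheckNumerics node before the merged edge is rewired. A rough Python-level equivalent of the floating-point guard using the public tf.debugging.check_numerics; the guard_output name and cluster_name string are illustrative, and the real pass edits Graph nodes in C++.

    import tensorflow as tf

    def guard_output(new_output, cluster_name="cluster"):
        # Only floating-point outputs get the NaN/Inf check; other dtypes
        # pass through unchanged, mirroring the DataTypeIsFloating test.
        if new_output.dtype.is_floating:
            return tf.debugging.check_numerics(
                new_output, message="bad output from cluster " + cluster_name)
        return new_output

    x = tf.constant([1.0, float("nan")])
    try:
        guard_output(x)   # raises InvalidArgumentError because of the NaN
    except tf.errors.InvalidArgumentError:
        print("caught non-finite value")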

/external/tensorflow/tensorflow/compiler/xla/service/

tree_reduction_rewriter.cc  (all matches in HandleReduce())
     88  std::unique_ptr<HloInstruction> new_output =   [local]
     92  return ReplaceWithNewInstruction(hlo, std::move(new_output));
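
The tree reduction rewriter splits one large reduce into a tree: the oversized reduction dimension is reshaped into two smaller ones and the reduce runs in two stages, with the second stage folding the partial results, and the rewritten instruction then replaces the original via ReplaceWithNewInstruction. A NumPy sketch of the shape manipulation only, assuming a sum reduction (identity 0) so padding is harmless; the tile size and helper name are made up for illustration.

    import numpy as np

    def tree_sum(x, tile=4):
        # Stage 1: pad to a multiple of `tile` and reduce each tile.
        n = len(x)
        padded = np.pad(x, (0, -n % tile))
        partial = padded.reshape(-1, tile).sum(axis=1)
        # Stage 2: reduce the per-tile partial sums.
        return partial.sum()

    x = np.arange(10, dtype=np.float32)
    assert tree_sum(x) == x.sum()   # 45.0 either way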

/external/tensorflow/tensorflow/compiler/xla/service/gpu/

horizontal_loop_fusion.cc  (all matches in CreateFusedComputation())
    386  auto new_output = clone_map[old_output];   [local]
    390  new_output->shape().element_type(),
    391  {ShapeUtil::ElementsIn(new_output->shape())},
    393  new_output));
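
In horizontal loop fusion each cloned output is reshaped to a rank-1 shape holding ShapeUtil::ElementsIn(...) elements of the original element type, so the outputs of independent fusions can be laid out back to back. The same flattening step in NumPy terms; the helper name is illustrative, and the real pass builds HLO reshape instructions rather than arrays.

    import numpy as np

    def flatten_outputs(outputs):
        # Reshape every output to rank 1 while keeping its element count,
        # so the results can later be concatenated into one buffer.
        return [np.reshape(o, (o.size,)) for o in outputs]

    outs = [np.zeros((2, 3), np.float16), np.zeros((4,), np.float16)]
    flat = flatten_outputs(outs)
    print([f.shape for f in flat])   # [(6,), (4,)]
    fused = np.concatenate(flat)     # a single 1-D buffer of 10 elements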

/external/mesa3d/src/gallium/drivers/r300/compiler/

radeon_compiler.h
    105  void rc_move_output(struct radeon_compiler * c, unsigned output, unsigned new_output, unsigned writ…

radeon_compiler.c  (all matches in rc_move_output())
    172  void rc_move_output(struct radeon_compiler * c, unsigned output, unsigned new_output, unsigned writ…   [argument]
    183  inst->U.I.DstReg.Index = new_output;
    186  c->Program.OutputsWritten |= 1 << new_output;
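
rc_move_output retargets a shader output register: every instruction whose destination index is the old output is rewritten to write new_output, and the bit for new_output is set in the compiler's OutputsWritten mask. A conceptual Python sketch of those two steps; the dict-shaped instructions are a stand-in for struct rc_instruction, and the truncated writemask parameter from the listing is not modeled here.

    def move_output(instructions, outputs_written, output, new_output):
        # Retarget every instruction that writes the old output register.
        for inst in instructions:
            if inst["dst_file"] == "OUTPUT" and inst["dst_index"] == output:
                inst["dst_index"] = new_output
        # Record that the new output register is now written.
        return outputs_written | (1 << new_output)

    prog = [{"dst_file": "OUTPUT", "dst_index": 0},
            {"dst_file": "TEMP", "dst_index": 0}]
    mask = move_output(prog, outputs_written=0b0001, output=0, new_output=2)
    print(prog[0]["dst_index"], bin(mask))   # 2 0b101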

/external/tensorflow/tensorflow/python/tpu/

tpu_feed.py
     74  new_output = []
     76  new_output.append(
     79  output = new_output

/external/tensorflow/tensorflow/python/ops/parallel_for/

pfor.py
    643  new_output = control_flow_ops.cond(
    648  new_outputs.append(new_output)
   1425  def _add_conversion(self, old_output, new_output):   [argument]
   1427  assert isinstance(new_output, (WrappedTensor, ops.Operation)), new_output
   1428  self._conversion_map[old_output] = new_output
   1578  for old_output, new_output in zip(y_op.outputs, new_op.outputs):
   1579  custom_gradient.copy_handle_data(old_output, new_output)
   1580  new_outputs.append(wrap(new_output, False))
   1641  for old_output, new_output in zip(y_op.outputs, new_outputs):
   1642  assert isinstance(new_output, WrappedTensor), (new_output, y, y_op)
   [all …]
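
pfor maintains a conversion map from each tensor in the original per-iteration graph to the vectorized tensor (or op) that replaces it; _add_conversion records the pairing, and copy_handle_data carries handle metadata across to the new outputs. The public entry point that exercises this machinery is tf.vectorized_map, so here is a short usage example; the lambda and shapes are arbitrary.

    import tensorflow as tf

    # Each per-row computation below is rewritten by pfor into batched ops;
    # the conversion map ties original tensors to their batched replacements
    # during that rewrite.
    x = tf.random.normal([8, 3])
    y = tf.vectorized_map(lambda row: tf.tensordot(row, row, axes=1), x)
    print(y.shape)   # (8,)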

/external/python/cpython3/Modules/

unicodedata.c  (all matches in nfd_nfkd())
    534  Py_UCS4 *new_output;   [local]
    537  new_output = PyMem_Realloc(output, osize*sizeof(Py_UCS4));
    538  if (new_output == NULL) {
    543  output = new_output;

/external/adhd/cras/src/server/

cras_alsa_io.c
   1177  static struct alsa_output_node *new_output(struct alsa_io *aio,   [function]
   1233  new_output(aio, cras_output, node_name);   (in new_output_by_mixer_control())
   1236  new_output(aio, cras_output, ctl_name);   (in new_output_by_mixer_control())
   1522  node = new_output(aio, NULL, jack_name);   (in jack_output_plug_event())
   2238  new_output(aio, NULL, HDMI);   (in alsa_iodev_legacy_complete_init())
   2240  new_output(aio, NULL, INTERNAL_SPEAKER);   (in alsa_iodev_legacy_complete_init())
   2242  new_output(aio, NULL, DEFAULT);   (in alsa_iodev_legacy_complete_init())
   2315  output_node = new_output(aio, control, section->name);   (in alsa_iodev_ucm_add_nodes_and_jacks())

/external/tensorflow/tensorflow/compiler/xla/service/spmd/

dot_handler.cc  (all matches in MoveUsersIntoWindowedDotGeneralLoopOnNonContractingDimensions())
   3379  auto new_output =   [local]
   3383  if (!ShapeUtil::Compatible(new_output->shape(),
   3385  new_output = computation->AddInstruction(HloInstruction::CreateSlice(
   3386  reduce_outputs[i]->shape(), new_output,
   3387  std::vector<int64>(new_output->shape().rank(), 0),
   3389  std::vector<int64>(new_output->shape().rank(), 1)));
   3391  TF_RETURN_IF_ERROR(reduce_outputs[i]->ReplaceAllUsesWith(new_output));
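
In the SPMD dot handler, the value produced inside the windowed loop can carry padding; when its shape is not Compatible with the original reduce output, a Slice starting at 0 with stride 1 in every dimension trims it back before ReplaceAllUsesWith rewires the users. The trimming step sketched with NumPy; the function name and shapes are illustrative.

    import numpy as np

    def slice_back(new_output, expected_shape):
        # Start at 0 and take stride-1 slices down to the expected extent in
        # every dimension, mirroring HloInstruction::CreateSlice above.
        if new_output.shape == tuple(expected_shape):
            return new_output
        return new_output[tuple(slice(0, d) for d in expected_shape)]

    padded = np.arange(12.0).reshape(4, 3)    # e.g. padded up for sharding
    print(slice_back(padded, (3, 2)).shape)   # (3, 2)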

/external/tensorflow/tensorflow/compiler/mlir/tensorflow/translate/

import_model.cc  (all matches in AddBackedge())
   2082  auto new_output = new_dst->getResult(i);   [local]
   2083  dst->getResult(i).replaceAllUsesWith(new_output);