Home
last modified time | relevance | path

Searched refs:return_tensors (Results 1 – 15 of 15) sorted by relevance

/external/tensorflow/tensorflow/core/common_runtime/
graph_constructor_test.cc:1640 opts.return_tensors.push_back({"input", 1}); in TEST_F()
1641 opts.return_tensors.push_back({"t1", 0}); in TEST_F()
1642 opts.return_tensors.push_back({"input", 0}); in TEST_F()
1656 ASSERT_EQ(results.return_tensors.size(), 3); in TEST_F()
1657 EXPECT_EQ(results.return_tensors[0].first->name(), "input"); in TEST_F()
1658 EXPECT_EQ(results.return_tensors[0].second, 1); in TEST_F()
1659 EXPECT_EQ(results.return_tensors[1].first->name(), "t1"); in TEST_F()
1660 EXPECT_EQ(results.return_tensors[1].second, 0); in TEST_F()
1661 EXPECT_EQ(results.return_tensors[2].first->name(), "input"); in TEST_F()
1662 EXPECT_EQ(results.return_tensors[2].second, 0); in TEST_F()
[all …]
graph_constructor.cc:113 return_tensors(in.return_tensors.begin(), in.return_tensors.end()), in Options()
130 std::vector<TensorId> return_tensors; member
162 std::vector<std::pair<Node*, int>>* return_tensors,
168 ShapeRefiner* refiner, std::vector<std::pair<Node*, int>>* return_tensors,
174 std::vector<std::pair<Node*, int>>* return_tensors, in GraphConstructor() argument
182 return_tensors_(return_tensors), in GraphConstructor()
403 std::vector<std::pair<Node*, int>>* return_tensors, in NodeDefCopyingGraphConstructor() argument
406 : GraphConstructor(opts, g, refiner, return_tensors, return_nodes, in NodeDefCopyingGraphConstructor()
430 ShapeRefiner* refiner, std::vector<std::pair<Node*, int>>* return_tensors, in NodeDefMovingGraphConstructor() argument
433 : GraphConstructor(opts, g, refiner, return_tensors, return_nodes, in NodeDefMovingGraphConstructor()
[all …]
graph_constructor.h:127 std::vector<SafeTensorId> return_tensors; member
165 std::vector<std::pair<Node*, Index>> return_tensors; member
/external/executorch/examples/mediatek/aot_utils/llm_utils/tokenizers_/
tokenization_utils_fast.py:519 return_tensors: Optional[str] = None,
590 sanitized_tokens, sanitized_encodings, tensor_type=return_tensors
604 return_tensors: Optional[bool] = None,
624 return_tensors=return_tensors,
637 if return_tensors is None and not return_overflowing_tokens:
/external/executorch/examples/models/mobilebert/
model.py:31 return (tokenizer("Hello, my dog is cute", return_tensors="pt")["input_ids"],)
/external/tensorflow/tensorflow/c/
c_api.cc:1689 opts->opts.return_tensors.emplace_back(oper_name_str, index); in TF_ImportGraphDefOptionsAddReturnOutput()
1694 return opts->opts.return_tensors.size(); in TF_ImportGraphDefOptionsNumReturnOutputs()
1710 *num_outputs = results->return_tensors.size(); in TF_ImportGraphDefResultsReturnOutputs()
1711 *outputs = results->return_tensors.data(); in TF_ImportGraphDefResultsReturnOutputs()
1751 DCHECK(tf_results->return_tensors.empty()); in GraphImportGraphDefLocked()
1752 tf_results->return_tensors.resize(results.return_tensors.size()); in GraphImportGraphDefLocked()
1753 for (int i = 0; i < results.return_tensors.size(); ++i) { in GraphImportGraphDefLocked()
1754 tf_results->return_tensors[i].oper = in GraphImportGraphDefLocked()
1755 ToOperation(results.return_tensors[i].first); in GraphImportGraphDefLocked()
1756 tf_results->return_tensors[i].index = results.return_tensors[i].second; in GraphImportGraphDefLocked()
[all …]
c_api_internal.h:145 std::vector<TF_Output> return_tensors; member
/external/executorch/examples/models/phi-3-mini/
eager.py:72 tokens = tokenizer.encode(args.prompt, return_tensors="pt")
/external/executorch/examples/qualcomm/scripts/
mobilebert_fine_tune.py:149 return_tensors="pt",
158 return_tensors="pt",
/external/executorch/examples/mediatek/aot_utils/llm_utils/
utils.py:139 prompt_tokens = tokenizer(prompt_formatted, return_tensors="np")[
175 curr_chunk_formatted, return_tensors="np"
/external/executorch/examples/models/llava/
model.py:309 self.input_ids = self.tokenizer.encode(self.prompt, return_tensors="pt").cpu()
/external/executorch/examples/mediatek/model_export_scripts/
llama.py:155 inp_encoded = tokenizer(text, return_tensors="pt") # dict
/external/pytorch/docs/source/
torch.compiler_get_started.rst:120 encoded_input = tokenizer(text, return_tensors='pt').to(device="cuda:0")
/external/pytorch/test/onnx/
test_fx_to_onnx_with_onnxruntime.py:889 kwargs = tokenizer("Hello world!", return_tensors="pt")
1116 kwargs = tokenizer("Hello world!", return_tensors="pt")
test_pytorch_onnx_no_runtime.py:1201 ids = tokenizer.batch_encode_plus(["This is a test"], return_tensors="pt").to(