/third_party/mindspore/tests/ut/python/dataset/
test_datasets_textfileop.py
    26   data = ds.TextFileDataset(DATA_FILE)
    35   data = ds.TextFileDataset(DATA_ALL_FILE)
    45   data = ds.TextFileDataset(DATA_FILE)
    56   data = ds.TextFileDataset(DATA_ALL_FILE, shuffle=False)
    73   data = ds.TextFileDataset(DATA_ALL_FILE, shuffle=False)
    90   data = ds.TextFileDataset(DATA_ALL_FILE, shuffle=ds.Shuffle.FILES)
    107  data = ds.TextFileDataset(DATA_ALL_FILE, shuffle=ds.Shuffle.FILES)
    124  data = ds.TextFileDataset(DATA_ALL_FILE, shuffle=ds.Shuffle.GLOBAL)
    141  data = ds.TextFileDataset(DATA_ALL_FILE, shuffle=ds.Shuffle.GLOBAL)
    156  data = ds.TextFileDataset(DATA_FILE, num_samples=2)
    [all …]

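The calls above cover the main constructor options: global shuffle, file-level shuffle, no shuffle, and a num_samples cap. A minimal sketch of the same calls outside the test harness, assuming the 1.x mindspore.dataset API and a placeholder file path (TextFileDataset exposes each line under a "text" column):

    import mindspore.dataset as ds

    DATA_FILE = "/path/to/corpus.txt"  # placeholder path

    # Shuffle every line across all input files (the default behaviour).
    data = ds.TextFileDataset(DATA_FILE, shuffle=ds.Shuffle.GLOBAL)

    # Shuffle only the file order, or keep the original order entirely.
    by_file = ds.TextFileDataset(DATA_FILE, shuffle=ds.Shuffle.FILES)
    ordered = ds.TextFileDataset(DATA_FILE, shuffle=False)

    # Read only the first two lines.
    small = ds.TextFileDataset(DATA_FILE, num_samples=2)

    for row in ordered.create_dict_iterator(num_epochs=1, output_numpy=True):
        print(row["text"])
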
test_text_jieba_tokenizer.py
    53   data = ds.TextFileDataset(DATA_FILE)
    67   data = ds.TextFileDataset(DATA_FILE)
    80   data = ds.TextFileDataset(DATA_FILE)
    94   data = ds.TextFileDataset(DATA_FILE4)
    109  data = ds.TextFileDataset(DATA_FILE4)
    133  data = ds.TextFileDataset(DATA_FILE4)
    151  data = ds.TextFileDataset(DATA_FILE4)
    170  data = ds.TextFileDataset(DATA_FILE4)
    186  data = ds.TextFileDataset(DATA_FILE4)
    212  data = ds.TextFileDataset(DATA_FILE4)
    [all …]

test_sentencepiece_tokenizer.py
    35   dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    47   dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    59   dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    72   dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    84   dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    97   dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    110  dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    120  data = ds.TextFileDataset(VOCAB_FILE, shuffle=False)
    123  dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    161  data = ds.TextFileDataset(VOCAB_FILE, shuffle=False)
    [all …]

test_text_tokenizer.py
    44   dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    60   dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    89   dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    108  dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    136  dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    156  dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    175  dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    202  dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    226  dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    243  dataset = ds.TextFileDataset(NORMALIZE_FILE, shuffle=False)
    [all …]

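The tokenizer tests above all share one pipeline shape: an unshuffled TextFileDataset followed by map() with a tokenizer from mindspore.dataset.text. A hedged sketch of that shape, using WhitespaceTokenizer as a stand-in for the specific tokenizers exercised in the tests and a placeholder path; keyword names follow the 1.x map() signature:

    import mindspore.dataset as ds
    import mindspore.dataset.text as text

    DATA_FILE = "/path/to/corpus.txt"  # placeholder path

    dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)

    # Any tokenizer transform (UnicodeCharTokenizer, BasicTokenizer, ...)
    # slots in the same way.
    dataset = dataset.map(operations=text.WhitespaceTokenizer(),
                          input_columns=["text"])

    for row in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
        print(row["text"])  # one array of tokens per input line
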
test_flat_map.py
    33   data = ds.TextFileDataset(DATA_FILE)
    55   d = ds.TextFileDataset(text_file)
    59   data = ds.TextFileDataset(INDEX_FILE)

test_vocab.py
    42   data = ds.TextFileDataset(DATA_FILE, shuffle=False)
    54   data = ds.TextFileDataset(DATA_FILE, shuffle=False)
    66   data = ds.TextFileDataset(DATA_FILE, shuffle=False)
    126  data = ds.TextFileDataset(DATA_FILE, shuffle=False)
    138  data = ds.TextFileDataset(DATA_FILE, shuffle=False)

test_from_dataset.py
    25   data = ds.TextFileDataset("../data/dataset/testVocab/words.txt", shuffle=False)
    38   data = ds.TextFileDataset("../data/dataset/testTokenizerData/1.txt", shuffle=False)
    128  data = ds.TextFileDataset("../data/dataset/testVocab/words.txt", shuffle=False)

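test_vocab.py and test_from_dataset.py use this kind of source to build a vocabulary and then look tokens up by id. A sketch under the assumption that text.Vocab.from_dataset() and text.Lookup() behave as in the 1.x API; the path and special tokens are illustrative:

    import mindspore.dataset as ds
    import mindspore.dataset.text as text

    data = ds.TextFileDataset("/path/to/words.txt", shuffle=False)  # placeholder

    # Build the vocabulary from the "text" column, placing the special
    # tokens at the start of the id space.
    vocab = text.Vocab.from_dataset(data, columns=["text"],
                                    special_tokens=["<pad>", "<unk>"],
                                    special_first=True)

    # Map each token to its id, falling back to "<unk>" for unknown tokens.
    data = data.map(operations=text.Lookup(vocab, "<unk>"),
                    input_columns=["text"])
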
test_nlp.py
    29   data = ds.TextFileDataset("../data/dataset/testVocab/lines.txt", shuffle=False)
    46   data = ds.TextFileDataset("../data/dataset/testVocab/lines.txt", shuffle=False)

test_text_basic_tokenizer.py
    72   dataset = ds.TextFileDataset(BASIC_TOKENIZER_FILE, shuffle=False)
    96   dataset = ds.TextFileDataset(BASIC_TOKENIZER_FILE, shuffle=False)

test_split.py
    77   d = ds.TextFileDataset(text_file_dataset_path)
    80   d = ds.TextFileDataset(text_file_dataset_path, num_shards=2, shard_id=0)
    89   d = ds.TextFileDataset(text_file_dataset_path, shuffle=False)
    141  d = ds.TextFileDataset(text_file_dataset_path, shuffle=False)
    167  d = ds.TextFileDataset(text_file_dataset_path, shuffle=False)
    191  d = ds.TextFileDataset(text_file_dataset_path, shuffle=False)
    205  d = ds.TextFileDataset(text_file_dataset_path, shuffle=False)

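test_split.py exercises both manual sharding (num_shards / shard_id) and the split() helper on an unshuffled dataset. A sketch of both, assuming Dataset.split() accepts fractional sizes as in the 1.x API; exact behaviour for non-mappable sources such as TextFileDataset can vary between versions:

    import mindspore.dataset as ds

    path = "/path/to/corpus.txt"  # placeholder

    # Manual sharding: this pipeline only sees shard 0 of 2.
    shard0 = ds.TextFileDataset(path, num_shards=2, shard_id=0)

    # split(): 80% / 20% partition of an unshuffled dataset.
    full = ds.TextFileDataset(path, shuffle=False)
    train, valid = full.split([0.8, 0.2], randomize=True)
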
test_text_wordpiece_tokenizer.py
    101  dataset = ds.TextFileDataset(WORDPIECE_TOKENIZER_FILE, shuffle=False)
    121  dataset = ds.TextFileDataset(WORDPIECE_TOKENIZER_FILE, shuffle=False)

test_opt_pass.py
    76   data1 = ds.TextFileDataset(DATA_ALL_FILE, shuffle=ds.Shuffle.GLOBAL)
    77   data2 = ds.TextFileDataset(DATA_ALL_FILE, shuffle=ds.Shuffle.FILES)

test_python_tokenizer.py
    40   dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)

test_text_bert_tokenizer.py
    177  dataset = ds.TextFileDataset(BERT_TOKENIZER_FILE, shuffle=False)
    206  dataset = ds.TextFileDataset(BERT_TOKENIZER_FILE, shuffle=False)

test_datasets_get_dataset_size.py
    215  dataset = ds.TextFileDataset(TEXT_DATA_FILE)
    218  dataset_shard_2_0 = ds.TextFileDataset(TEXT_DATA_FILE, num_shards=2, shard_id=0)

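The get_dataset_size test compares the full dataset against a two-way shard, which is a convenient check that sharding divides the rows as expected. A small sketch with a placeholder path:

    import mindspore.dataset as ds

    TEXT_DATA_FILE = "/path/to/1.txt"  # placeholder

    dataset = ds.TextFileDataset(TEXT_DATA_FILE)
    shard_2_0 = ds.TextFileDataset(TEXT_DATA_FILE, num_shards=2, shard_id=0)

    # A shard holds about half the rows (rounded up when the total is odd).
    print(dataset.get_dataset_size())
    print(shard_2_0.get_dataset_size())
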
test_datasets_clue.py
    75   data = ds.TextFileDataset(TRAIN_FILE)

test_cache_nomap.py
    1809  ds1 = ds.TextFileDataset(TEXT_FILE_DATA_DIR, num_shards=3, shard_id=1, cache=some_cache)
    1850  ds1 = ds.TextFileDataset(TEXT_FILE_DATA_DIR, num_samples=2)
    2166  data = ds.TextFileDataset(TEXT_FILE_DATA_DIR)

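test_cache_nomap.py additionally passes a cache object so that rows read from the text files can be served from the dataset cache on later passes. A hedged sketch, assuming ds.DatasetCache and a cache server already started with cache_admin; the session id and directory are placeholders:

    import mindspore.dataset as ds

    # Requires a running cache server (`cache_admin --start`) and a session id
    # generated with `cache_admin -g`.
    some_cache = ds.DatasetCache(session_id=1, size=0)

    ds1 = ds.TextFileDataset("/path/to/text_dir", num_shards=3, shard_id=1,
                             cache=some_cache)
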
/third_party/mindspore/mindspore/ccsrc/minddata/dataset/include/dataset/
samplers.h
    49   friend class TextFileDataset;

datasets.h
    2405  class TextFileDataset : public Dataset {
    2421  …TextFileDataset(const std::vector<std::vector<char>> &dataset_files, int64_t num_samples, ShuffleM…
    2425  ~TextFileDataset() = default;
    2444  inline std::shared_ptr<TextFileDataset> TextFile(const std::vector<std::string> &dataset_files, int…
    2448  …return std::make_shared<TextFileDataset>(VectorStringToChar(dataset_files), num_samples, shuffle, …

/third_party/mindspore/mindspore/train/callback/
_summary_collector.py
    838  dataset_files_set = (dataset_package.TFRecordDataset, dataset_package.TextFileDataset)

/third_party/mindspore/mindspore/ccsrc/minddata/dataset/api/
datasets.cc
    1253  TextFileDataset::TextFileDataset(const std::vector<std::vector<char>> &dataset_files, int64_t num_s…

/third_party/mindspore/mindspore/dataset/engine/
datasets.py
    6061  class TextFileDataset(SourceDataset):