# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras text dataset generation utilities."""

import numpy as np

from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.keras.preprocessing import dataset_utils
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.utils.text_dataset_from_directory',
              'keras.preprocessing.text_dataset_from_directory',
              v1=[])
def text_dataset_from_directory(directory,
                                labels='inferred',
                                label_mode='int',
                                class_names=None,
                                batch_size=32,
                                max_length=None,
                                shuffle=True,
                                seed=None,
                                validation_split=None,
                                subset=None,
                                follow_links=False):
  """Generates a `tf.data.Dataset` from text files in a directory.

  If your directory structure is:

  ```
  main_directory/
  ...class_a/
  ......a_text_1.txt
  ......a_text_2.txt
  ...class_b/
  ......b_text_1.txt
  ......b_text_2.txt
  ```

  Then calling `text_dataset_from_directory(main_directory, labels='inferred')`
  will return a `tf.data.Dataset` that yields batches of texts from
  the subdirectories `class_a` and `class_b`, together with labels
  0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).

  Only `.txt` files are supported at this time.

  Args:
    directory: Directory where the data is located.
      If `labels` is "inferred", it should contain
      subdirectories, each containing text files for a class.
      Otherwise, the directory structure is ignored.
    labels: Either "inferred"
      (labels are generated from the directory structure),
      None (no labels),
      or a list/tuple of integer labels of the same size as the number of
      text files found in the directory. Labels should be sorted according
      to the alphanumeric order of the text file paths
      (obtained via `os.walk(directory)` in Python).
    label_mode:
      - 'int': means that the labels are encoded as integers
        (e.g. for `sparse_categorical_crossentropy` loss).
      - 'categorical' means that the labels are
        encoded as a categorical vector
        (e.g. for `categorical_crossentropy` loss).
      - 'binary' means that the labels (there can be only 2)
        are encoded as `float32` scalars with values 0 or 1
        (e.g. for `binary_crossentropy`).
      - None (no labels).
    class_names: Only valid if "labels" is "inferred". This is the explicit
      list of class names (must match names of subdirectories). Used
      to control the order of the classes
      (otherwise alphanumerical order is used).
    batch_size: Size of the batches of data. Default: 32.
    max_length: Maximum size of a text string. Texts longer than this will
      be truncated to `max_length`.
    shuffle: Whether to shuffle the data. Default: True.
      If set to False, sorts the data in alphanumeric order.
    seed: Optional random seed for shuffling and transformations.
    validation_split: Optional float between 0 and 1,
      fraction of data to reserve for validation.
    subset: One of "training" or "validation".
      Only used if `validation_split` is set.
    follow_links: Whether to visit subdirectories pointed to by symlinks.
      Defaults to False.

  Returns:
    A `tf.data.Dataset` object.
      - If `label_mode` is None, it yields `string` tensors of shape
        `(batch_size,)`, containing the contents of a batch of text files.
      - Otherwise, it yields a tuple `(texts, labels)`, where `texts`
        has shape `(batch_size,)` and `labels` follows the format described
        below.

  Rules regarding labels format:
    - if `label_mode` is `int`, the labels are an `int32` tensor of shape
      `(batch_size,)`.
    - if `label_mode` is `binary`, the labels are a `float32` tensor of
      1s and 0s of shape `(batch_size, 1)`.
    - if `label_mode` is `categorical`, the labels are a `float32` tensor
      of shape `(batch_size, num_classes)`, representing a one-hot
      encoding of the class index.
  """
  if labels not in ('inferred', None):
    if not isinstance(labels, (list, tuple)):
      raise ValueError(
          '`labels` argument should be a list/tuple of integer labels, of '
          'the same size as the number of text files in the target '
          'directory. If you wish to infer the labels from the subdirectory '
          'names in the target directory, pass `labels="inferred"`. '
          'If you wish to get a dataset that only contains text samples '
          '(no labels), pass `labels=None`.')
    if class_names:
      raise ValueError('You can only pass `class_names` if the labels are '
                       'inferred from the subdirectory names in the target '
                       'directory (`labels="inferred"`).')
  if label_mode not in {'int', 'categorical', 'binary', None}:
    raise ValueError(
        '`label_mode` argument must be one of "int", "categorical", "binary", '
        'or None. Received: %s' % (label_mode,))
  if labels is None or label_mode is None:
    labels = None
    label_mode = None
  dataset_utils.check_validation_split_arg(
      validation_split, subset, shuffle, seed)

  if seed is None:
    seed = np.random.randint(1e6)
  file_paths, labels, class_names = dataset_utils.index_directory(
      directory,
      labels,
      formats=('.txt',),
      class_names=class_names,
      shuffle=shuffle,
      seed=seed,
      follow_links=follow_links)

  if label_mode == 'binary' and len(class_names) != 2:
    raise ValueError(
        'When passing `label_mode="binary"`, there must be exactly 2 classes. '
        'Found the following classes: %s' % (class_names,))

  file_paths, labels = dataset_utils.get_training_or_validation_split(
      file_paths, labels, validation_split, subset)
  if not file_paths:
    raise ValueError('No text files found.')

  dataset = paths_and_labels_to_dataset(
      file_paths=file_paths,
      labels=labels,
      label_mode=label_mode,
      num_classes=len(class_names),
      max_length=max_length)
  if shuffle:
    # Shuffle locally at each iteration
    dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
  dataset = dataset.batch(batch_size)
  # Users may need to reference `class_names`.
  dataset.class_names = class_names
  return dataset


def paths_and_labels_to_dataset(file_paths,
                                labels,
                                label_mode,
                                num_classes,
                                max_length):
  """Constructs a dataset of text strings and labels."""
  path_ds = dataset_ops.Dataset.from_tensor_slices(file_paths)
  string_ds = path_ds.map(
      lambda x: path_to_string_content(x, max_length))
  if label_mode:
    label_ds = dataset_utils.labels_to_dataset(labels, label_mode, num_classes)
    string_ds = dataset_ops.Dataset.zip((string_ds, label_ds))
  return string_ds


def path_to_string_content(path, max_length):
  txt = io_ops.read_file(path)
  if max_length is not None:
    txt = string_ops.substr(txt, 0, max_length)
  return txt
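# A minimal usage sketch, kept as a comment so it is not part of the library
# itself. The `main_directory` path and the argument values are hypothetical,
# matching the directory layout described in the docstring of
# `text_dataset_from_directory` above. Note that `seed` must be identical for
# the "training" and "validation" calls so the two subsets do not overlap.
#
#   train_ds = text_dataset_from_directory(
#       'main_directory',
#       labels='inferred',
#       label_mode='int',
#       batch_size=32,
#       validation_split=0.2,
#       subset='training',
#       seed=1337)
#   val_ds = text_dataset_from_directory(
#       'main_directory',
#       labels='inferred',
#       label_mode='int',
#       batch_size=32,
#       validation_split=0.2,
#       subset='validation',
#       seed=1337)
#
#   print(train_ds.class_names)  # e.g. ['class_a', 'class_b']
#   for texts, labels in train_ds.take(1):
#     print(texts.shape, labels.shape)  # (32,) (32,) for label_mode='int'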