# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing dataset serialize and deserialize in DE
"""
import filecmp
import glob
import json
import os
import pytest

import numpy as np
from test_minddataset_sampler import add_and_remove_cv_file, get_data, CV_DIR_NAME, CV_FILE_NAME
from util import config_get_set_num_parallel_workers, config_get_set_seed

import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as c
import mindspore.dataset.transforms.py_transforms as py
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.py_transforms as py_vision
from mindspore import log as logger
from mindspore.dataset.vision import Inter


def test_serdes_imagefolder_dataset(remove_json_files=True):
    """
    Test serdes on a simulated ResNet50 dataset pipeline.
    """
    data_dir = "../data/dataset/testPK/data"
    ds.config.set_seed(1)

    # define data augmentation parameters
    rescale = 1.0 / 255.0
    shift = 0.0
    resize_height, resize_width = 224, 224
    weights = [1.0, 0.1, 0.02, 0.3, 0.4, 0.05, 1.2, 0.13, 0.14, 0.015, 0.16, 1.1]

    # Constructing DE pipeline
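    # A SequentialSampler is chained as a child of the WeightedRandomSampler;
    # the parent then draws its weighted samples from the indices produced by
    # the child.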
    sampler = ds.WeightedRandomSampler(weights, 11)
    child_sampler = ds.SequentialSampler()
    sampler.add_child(child_sampler)
    data1 = ds.ImageFolderDataset(data_dir, sampler=sampler)
    data1 = data1.repeat(1)
    data1 = data1.map(operations=[vision.Decode(True)], input_columns=["image"])
    rescale_op = vision.Rescale(rescale, shift)

    resize_op = vision.Resize((resize_height, resize_width), Inter.LINEAR)
    data1 = data1.map(operations=[rescale_op, resize_op], input_columns=["image"])
    data1_1 = ds.TFRecordDataset(["../data/dataset/testTFTestAllTypes/test.data"], num_samples=6).batch(2).repeat(10)
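    # zip pairs the two pipelines column-wise; iteration stops with the
    # shorter branch, here the 11-sample ImageFolder pipeline.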
    data1 = data1.zip(data1_1)

    # Serialize the dataset pre-processing pipeline.
    # data1 should still work after saving.
    ds.serialize(data1, "imagenet_dataset_pipeline.json")
    ds1_dict = ds.serialize(data1)
    assert validate_jsonfile("imagenet_dataset_pipeline.json") is True

    # Print the serialized pipeline to stdout
    ds.show(data1)

    # Deserialize the serialized json file
    data2 = ds.deserialize(json_filepath="imagenet_dataset_pipeline.json")

    # Serialize the pipeline we just deserialized.
    # The content of the json file should be the same as the previously serialized one.
    ds.serialize(data2, "imagenet_dataset_pipeline_1.json")
    assert validate_jsonfile("imagenet_dataset_pipeline_1.json") is True
    assert filecmp.cmp('imagenet_dataset_pipeline.json', 'imagenet_dataset_pipeline_1.json')
    assert data1.get_dataset_size() == data2.get_dataset_size()

    # Deserialize the latest json file again
    data3 = ds.deserialize(json_filepath="imagenet_dataset_pipeline_1.json")
    data4 = ds.deserialize(input_dict=ds1_dict)
    num_samples = 0
    # Iterate and compare the data in the original pipeline (data1) against the deserialized pipelines (data2-data4)
    for item1, item2, item3, item4 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
                                          data2.create_dict_iterator(num_epochs=1, output_numpy=True),
                                          data3.create_dict_iterator(num_epochs=1, output_numpy=True),
                                          data4.create_dict_iterator(num_epochs=1, output_numpy=True)):
        np.testing.assert_array_equal(item1['image'], item2['image'])
        np.testing.assert_array_equal(item1['image'], item3['image'])
        np.testing.assert_array_equal(item1['label'], item2['label'])
        np.testing.assert_array_equal(item1['label'], item3['label'])
        np.testing.assert_array_equal(item3['image'], item4['image'])
        np.testing.assert_array_equal(item3['label'], item4['label'])
        num_samples += 1

    logger.info("Number of data in data1: {}".format(num_samples))
    assert num_samples == 11

    # Remove the generated json file
    if remove_json_files:
        delete_json_files()

def test_serdes_mnist_dataset(remove_json_files=True):
    """
    Test serdes on MNIST dataset pipeline.
    """
    data_dir = "../data/dataset/testMnistData"
    ds.config.set_seed(1)

    data1 = ds.MnistDataset(data_dir, num_samples=100)
    one_hot_encode = c.OneHot(10)  # num_classes is an input argument
    data1 = data1.map(operations=one_hot_encode, input_columns="label")

    # batch_size is an input argument
    data1 = data1.batch(batch_size=10, drop_remainder=True)

    ds.serialize(data1, "mnist_dataset_pipeline.json")
    assert validate_jsonfile("mnist_dataset_pipeline.json") is True

    data2 = ds.deserialize(json_filepath="mnist_dataset_pipeline.json")
    ds.serialize(data2, "mnist_dataset_pipeline_1.json")
    assert validate_jsonfile("mnist_dataset_pipeline_1.json") is True
    assert filecmp.cmp('mnist_dataset_pipeline.json', 'mnist_dataset_pipeline_1.json')

    data3 = ds.deserialize(json_filepath="mnist_dataset_pipeline_1.json")

    num = 0
    for item1, item2, item3 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
                                   data2.create_dict_iterator(num_epochs=1, output_numpy=True),
                                   data3.create_dict_iterator(num_epochs=1, output_numpy=True)):
        np.testing.assert_array_equal(item1['image'], item2['image'])
        np.testing.assert_array_equal(item1['image'], item3['image'])
        np.testing.assert_array_equal(item1['label'], item2['label'])
        np.testing.assert_array_equal(item1['label'], item3['label'])
        num += 1

    logger.info("mnist total num samples is {}".format(num))
    assert num == 10

    if remove_json_files:
        delete_json_files()

def test_serdes_cifar10_dataset(remove_json_files=True):
    """
    Test serdes on Cifar10 dataset pipeline.
    """
    data_dir = "../data/dataset/testCifar10Data"
    original_seed = config_get_set_seed(1)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    data1 = ds.Cifar10Dataset(data_dir, num_samples=10, shuffle=False)
    data1 = data1.take(6)

    trans = [
        vision.RandomCrop((32, 32), (4, 4, 4, 4)),
        vision.Resize((224, 224)),
        vision.Rescale(1.0 / 255.0, 0.0),
        vision.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
        vision.HWC2CHW()
    ]

    type_cast_op = c.TypeCast(mstype.int32)
    data1 = data1.map(operations=type_cast_op, input_columns="label")
    data1 = data1.map(operations=trans, input_columns="image")
    data1 = data1.batch(3, drop_remainder=True)
    data1 = data1.repeat(1)
    # JSON files are needed to create the iterator, so pass remove_json_files=False
    data2 = util_check_serialize_deserialize_file(data1, "cifar10_dataset_pipeline", False)
    num_samples = 0
    # Iterate and compare the data in the original pipeline (data1) against the deserialized pipeline (data2)
    for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
                            data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        np.testing.assert_array_equal(item1['image'], item2['image'])
        num_samples += 1

    assert num_samples == 2

    # Restore the original configuration (seed and num_parallel_workers)
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
    if remove_json_files:
        delete_json_files()

def test_serdes_celeba_dataset(remove_json_files=True):
    """
    Test serdes on CelebA dataset pipeline.
    """
    DATA_DIR = "../data/dataset/testCelebAData/"
    data1 = ds.CelebADataset(DATA_DIR, decode=True, num_shards=1, shard_id=0)
    # define map operations
    data1 = data1.repeat(2)
    center_crop = vision.CenterCrop((80, 80))
    pad_op = vision.Pad(20, fill_value=(20, 20, 20))
    data1 = data1.map(operations=[center_crop, pad_op], input_columns=["image"], num_parallel_workers=8)
    # JSON files are needed to create the iterator, so pass remove_json_files=False
    data2 = util_check_serialize_deserialize_file(data1, "celeba_dataset_pipeline", False)

    num_samples = 0
    # Iterate and compare the data in the original pipeline (data1) against the deserialized pipeline (data2)
    for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
                            data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        np.testing.assert_array_equal(item1['image'], item2['image'])
        num_samples += 1

    assert num_samples == 8
    if remove_json_files:
        delete_json_files()

def test_serdes_csv_dataset(remove_json_files=True):
    """
    Test serdes on CSVDataset pipeline.
    """
    DATA_DIR = "../data/dataset/testCSV/1.csv"
    data1 = ds.CSVDataset(
        DATA_DIR,
        column_defaults=["1", "2", "3", "4"],
        column_names=['col1', 'col2', 'col3', 'col4'],
        shuffle=False)
    columns = ["col1", "col4", "col2"]
    data1 = data1.project(columns=columns)
    # JSON files are needed to create the iterator, so pass remove_json_files=False
    data2 = util_check_serialize_deserialize_file(data1, "csv_dataset_pipeline", False)

    num_samples = 0
    # Iterate and compare the data in the original pipeline (data1) against the deserialized pipeline (data2)
    for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
                            data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        np.testing.assert_array_equal(item1['col1'], item2['col1'])
        np.testing.assert_array_equal(item1['col2'], item2['col2'])
        np.testing.assert_array_equal(item1['col4'], item2['col4'])
        num_samples += 1

    assert num_samples == 3
    if remove_json_files:
        delete_json_files()

def test_serdes_voc_dataset(remove_json_files=True):
    """
    Test serdes on VOC dataset pipeline.
    """
    data_dir = "../data/dataset/testVOC2012"
    original_seed = config_get_set_seed(1)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # define map operations
    random_color_adjust_op = vision.RandomColorAdjust(brightness=(0.5, 0.5))
    random_rotation_op = vision.RandomRotation((0, 90), expand=True, resample=Inter.BILINEAR, center=(50, 50),
                                               fill_value=150)

    data1 = ds.VOCDataset(data_dir, task="Detection", usage="train", decode=True)
    data1 = data1.map(operations=random_color_adjust_op, input_columns=["image"])
    data1 = data1.map(operations=random_rotation_op, input_columns=["image"])
    data1 = data1.skip(2)
    # JSON files are needed to create the iterator, so pass remove_json_files=False
    data2 = util_check_serialize_deserialize_file(data1, "voc_dataset_pipeline", False)

    num_samples = 0
    # Iterate and compare the data in the original pipeline (data1) against the deserialized pipeline (data2)
    for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
                            data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        np.testing.assert_array_equal(item1['image'], item2['image'])
        num_samples += 1

    assert num_samples == 7

    # Restore the original configuration (seed and num_parallel_workers)
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
    if remove_json_files:
        delete_json_files()

def test_serdes_zip_dataset(remove_json_files=True):
    """
    Test serdes on zip dataset pipeline.
    """
    files = ["../data/dataset/testTFTestAllTypes/test.data"]
    schema_file = "../data/dataset/testTFTestAllTypes/datasetSchema.json"
    ds.config.set_seed(1)

    ds0 = ds.TFRecordDataset(files, schema=schema_file, shuffle=ds.Shuffle.GLOBAL)
    data1 = ds.TFRecordDataset(files, schema=schema_file, shuffle=ds.Shuffle.GLOBAL)
    data2 = ds.TFRecordDataset(files, schema=schema_file, shuffle=ds.Shuffle.FILES)
    data2 = data2.shuffle(10000)
    data2 = data2.rename(input_columns=["col_sint16", "col_sint32", "col_sint64", "col_float",
                                        "col_1d", "col_2d", "col_3d", "col_binary"],
                         output_columns=["column_sint16", "column_sint32", "column_sint64", "column_float",
                                         "column_1d", "column_2d", "column_3d", "column_binary"])
    data3 = ds.zip((data1, data2))
    ds.serialize(data3, "zip_dataset_pipeline.json")
    assert validate_jsonfile("zip_dataset_pipeline.json") is True
    assert validate_jsonfile("zip_dataset_pipeline_typo.json") is False

    data4 = ds.deserialize(json_filepath="zip_dataset_pipeline.json")
    ds.serialize(data4, "zip_dataset_pipeline_1.json")
    assert validate_jsonfile("zip_dataset_pipeline_1.json") is True
    assert filecmp.cmp('zip_dataset_pipeline.json', 'zip_dataset_pipeline_1.json')

    rows = 0
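    # Each zipped row holds data1's columns followed by data2's renamed
    # columns; with the fixed seed all pipelines yield rows in the same
    # order, so both halves should match the corresponding ds0 row.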
    for d0, d3, d4 in zip(ds0.create_tuple_iterator(output_numpy=True), data3.create_tuple_iterator(output_numpy=True),
                          data4.create_tuple_iterator(output_numpy=True)):
        num_cols = len(d0)
        offset = 0
        for t1 in d0:
            np.testing.assert_array_equal(t1, d3[offset])
            np.testing.assert_array_equal(t1, d3[offset + num_cols])
            np.testing.assert_array_equal(t1, d4[offset])
            np.testing.assert_array_equal(t1, d4[offset + num_cols])
            offset += 1
        rows += 1
    assert rows == 12

    if remove_json_files:
        delete_json_files()

def test_serdes_random_crop():
    """
    Test serdes on RandomCrop pipeline.
    """
    logger.info("test_random_crop")
    DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
    SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
    original_seed = config_get_set_seed(1)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"])
    decode_op = vision.Decode()
    random_crop_op = vision.RandomCrop([512, 512], [200, 200, 200, 200])
    data1 = data1.map(operations=decode_op, input_columns="image")
    data1 = data1.map(operations=random_crop_op, input_columns="image")

    # Serializing into a Python dictionary
    ds1_dict = ds.serialize(data1)
    # Serializing into a JSON string
    _ = json.dumps(ds1_dict, indent=2)

    # Reconstruct dataset pipeline from its serialized form
    data1_1 = ds.deserialize(input_dict=ds1_dict)

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"])
    data2 = data2.map(operations=decode_op, input_columns="image")

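    # data1 and its deserialized copy must yield identical images; data2
    # (decode only) is consumed alongside but not compared.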
    for item1, item1_1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
                                     data1_1.create_dict_iterator(num_epochs=1, output_numpy=True),
                                     data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        np.testing.assert_array_equal(item1['image'], item1_1['image'])
        _ = item2["image"]

    # Restore the original configuration (seed and num_parallel_workers)
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)

def test_serdes_to_device(remove_json_files=True):
    """
    Test serdes on transfer dataset pipeline.
    """
    data_dir = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
    schema_file = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
    data1 = ds.TFRecordDataset(data_dir, schema_file, columns_list=["image", "label"], shuffle=False)
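    # to_device() appends a device-transfer step, so this checks that a
    # transfer (device queue) node also survives the serdes round trip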
    data1 = data1.to_device()
    util_check_serialize_deserialize_file(data1, "transfer_dataset_pipeline", remove_json_files)

def test_serdes_pyvision(remove_json_files=True):
    """
    Test serdes on py_transform pipeline.
    """
    data_dir = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
    schema_file = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
    data1 = ds.TFRecordDataset(data_dir, schema_file, columns_list=["image", "label"], shuffle=False)
    transforms1 = [
        py_vision.Decode(),
        py_vision.CenterCrop([32, 32]),
        py_vision.ToTensor()
    ]
    transforms2 = [
        py_vision.RandomColorAdjust(),
        py_vision.FiveCrop(1),
        py_vision.Grayscale(),
        py.OneHotOp(1)
    ]
    data1 = data1.map(operations=py.Compose(transforms1), input_columns=["image"])
    data1 = data1.map(operations=py.RandomApply(transforms2), input_columns=["image"])
    util_check_serialize_deserialize_file(data1, "pyvision_dataset_pipeline", remove_json_files)
    data2 = ds.TFRecordDataset(data_dir, schema_file, columns_list=["image", "label"], shuffle=False)
    data2 = data2.map(operations=(lambda x, y, z: (
        np.array(x).flatten().reshape(10, 39),
        np.array(y).flatten().reshape(10, 39),
        np.array(z).flatten().reshape(10, 1)
    )))
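    # A pipeline that maps a multi-column Python lambda should also
    # serialize to a valid JSON file.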
    ds.serialize(data2, "pyvision_dataset_pipeline.json")
    assert validate_jsonfile("pyvision_dataset_pipeline.json") is True

    if remove_json_files:
        delete_json_files()

def test_serdes_uniform_augment(remove_json_files=True):
    """
    Test serdes on uniform augment.
    """
    data_dir = "../data/dataset/testPK/data"
    data = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
    ds.config.set_seed(1)
    transforms_ua = [vision.RandomHorizontalFlip(),
                     vision.RandomVerticalFlip(),
                     vision.RandomColor(),
                     vision.RandomSharpness(),
                     vision.Invert(),
                     vision.AutoContrast(),
                     vision.Equalize()]
    transforms_all = [vision.Decode(), vision.Resize(size=[224, 224]),
                      vision.UniformAugment(transforms=transforms_ua, num_ops=5)]
    data = data.map(operations=transforms_all, input_columns="image", num_parallel_workers=1)
    util_check_serialize_deserialize_file(data, "uniform_augment_pipeline", remove_json_files)

def skip_test_serdes_fill(remove_json_files=True):
    """
    Test serdes on Fill data transform.
    """
    def gen():
        yield (np.array([4, 5, 6, 7], dtype=np.int32),)

    data = ds.GeneratorDataset(gen, column_names=["col"])
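    # Fill replaces every element of the input tensor with the given value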
    fill_op = c.Fill(3)

    data = data.map(operations=fill_op, input_columns=["col"])
    expected = np.array([3, 3, 3, 3], dtype=np.int32)
    for data_row in data:
        np.testing.assert_array_equal(data_row[0].asnumpy(), expected)

    util_check_serialize_deserialize_file(data, "fill_pipeline", remove_json_files)

def test_serdes_exception():
    """
    Test exception case in serdes.
    """
    data_dir = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
    schema_file = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
    data1 = ds.TFRecordDataset(data_dir, schema_file, columns_list=["image", "label"], shuffle=False)
    data1 = data1.filter(input_columns=["image", "label"], predicate=lambda data: data < 11, num_parallel_workers=4)
    data1_json = ds.serialize(data1)
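    # Filter with a Python predicate is not a serializable operation, so
    # round-tripping this pipeline is expected to raise a RuntimeError.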
    with pytest.raises(RuntimeError) as msg:
        data2 = ds.deserialize(input_dict=data1_json)
        ds.serialize(data2, "filter_dataset_fail.json")
    assert "Invalid data, unsupported operation type: Filter" in str(msg)
    delete_json_files()

def util_check_serialize_deserialize_file(data_orig, filename, remove_json_files):
    """
    Utility function for testing serdes files. It checks that a json file with the correct name
    is created by serialization and that the file stays identical across repeated save/load cycles.
    :param data_orig: original data pipeline to be serialized
    :param filename: filename to be saved as json format
    :param remove_json_files: whether to remove the json file after testing
    :return: The data pipeline after serializing and deserializing using the original pipeline
    """
    file1 = filename + ".json"
    file2 = filename + "_1.json"
    ds.serialize(data_orig, file1)
    assert validate_jsonfile(file1) is True
    assert validate_jsonfile("wrong_name.json") is False

    data_changed = ds.deserialize(json_filepath=file1)
    ds.serialize(data_changed, file2)
    assert validate_jsonfile(file2) is True
    assert filecmp.cmp(file1, file2, shallow=False)

    # Remove the generated json file
    if remove_json_files:
        delete_json_files()
    return data_changed


def validate_jsonfile(filepath):
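    """Return True if filepath exists and contains a valid JSON dictionary."""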
    try:
        file_exist = os.path.exists(filepath)
        with open(filepath, 'r') as jfile:
            loaded_json = json.load(jfile)
    except IOError:
        return False
    return file_exist and isinstance(loaded_json, dict)


def delete_json_files():
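    """Remove all json files generated by the tests in the current directory."""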
    file_list = glob.glob('*.json')
    for f in file_list:
        try:
            os.remove(f)
        except IOError:
            logger.info("Error while deleting: {}".format(f))

# Test save load minddataset
def skip_test_minddataset(add_and_remove_cv_file=True):
    """Tutorial for CV MindDataset."""
    columns_list = ["data", "file_name", "label"]
    num_readers = 4
    indices = [1, 2, 3, 5, 7]
    sampler = ds.SubsetRandomSampler(indices)
    data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers,
                              sampler=sampler)

    # Serializing into a Python dictionary
    ds1_dict = ds.serialize(data_set)
    # Serializing into a JSON string
    ds1_json = json.dumps(ds1_dict, sort_keys=True)

    # Reconstruct dataset pipeline from its serialized form
    data_set = ds.deserialize(input_dict=ds1_dict)
    ds2_dict = ds.serialize(data_set)
    # Serializing into a JSON string
    ds2_json = json.dumps(ds2_dict, sort_keys=True)

    assert ds1_json == ds2_json

    _ = get_data(CV_DIR_NAME)
    assert data_set.get_dataset_size() == 5
    num_iter = 0
    for _ in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
        num_iter += 1
    assert num_iter == 5

if __name__ == '__main__':
    test_serdes_imagefolder_dataset()
    test_serdes_mnist_dataset()
    test_serdes_cifar10_dataset()
    test_serdes_celeba_dataset()
    test_serdes_csv_dataset()
    test_serdes_voc_dataset()
    test_serdes_zip_dataset()
    test_serdes_random_crop()
    test_serdes_to_device()
    test_serdes_pyvision()
    test_serdes_uniform_augment()
    skip_test_serdes_fill()
    test_serdes_exception()
    skip_test_minddataset()