# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
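"""
Test CityscapesDataset operations: basic read, polygon task, repeat/batch/getters,
SequentialSampler, invalid parameters and parameter validation.
"""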
import json
import os

import matplotlib.pyplot as plt
import numpy as np
import pytest

import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision


DATASET_DIR = "../data/dataset/testCityscapesData/cityscapes"
DATASET_DIR_TASK_JSON = "../data/dataset/testCityscapesData/cityscapes/testTaskJson"
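# DATASET_DIR holds a small Cityscapes test subset (5 train, 1 test and 1 val samples);
# DATASET_DIR_TASK_JSON additionally provides gtFine polygon JSON annotations for the polygon task test.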


def test_cityscapes_basic(plot=False):
    """
    Validate CityscapesDataset basic read.
    """
    task = "color"         # one of 'instance', 'semantic', 'polygon', 'color'
    quality_mode = "fine"  # one of 'fine', 'coarse'
    usage = "train"        # 'train', 'test', 'val' or 'all' when quality_mode is 'fine';
                           # 'train', 'train_extra', 'val' or 'all' otherwise
    data = ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task,
                                decode=True, shuffle=False)
    count = 0
    images_list = []
    task_list = []
    for item in data.create_dict_iterator(num_epochs=1, output_numpy=True):
        images_list.append(item['image'])
        task_list.append(item['task'])
        count += 1
    assert count == 5
    if plot:
        visualize_dataset(images_list, task_list, task)


def visualize_dataset(images, labels, task):
    """
    Helper function to visualize the dataset samples.
    """
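    # Polygon annotations are JSON strings rather than label images, so there is nothing to plot for that task.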
    if task == "polygon":
        return
    image_num = len(images)
    for i in range(image_num):
        plt.subplot(121)
        plt.imshow(images[i])
        plt.title('Original')
        plt.subplot(122)
        plt.imshow(labels[i])
        plt.title(task)
        plt.savefig('./cityscapes_{}_{}.jpg'.format(task, i))


def test_cityscapes_polygon():
    """
    Validate CityscapesDataset with task of polygon.
    """
    usage = "train"
    quality_mode = "fine"
    task = "polygon"
    data = ds.CityscapesDataset(DATASET_DIR_TASK_JSON, usage=usage, quality_mode=quality_mode, task=task)
    count = 0
    json_file = os.path.join(DATASET_DIR_TASK_JSON, "gtFine/train/aa/aa_000000_gtFine_polygons.json")
    with open(json_file, "r") as f:
        expected = json.load(f)
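    # For the polygon task, the 'task' column holds the annotation as a UTF-8 encoded JSON string.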
    for item in data.create_dict_iterator(num_epochs=1, output_numpy=True):
        task_dict = json.loads(str(item['task'], encoding="utf-8"))
        assert task_dict == expected
        count += 1
    assert count == 1


def test_cityscapes_basic_func():
    """
    Validate CityscapesDataset with repeat, batch and getter operations.
    """
    # case 1: test num_samples
    usage = "train"
    quality_mode = "fine"
    task = "color"
    data1 = ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, num_samples=4)
    num_iter1 = 0
    for _ in data1.create_dict_iterator(num_epochs=1):
        num_iter1 += 1
    assert num_iter1 == 4

    # case 2: test repeat
    data2 = ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, num_samples=5)
    data2 = data2.repeat(5)
    num_iter2 = 0
    for _ in data2.create_dict_iterator(num_epochs=1):
        num_iter2 += 1
    assert num_iter2 == 25

    # case 3: test batch with drop_remainder=False
    data3 = ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, decode=True)
    resize_op = c_vision.Resize((100, 100))
    data3 = data3.map(operations=resize_op, input_columns=["image"], num_parallel_workers=1)
    data3 = data3.map(operations=resize_op, input_columns=["task"], num_parallel_workers=1)
    assert data3.get_dataset_size() == 5
    assert data3.get_batch_size() == 1
    data3 = data3.batch(batch_size=3)  # drop_remainder defaults to False
    assert data3.get_dataset_size() == 2
    assert data3.get_batch_size() == 3
    num_iter3 = 0
    for _ in data3.create_dict_iterator(num_epochs=1):
        num_iter3 += 1
    assert num_iter3 == 2

    # case 4: test batch with drop_remainder=True
    data4 = ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, decode=True)
    resize_op = c_vision.Resize((100, 100))
    data4 = data4.map(operations=resize_op, input_columns=["image"], num_parallel_workers=1)
    data4 = data4.map(operations=resize_op, input_columns=["task"], num_parallel_workers=1)
    assert data4.get_dataset_size() == 5
    assert data4.get_batch_size() == 1
    data4 = data4.batch(batch_size=3, drop_remainder=True)  # the incomplete remainder batch is dropped
    assert data4.get_dataset_size() == 1
    assert data4.get_batch_size() == 3
    num_iter4 = 0
    for _ in data4.create_dict_iterator(num_epochs=1):
        num_iter4 += 1
    assert num_iter4 == 1

    # case 5: test get_col_names
    data5 = ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, decode=True)
    assert data5.get_col_names() == ["image", "task"]


def test_cityscapes_sequential_sampler():
    """
    Test CityscapesDataset with SequentialSampler.
    """
    task = "color"
    quality_mode = "fine"
    usage = "train"

    num_samples = 5
    sampler = ds.SequentialSampler(num_samples=num_samples)
    data1 = ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, sampler=sampler)
    data2 = ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task,
                                 shuffle=False, num_samples=num_samples)
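    # Both pipelines should yield the same samples in the same order:
    # an explicit SequentialSampler versus shuffle=False with num_samples.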
    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
                            data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        np.testing.assert_array_equal(item1["task"], item2["task"])
        num_iter += 1
    assert num_iter == num_samples


def test_cityscapes_exception():
    """
    Validate CityscapesDataset with error parameters.
    """
    task = "color"
    quality_mode = "fine"
    usage = "train"

    error_msg_1 = "does not exist or is not a directory or permission denied!"
    with pytest.raises(ValueError, match=error_msg_1):
        ds.CityscapesDataset("NoExistsDir", usage=usage, quality_mode=quality_mode, task=task)

    error_msg_2 = "sampler and shuffle cannot be specified at the same time"
    with pytest.raises(RuntimeError, match=error_msg_2):
        ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, shuffle=False,
                             sampler=ds.PKSampler(3))

    error_msg_3 = "sampler and sharding cannot be specified at the same time"
    with pytest.raises(RuntimeError, match=error_msg_3):
        ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, num_shards=2,
                             shard_id=0, sampler=ds.PKSampler(3))

    error_msg_4 = "num_shards is specified and currently requires shard_id as well"
    with pytest.raises(RuntimeError, match=error_msg_4):
        ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, num_shards=10)

    error_msg_5 = "shard_id is specified but num_shards is not"
    with pytest.raises(RuntimeError, match=error_msg_5):
        ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, shard_id=0)

    error_msg_6 = "Input shard_id is not within the required interval"
    with pytest.raises(ValueError, match=error_msg_6):
        ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, num_shards=5, shard_id=-1)
    with pytest.raises(ValueError, match=error_msg_6):
        ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, num_shards=5, shard_id=5)
    with pytest.raises(ValueError, match=error_msg_6):
        ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, num_shards=2, shard_id=5)

    error_msg_7 = "num_parallel_workers exceeds"
    with pytest.raises(ValueError, match=error_msg_7):
        ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, shuffle=False,
                             num_parallel_workers=0)
    with pytest.raises(ValueError, match=error_msg_7):
        ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, shuffle=False,
                             num_parallel_workers=256)
    with pytest.raises(ValueError, match=error_msg_7):
        ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, shuffle=False,
                             num_parallel_workers=-2)

    error_msg_8 = "Argument shard_id"
    with pytest.raises(TypeError, match=error_msg_8):
        ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, num_shards=2, shard_id="0")

    def exception_func(item):
        raise Exception("Error occurred!")

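    # An exception raised inside a map() PyFunc should surface as a RuntimeError that names the failing data file.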
    try:
        data = ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task)
        data = data.map(operations=exception_func, input_columns=["image"], num_parallel_workers=1)
        num_rows = 0
        for _ in data.create_dict_iterator():
            num_rows += 1
        assert False
    except RuntimeError as e:
        assert "map operation: [PyFunc] failed. The corresponding data files:" in str(e)

    try:
        data = ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task)
        data = data.map(operations=exception_func, input_columns=["task"], num_parallel_workers=1)
        num_rows = 0
        for _ in data.create_dict_iterator():
            num_rows += 1
        assert False
    except RuntimeError as e:
        assert "map operation: [PyFunc] failed. The corresponding data files:" in str(e)


def test_cityscapes_param():
    """
    Validate CityscapesDataset with basic parameters like usage, quality_mode and task.
    """
    def test_config(usage="train", quality_mode="fine", task="color"):
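        # Returns the number of rows read on success, or the error message string on failure.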
        try:
            data = ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task)
            num_rows = 0
            for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True):
                num_rows += 1
        except (ValueError, TypeError, RuntimeError) as e:
            return str(e)
        return num_rows

    assert test_config(usage="train") == 5
    assert test_config(usage="test") == 1
    assert test_config(usage="val") == 1
    assert test_config(usage="all") == 7
    assert "usage is not within the valid set of ['train', 'test', 'val', 'all']" \
           in test_config("invalid", "fine", "instance")
    assert "Argument usage with value ['list'] is not of type [<class 'str'>]" \
           in test_config(["list"], "fine", "instance")
    assert "quality_mode is not within the valid set of ['fine', 'coarse']" \
           in test_config("train", "invalid", "instance")
    assert "Argument quality_mode with value ['list'] is not of type [<class 'str'>]" \
           in test_config("train", ["list"], "instance")
    assert "task is not within the valid set of ['instance', 'semantic', 'polygon', 'color']." \
           in test_config("train", "fine", "invalid")
    assert "Argument task with value ['list'] is not of type [<class 'str'>], but got <class 'list'>." \
           in test_config("train", "fine", ["list"])


if __name__ == "__main__":
    test_cityscapes_basic()
    test_cityscapes_polygon()
    test_cityscapes_basic_func()
    test_cityscapes_sequential_sampler()
    test_cityscapes_exception()
    test_cityscapes_param()