# Copyright 2024 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Unit tests for the sensor metadata validator"""

import tempfile
import unittest
from pathlib import Path

import yaml

from pw_sensor.validator import Validator


class ValidatorTest(unittest.TestCase):
    """Tests the Validator class."""

    maxDiff = None

    def test_missing_compatible(self) -> None:
        """Check that missing 'compatible' key throws exception"""
        self._check_with_exception(
            metadata={},
            exception_string="ERROR: Malformed sensor metadata YAML:\n{}",
            cause_substrings=["'compatible' is a required property"],
        )

    def test_invalid_compatible_type(self) -> None:
        """Check that incorrect type of 'compatible' throws exception"""
        # A mapping missing its required keys.
        self._check_with_exception(
            metadata={"compatible": {}},
            exception_string=(
                "ERROR: Malformed sensor metadata YAML:\ncompatible: {}"
            ),
            cause_substrings=[
                "'org' is a required property",
            ],
        )

        # A list is not an object.
        self._check_with_exception(
            metadata={"compatible": []},
            exception_string=(
                "ERROR: Malformed sensor metadata YAML:\ncompatible: []"
            ),
            cause_substrings=["[] is not of type 'object'"],
        )

        # An integer is not an object.
        self._check_with_exception(
            metadata={"compatible": 1},
            exception_string=(
                "ERROR: Malformed sensor metadata YAML:\ncompatible: 1"
            ),
            cause_substrings=["1 is not of type 'object'"],
        )

        # A string is not an object.
        self._check_with_exception(
            metadata={"compatible": ""},
            exception_string=(
                "ERROR: Malformed sensor metadata YAML:\ncompatible: ''"
            ),
            cause_substrings=[" is not of type 'object'"],
        )

    def test_empty_dependency_list(self) -> None:
        """
        Check that an empty or missing 'deps' resolves to one with an empty
        'deps' list
        """
        expected = {
            "sensors": {
                "google,foo": {
                    "compatible": {"org": "google", "part": "foo"},
                    "channels": {},
                    "attributes": {},
                    "triggers": {},
                },
            },
            "channels": {},
            "attributes": {},
            "triggers": {},
        }
        # Explicitly empty 'deps' list.
        metadata = {
            "compatible": {"org": "google", "part": "foo"},
            "deps": [],
        }
        result = Validator().validate(metadata=metadata)
        self.assertEqual(result, expected)

        # Missing 'deps' key entirely; must resolve identically.
        metadata = {"compatible": {"org": "google", "part": "foo"}}
        result = Validator().validate(metadata=metadata)
        self.assertEqual(result, expected)

    def test_invalid_dependency_file(self) -> None:
        """
        Check that if an invalid dependency file is listed, we throw an error.
        We know this will not be a valid file, because we have no files in the
        include path so we have nowhere to look for the file.
        """
        self._check_with_exception(
            metadata={
                "compatible": {"org": "google", "part": "foo"},
                "deps": ["test.yaml"],
            },
            exception_string="Failed to find test.yaml using search paths:",
            cause_substrings=[],
            exception_type=FileNotFoundError,
        )

    def test_invalid_channel_name_raises_exception(self) -> None:
        """
        Check that if given a channel name that's not defined, we raise an Error
        """
        self._check_with_exception(
            metadata={
                "compatible": {"org": "google", "part": "foo"},
                "channels": {"bar": {}},
            },
            exception_string="Failed to find a definition for 'bar', did"
            " you forget a dependency?",
            cause_substrings=[],
        )

    def test_channel_info_from_deps(self) -> None:
        """
        End to end test resolving a dependency file and setting the right
        default attribute values.
        """
        # NOTE: delete=False is required so that the Validator can re-open the
        # file by name (Windows forbids a second open of an already-open temp
        # file). The file is removed via addCleanup() once the test finishes,
        # instead of being leaked as before.
        with tempfile.NamedTemporaryFile(
            mode="w", suffix=".yaml", encoding="utf-8", delete=False
        ) as dep:
            dep_filename = Path(dep.name)
            self.addCleanup(dep_filename.unlink)
            dep.write(
                yaml.safe_dump(
                    {
                        "attributes": {
                            "sample_rate": {
                                "units": {"symbol": "Hz"},
                            },
                        },
                        "channels": {
                            "bar": {
                                "units": {"symbol": "sandwiches"},
                            },
                            "soap": {
                                "name": "The soap",
                                "description": (
                                    "Measurement of how clean something is"
                                ),
                                "units": {"symbol": "sqeaks"},
                            },
                            "laundry": {
                                "description": "Clean clothes count",
                                "units": {"symbol": "items"},
                                "sub-channels": {
                                    "shirts": {
                                        "description": "Clean shirt count",
                                    },
                                    "pants": {
                                        "description": "Clean pants count",
                                    },
                                },
                            },
                        },
                        "triggers": {
                            "data_ready": {
                                "description": "notify when new data is ready",
                            },
                        },
                    },
                )
            )

        metadata = Validator(include_paths=[dep_filename.parent]).validate(
            metadata={
                "compatible": {"org": "google", "part": "foo"},
                "deps": [dep_filename.name],
                "attributes": {
                    "sample_rate": {},
                },
                "channels": {
                    "bar": {},
                    "soap": {
                        "name": "soap name override",
                    },
                    "laundry_shirts": {},
                    "laundry_pants": {},
                    "laundry": {
                        "indicies": [
                            {"name": "kids' laundry"},
                            {"name": "adults' laundry"},
                        ]
                    },
                },
                "triggers": {
                    "data_ready": {},
                },
            },
        )
        expected_trigger_data_ready = {
            "name": "data_ready",
            "description": "notify when new data is ready",
        }
        expected_attribute_sample_rate = {
            "name": "sample_rate",
            "description": "",
            "units": {"name": "Hz", "symbol": "Hz"},
        }
        expected_channel_bar = {
            "name": "bar",
            "description": "",
            "units": {
                "name": "sandwiches",
                "symbol": "sandwiches",
            },
        }
        expected_channel_soap = {
            "name": "The soap",
            "description": "Measurement of how clean something is",
            "units": {
                "name": "sqeaks",
                "symbol": "sqeaks",
            },
        }
        expected_channel_laundry_shirts = {
            "name": "laundry_shirts",
            "description": "Clean shirt count",
            "units": {
                "name": "items",
                "symbol": "items",
            },
        }
        expected_channel_laundry_pants = {
            "name": "laundry_pants",
            "description": "Clean pants count",
            "units": {
                "name": "items",
                "symbol": "items",
            },
        }
        expected_channel_laundry = {
            "name": "laundry",
            "description": "Clean clothes count",
            "units": {
                "name": "items",
                "symbol": "items",
            },
        }
        expected_sensor_channel_bar = {
            "name": "bar",
            "description": "",
            "units": {
                "name": "sandwiches",
                "symbol": "sandwiches",
            },
            "indicies": [
                {
                    "name": "bar",
                    "description": "",
                },
            ],
        }
        expected_sensor_channel_soap = {
            "name": "soap name override",
            "description": "Measurement of how clean something is",
            "units": {
                "name": "sqeaks",
                "symbol": "sqeaks",
            },
            "indicies": [
                {
                    "name": "soap name override",
                    "description": "Measurement of how clean something is",
                },
            ],
        }
        expected_sensor_channel_laundry_shirts = {
            "name": "laundry_shirts",
            "description": "Clean shirt count",
            "units": {
                "name": "items",
                "symbol": "items",
            },
            "indicies": [
                {
                    "name": "laundry_shirts",
                    "description": "Clean shirt count",
                },
            ],
        }
        expected_sensor_channel_laundry_pants = {
            "name": "laundry_pants",
            "description": "Clean pants count",
            "units": {
                "name": "items",
                "symbol": "items",
            },
            "indicies": [
                {
                    "name": "laundry_pants",
                    "description": "Clean pants count",
                },
            ],
        }
        expected_sensor_channel_laundry = {
            "name": "laundry",
            "description": "Clean clothes count",
            "units": {
                "name": "items",
                "symbol": "items",
            },
            "indicies": [
                {
                    "name": "kids' laundry",
                    "description": "Clean clothes count",
                },
                {
                    "name": "adults' laundry",
                    "description": "Clean clothes count",
                },
            ],
        }
        self.assertEqual(
            metadata,
            {
                "attributes": {"sample_rate": expected_attribute_sample_rate},
                "channels": {
                    "bar": expected_channel_bar,
                    "soap": expected_channel_soap,
                    "laundry_shirts": expected_channel_laundry_shirts,
                    "laundry_pants": expected_channel_laundry_pants,
                    "laundry": expected_channel_laundry,
                },
                "triggers": {
                    "data_ready": expected_trigger_data_ready,
                },
                "sensors": {
                    "google,foo": {
                        "compatible": {
                            "org": "google",
                            "part": "foo",
                        },
                        "attributes": {
                            "sample_rate": expected_attribute_sample_rate,
                        },
                        "triggers": {
                            "data_ready": expected_trigger_data_ready,
                        },
                        "channels": {
                            "bar": expected_sensor_channel_bar,
                            "soap": expected_sensor_channel_soap,
                            "laundry_shirts": (
                                expected_sensor_channel_laundry_shirts
                            ),
                            "laundry_pants": (
                                expected_sensor_channel_laundry_pants
                            ),
                            "laundry": expected_sensor_channel_laundry,
                        },
                    },
                },
            },
        )

    def _check_with_exception(
        self,
        metadata: dict,
        exception_string: str,
        cause_substrings: list[str],
        exception_type: type[BaseException] = RuntimeError,
    ) -> None:
        """Validate 'metadata' and assert the resulting failure.

        Args:
            metadata: Sensor metadata to feed into Validator().validate().
            exception_string: Expected exception text (trailing whitespace
                ignored).
            cause_substrings: Substrings which must each appear in the
                stringified __cause__ of the raised exception.
            exception_type: Exception class expected from validate().
        """
        with self.assertRaises(exception_type) as context:
            Validator().validate(metadata=metadata)

        self.assertEqual(str(context.exception).rstrip(), exception_string)
        for cause_substring in cause_substrings:
            # assertIn reports both operands on failure, unlike the previous
            # assertTrue(x in y) formulation.
            self.assertIn(cause_substring, str(context.exception.__cause__))


if __name__ == "__main__":
    unittest.main()