# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from __future__ import absolute_import, division, print_function

import binascii
import collections
import json
import os
import re
from contextlib import contextmanager

import pytest

import six

from cryptography.exceptions import UnsupportedAlgorithm

import cryptography_vectors


HashVector = collections.namedtuple("HashVector", ["message", "digest"])
KeyedHashVector = collections.namedtuple(
    "KeyedHashVector", ["message", "digest", "key"]
)


def check_backend_support(backend, item):
    for mark in item.node.iter_markers("supported"):
        if not mark.kwargs["only_if"](backend):
            pytest.skip("{} ({})".format(mark.kwargs["skip_message"], backend))


@contextmanager
def raises_unsupported_algorithm(reason):
    with pytest.raises(UnsupportedAlgorithm) as exc_info:
        yield exc_info

    assert exc_info.value._reason is reason


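# Illustrative sketch of how raises_unsupported_algorithm is typically used:
# the body raises UnsupportedAlgorithm tagged with a reason, and the context
# manager verifies that exact reason. The _Reasons enum is a private helper
# in cryptography.exceptions, so treat this as an illustration rather than a
# guaranteed public API.
def _example_raises_unsupported_algorithm():
    from cryptography.exceptions import _Reasons

    with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
        raise UnsupportedAlgorithm(
            "this hash is not supported", _Reasons.UNSUPPORTED_HASH
        )

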
def load_vectors_from_file(filename, loader, mode="r"):
    with cryptography_vectors.open_vector_file(filename, mode) as vector_file:
        return loader(vector_file)


def load_nist_vectors(vector_data):
    test_data = None
    data = []

    for line in vector_data:
        line = line.strip()

        # Blank lines, comments, and section headers are ignored
        if (
            not line
            or line.startswith("#")
            or (line.startswith("[") and line.endswith("]"))
        ):
            continue

        if line.strip() == "FAIL":
            test_data["fail"] = True
            continue

        # Build our data using a simple Key = Value format
        name, value = [c.strip() for c in line.split("=")]

        # Some tests (PBKDF2) contain \0, which should be interpreted as a
        # null character rather than a literal backslash-zero.
        value = value.replace("\\0", "\0")

        # COUNT is a special token that indicates a new block of data
        if name.upper() == "COUNT":
            test_data = {}
            data.append(test_data)
            continue
        # For all other tokens we simply want the name, value stored in
        # the dictionary
        else:
            test_data[name.lower()] = value.encode("ascii")

    return data


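# Illustrative sketch of the key/value format load_nist_vectors parses; the
# vector lines below are made up for demonstration, not taken from a real
# NIST file.
def _example_load_nist_vectors():
    example = [
        "# comment",
        "[SECTION]",
        "COUNT = 0",
        "KEY = 00112233",
        "PLAINTEXT = deadbeef",
    ]
    assert load_nist_vectors(example) == [
        {"key": b"00112233", "plaintext": b"deadbeef"}
    ]

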
def load_cryptrec_vectors(vector_data):
    cryptrec_list = []

    for line in vector_data:
        line = line.strip()

        # Blank lines and comments are ignored
        if not line or line.startswith("#"):
            continue

        if line.startswith("K"):
            key = line.split(" : ")[1].replace(" ", "").encode("ascii")
        elif line.startswith("P"):
            pt = line.split(" : ")[1].replace(" ", "").encode("ascii")
        elif line.startswith("C"):
            ct = line.split(" : ")[1].replace(" ", "").encode("ascii")
            # after a C is found the K+P+C tuple is complete
            # there are many P+C pairs for each K
            cryptrec_list.append(
                {"key": key, "plaintext": pt, "ciphertext": ct}
            )
        else:
            raise ValueError("Invalid line in file '{}'".format(line))
    return cryptrec_list


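# Illustrative sketch of the CRYPTREC "K/P/C" layout handled above, using
# made-up hex values: one key line followed by a plaintext/ciphertext pair.
def _example_load_cryptrec_vectors():
    example = [
        "K : 00 11 22 33",
        "P : aa bb",
        "C : cc dd",
    ]
    assert load_cryptrec_vectors(example) == [
        {"key": b"00112233", "plaintext": b"aabb", "ciphertext": b"ccdd"}
    ]

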
def load_hash_vectors(vector_data):
    vectors = []
    key = None
    msg = None
    md = None

    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#") or line.startswith("["):
            continue

        if line.startswith("Len"):
            length = int(line.split(" = ")[1])
        elif line.startswith("Key"):
            # HMAC vectors contain a key attribute. Hash vectors do not.
            key = line.split(" = ")[1].encode("ascii")
        elif line.startswith("Msg"):
            # In the NIST vectors they have chosen to represent an empty
            # string as hex 00, which is of course not actually an empty
            # string. So we parse the provided length and catch this edge case.
            msg = line.split(" = ")[1].encode("ascii") if length > 0 else b""
        elif line.startswith("MD") or line.startswith("Output"):
            md = line.split(" = ")[1]
            # after MD is found the Msg+MD (+ potential key) tuple is complete
            if key is not None:
                vectors.append(KeyedHashVector(msg, md, key))
                key = None
                msg = None
                md = None
            else:
                vectors.append(HashVector(msg, md))
                msg = None
                md = None
        else:
            raise ValueError("Unknown line in hash vector")
    return vectors


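# Illustrative sketch of the Len/Msg/MD triplets consumed above, with made-up
# values. Note that the message is returned as bytes while the digest stays a
# hex string.
def _example_load_hash_vectors():
    example = [
        "Len = 16",
        "Msg = abcd",
        "MD = 1234",
    ]
    assert load_hash_vectors(example) == [HashVector(b"abcd", "1234")]

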
def load_pkcs1_vectors(vector_data):
    """
    Loads data out of RSA PKCS #1 vector files.
    """
    private_key_vector = None
    public_key_vector = None
    attr = None
    key = None
    example_vector = None
    examples = []
    vectors = []
    for line in vector_data:
        if (
            line.startswith("# PSS Example")
            or line.startswith("# OAEP Example")
            or line.startswith("# PKCS#1 v1.5")
        ):
            if example_vector:
                for key, value in six.iteritems(example_vector):
                    hex_str = "".join(value).replace(" ", "").encode("ascii")
                    example_vector[key] = hex_str
                examples.append(example_vector)

            attr = None
            example_vector = collections.defaultdict(list)

        if line.startswith("# Message"):
            attr = "message"
            continue
        elif line.startswith("# Salt"):
            attr = "salt"
            continue
        elif line.startswith("# Seed"):
            attr = "seed"
            continue
        elif line.startswith("# Signature"):
            attr = "signature"
            continue
        elif line.startswith("# Encryption"):
            attr = "encryption"
            continue
        elif example_vector and line.startswith(
            "# ============================================="
        ):
            for key, value in six.iteritems(example_vector):
                hex_str = "".join(value).replace(" ", "").encode("ascii")
                example_vector[key] = hex_str
            examples.append(example_vector)
            example_vector = None
            attr = None
        elif example_vector and line.startswith("#"):
            continue
        else:
            if attr is not None and example_vector is not None:
                example_vector[attr].append(line.strip())
                continue

        if line.startswith("# Example") or line.startswith(
            "# ============================================="
        ):
            if key:
                assert private_key_vector
                assert public_key_vector

                for key, value in six.iteritems(public_key_vector):
                    hex_str = "".join(value).replace(" ", "")
                    public_key_vector[key] = int(hex_str, 16)

                for key, value in six.iteritems(private_key_vector):
                    hex_str = "".join(value).replace(" ", "")
                    private_key_vector[key] = int(hex_str, 16)

                private_key_vector["examples"] = examples
                examples = []

                assert (
                    private_key_vector["public_exponent"]
                    == public_key_vector["public_exponent"]
                )

                assert (
                    private_key_vector["modulus"]
                    == public_key_vector["modulus"]
                )

                vectors.append((private_key_vector, public_key_vector))

            public_key_vector = collections.defaultdict(list)
            private_key_vector = collections.defaultdict(list)
            key = None
            attr = None

        if private_key_vector is None or public_key_vector is None:
            # Random garbage to defeat CPython's peephole optimizer so that
            # coverage records correctly: https://bugs.python.org/issue2506
            1 + 1
            continue

        if line.startswith("# Private key"):
            key = private_key_vector
        elif line.startswith("# Public key"):
            key = public_key_vector
        elif line.startswith("# Modulus:"):
            attr = "modulus"
        elif line.startswith("# Public exponent:"):
            attr = "public_exponent"
        elif line.startswith("# Exponent:"):
            if key is public_key_vector:
                attr = "public_exponent"
            else:
                assert key is private_key_vector
                attr = "private_exponent"
        elif line.startswith("# Prime 1:"):
            attr = "p"
        elif line.startswith("# Prime 2:"):
            attr = "q"
        elif line.startswith("# Prime exponent 1:"):
            attr = "dmp1"
        elif line.startswith("# Prime exponent 2:"):
            attr = "dmq1"
        elif line.startswith("# Coefficient:"):
            attr = "iqmp"
        elif line.startswith("#"):
            attr = None
        else:
            if key is not None and attr is not None:
                key[attr].append(line.strip())
    return vectors


def load_rsa_nist_vectors(vector_data):
    test_data = None
    p = None
    salt_length = None
    data = []

    for line in vector_data:
        line = line.strip()

        # Blank lines and section headers are ignored
        if not line or line.startswith("["):
            continue

        if line.startswith("# Salt len:"):
            salt_length = int(line.split(":")[1].strip())
            continue
        elif line.startswith("#"):
            continue

        # Build our data using a simple Key = Value format
        name, value = [c.strip() for c in line.split("=")]

        if name == "n":
            n = int(value, 16)
        elif name == "e" and p is None:
            e = int(value, 16)
        elif name == "p":
            p = int(value, 16)
        elif name == "q":
            q = int(value, 16)
        elif name == "SHAAlg":
            if p is None:
                test_data = {
                    "modulus": n,
                    "public_exponent": e,
                    "salt_length": salt_length,
                    "algorithm": value,
                    "fail": False,
                }
            else:
                test_data = {"modulus": n, "p": p, "q": q, "algorithm": value}
                if salt_length is not None:
                    test_data["salt_length"] = salt_length
            data.append(test_data)
        elif name == "e" and p is not None:
            test_data["public_exponent"] = int(value, 16)
        elif name == "d":
            test_data["private_exponent"] = int(value, 16)
        elif name == "Result":
            test_data["fail"] = value.startswith("F")
        # For all other tokens we simply want the name, value stored in
        # the dictionary
        else:
            test_data[name.lower()] = value.encode("ascii")

    return data


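# Illustrative sketch of a verification-style block (public key plus message,
# signature, and result) as load_rsa_nist_vectors handles it; all values are
# made up and far too small to be real RSA parameters.
def _example_load_rsa_nist_vectors():
    example = [
        "# Salt len: 20",
        "n = 0a",
        "e = 3",
        "SHAAlg = SHA256",
        "Msg = abcd",
        "S = 1234",
        "Result = P",
    ]
    assert load_rsa_nist_vectors(example) == [
        {
            "modulus": 10,
            "public_exponent": 3,
            "salt_length": 20,
            "algorithm": "SHA256",
            "fail": False,
            "msg": b"abcd",
            "s": b"1234",
        }
    ]

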
def load_fips_dsa_key_pair_vectors(vector_data):
    """
    Loads data out of the FIPS DSA KeyPair vector files.
    """
    vectors = []
    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#") or line.startswith("[mod"):
            continue

        if line.startswith("P"):
            vectors.append({"p": int(line.split("=")[1], 16)})
        elif line.startswith("Q"):
            vectors[-1]["q"] = int(line.split("=")[1], 16)
        elif line.startswith("G"):
            vectors[-1]["g"] = int(line.split("=")[1], 16)
        elif line.startswith("X") and "x" not in vectors[-1]:
            vectors[-1]["x"] = int(line.split("=")[1], 16)
        elif line.startswith("X") and "x" in vectors[-1]:
            vectors.append(
                {
                    "p": vectors[-1]["p"],
                    "q": vectors[-1]["q"],
                    "g": vectors[-1]["g"],
                    "x": int(line.split("=")[1], 16),
                }
            )
        elif line.startswith("Y"):
            vectors[-1]["y"] = int(line.split("=")[1], 16)

    return vectors


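# Illustrative sketch of the P/Q/G domain parameters followed by an X/Y key
# pair as load_fips_dsa_key_pair_vectors expects; the hex values are toy
# numbers, not valid DSA parameters.
def _example_load_fips_dsa_key_pair_vectors():
    example = [
        "[mod = L=2048, N=224]",
        "P = 0f",
        "Q = 05",
        "G = 02",
        "X = 03",
        "Y = 04",
    ]
    assert load_fips_dsa_key_pair_vectors(example) == [
        {"p": 15, "q": 5, "g": 2, "x": 3, "y": 4}
    ]

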
def load_fips_dsa_sig_vectors(vector_data):
    """
    Loads data out of the FIPS DSA SigVer vector files.
    """
    vectors = []
    sha_regex = re.compile(
        r"\[mod = L=...., N=..., SHA-(?P<sha>1|224|256|384|512)\]"
    )

    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        sha_match = sha_regex.match(line)
        if sha_match:
            digest_algorithm = "SHA-{}".format(sha_match.group("sha"))

        if line.startswith("[mod"):
            continue

        name, value = [c.strip() for c in line.split("=")]

        if name == "P":
            vectors.append(
                {"p": int(value, 16), "digest_algorithm": digest_algorithm}
            )
        elif name == "Q":
            vectors[-1]["q"] = int(value, 16)
        elif name == "G":
            vectors[-1]["g"] = int(value, 16)
        elif name == "Msg" and "msg" not in vectors[-1]:
            hexmsg = value.strip().encode("ascii")
            vectors[-1]["msg"] = binascii.unhexlify(hexmsg)
        elif name == "Msg" and "msg" in vectors[-1]:
            hexmsg = value.strip().encode("ascii")
            vectors.append(
                {
                    "p": vectors[-1]["p"],
                    "q": vectors[-1]["q"],
                    "g": vectors[-1]["g"],
                    "digest_algorithm": vectors[-1]["digest_algorithm"],
                    "msg": binascii.unhexlify(hexmsg),
                }
            )
        elif name == "X":
            vectors[-1]["x"] = int(value, 16)
        elif name == "Y":
            vectors[-1]["y"] = int(value, 16)
        elif name == "R":
            vectors[-1]["r"] = int(value, 16)
        elif name == "S":
            vectors[-1]["s"] = int(value, 16)
        elif name == "Result":
            vectors[-1]["result"] = value.split("(")[0].strip()

    return vectors


# https://tools.ietf.org/html/rfc4492#appendix-A
_ECDSA_CURVE_NAMES = {
    "P-192": "secp192r1",
    "P-224": "secp224r1",
    "P-256": "secp256r1",
    "P-384": "secp384r1",
    "P-521": "secp521r1",
    "K-163": "sect163k1",
    "K-233": "sect233k1",
    "K-256": "secp256k1",
    "K-283": "sect283k1",
    "K-409": "sect409k1",
    "K-571": "sect571k1",
    "B-163": "sect163r2",
    "B-233": "sect233r1",
    "B-283": "sect283r1",
    "B-409": "sect409r1",
    "B-571": "sect571r1",
}


def load_fips_ecdsa_key_pair_vectors(vector_data):
    """
    Loads data out of the FIPS ECDSA KeyPair vector files.
    """
    vectors = []
    key_data = None
    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line[1:-1] in _ECDSA_CURVE_NAMES:
            curve_name = _ECDSA_CURVE_NAMES[line[1:-1]]

        elif line.startswith("d = "):
            if key_data is not None:
                vectors.append(key_data)

            key_data = {"curve": curve_name, "d": int(line.split("=")[1], 16)}

        elif key_data is not None:
            if line.startswith("Qx = "):
                key_data["x"] = int(line.split("=")[1], 16)
            elif line.startswith("Qy = "):
                key_data["y"] = int(line.split("=")[1], 16)

    assert key_data is not None
    vectors.append(key_data)

    return vectors


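# Illustrative sketch of a curve header followed by d/Qx/Qy lines as consumed
# by load_fips_ecdsa_key_pair_vectors; the scalar values are toy numbers.
def _example_load_fips_ecdsa_key_pair_vectors():
    example = [
        "[P-256]",
        "d = 01",
        "Qx = 02",
        "Qy = 03",
    ]
    assert load_fips_ecdsa_key_pair_vectors(example) == [
        {"curve": "secp256r1", "d": 1, "x": 2, "y": 3}
    ]

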
def load_fips_ecdsa_signing_vectors(vector_data):
    """
    Loads data out of the FIPS ECDSA SigGen vector files.
    """
    vectors = []

    curve_rx = re.compile(
        r"\[(?P<curve>[PKB]-[0-9]{3}),SHA-(?P<sha>1|224|256|384|512)\]"
    )

    data = None
    for line in vector_data:
        line = line.strip()

        curve_match = curve_rx.match(line)
        if curve_match:
            curve_name = _ECDSA_CURVE_NAMES[curve_match.group("curve")]
            digest_name = "SHA-{}".format(curve_match.group("sha"))

        elif line.startswith("Msg = "):
            if data is not None:
                vectors.append(data)

            hexmsg = line.split("=")[1].strip().encode("ascii")

            data = {
                "curve": curve_name,
                "digest_algorithm": digest_name,
                "message": binascii.unhexlify(hexmsg),
            }

        elif data is not None:
            if line.startswith("Qx = "):
                data["x"] = int(line.split("=")[1], 16)
            elif line.startswith("Qy = "):
                data["y"] = int(line.split("=")[1], 16)
            elif line.startswith("R = "):
                data["r"] = int(line.split("=")[1], 16)
            elif line.startswith("S = "):
                data["s"] = int(line.split("=")[1], 16)
            elif line.startswith("d = "):
                data["d"] = int(line.split("=")[1], 16)
            elif line.startswith("Result = "):
                data["fail"] = line.split("=")[1].strip()[0] == "F"

    assert data is not None
    vectors.append(data)
    return vectors


def load_kasvs_dh_vectors(vector_data):
    """
    Loads data out of the KASVS key exchange vector data
    """

    result_rx = re.compile(r"([FP]) \(([0-9]+) -")

    vectors = []
    data = {"fail_z": False, "fail_agree": False}

    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line.startswith("P = "):
            data["p"] = int(line.split("=")[1], 16)
        elif line.startswith("Q = "):
            data["q"] = int(line.split("=")[1], 16)
        elif line.startswith("G = "):
            data["g"] = int(line.split("=")[1], 16)
        elif line.startswith("Z = "):
            z_hex = line.split("=")[1].strip().encode("ascii")
            data["z"] = binascii.unhexlify(z_hex)
        elif line.startswith("XstatCAVS = "):
            data["x1"] = int(line.split("=")[1], 16)
        elif line.startswith("YstatCAVS = "):
            data["y1"] = int(line.split("=")[1], 16)
        elif line.startswith("XstatIUT = "):
            data["x2"] = int(line.split("=")[1], 16)
        elif line.startswith("YstatIUT = "):
            data["y2"] = int(line.split("=")[1], 16)
        elif line.startswith("Result = "):
            result_str = line.split("=")[1].strip()
            match = result_rx.match(result_str)

            if match.group(1) == "F":
                if int(match.group(2)) in (5, 10):
                    data["fail_z"] = True
                else:
                    data["fail_agree"] = True

            vectors.append(data)

            data = {
                "p": data["p"],
                "q": data["q"],
                "g": data["g"],
                "fail_z": False,
                "fail_agree": False,
            }

    return vectors


def load_kasvs_ecdh_vectors(vector_data):
    """
    Loads data out of the KASVS key exchange vector data
    """

    curve_name_map = {
        "P-192": "secp192r1",
        "P-224": "secp224r1",
        "P-256": "secp256r1",
        "P-384": "secp384r1",
        "P-521": "secp521r1",
    }

    result_rx = re.compile(r"([FP]) \(([0-9]+) -")

    tags = []
    sets = {}
    vectors = []

    # find info in header
    for line in vector_data:
        line = line.strip()

        if line.startswith("#"):
            parm = line.split("Parameter set(s) supported:")
            if len(parm) == 2:
                names = parm[1].strip().split()
                for n in names:
                    tags.append("[%s]" % n)
                break

    # Sets Metadata
    tag = None
    curve = None
    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line in tags:
            tag = line
            curve = None
        elif line.startswith("[Curve selected:"):
            curve = curve_name_map[line.split(":")[1].strip()[:-1]]

        if tag is not None and curve is not None:
            sets[tag.strip("[]")] = curve
            tag = None
        if len(tags) == len(sets):
            break

    # Data
    data = {
        "CAVS": {},
        "IUT": {},
    }
    tag = None
    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line.startswith("["):
            tag = line.split()[0][1:]
        elif line.startswith("COUNT = "):
            data["COUNT"] = int(line.split("=")[1])
        elif line.startswith("dsCAVS = "):
            data["CAVS"]["d"] = int(line.split("=")[1], 16)
        elif line.startswith("QsCAVSx = "):
            data["CAVS"]["x"] = int(line.split("=")[1], 16)
        elif line.startswith("QsCAVSy = "):
            data["CAVS"]["y"] = int(line.split("=")[1], 16)
        elif line.startswith("dsIUT = "):
            data["IUT"]["d"] = int(line.split("=")[1], 16)
        elif line.startswith("QsIUTx = "):
            data["IUT"]["x"] = int(line.split("=")[1], 16)
        elif line.startswith("QsIUTy = "):
            data["IUT"]["y"] = int(line.split("=")[1], 16)
        elif line.startswith("OI = "):
            data["OI"] = int(line.split("=")[1], 16)
        elif line.startswith("Z = "):
            data["Z"] = int(line.split("=")[1], 16)
        elif line.startswith("DKM = "):
            data["DKM"] = int(line.split("=")[1], 16)
        elif line.startswith("Result = "):
            result_str = line.split("=")[1].strip()
            match = result_rx.match(result_str)

            if match.group(1) == "F":
                data["fail"] = True
            else:
                data["fail"] = False
            data["errno"] = int(match.group(2))

            data["curve"] = sets[tag]

            vectors.append(data)

            data = {
                "CAVS": {},
                "IUT": {},
            }

    return vectors


def load_x963_vectors(vector_data):
    """
    Loads data out of the X9.63 vector data
    """

    vectors = []

    # Sets Metadata
    hashname = None
    vector = {}
    for line in vector_data:
        line = line.strip()

        if line.startswith("[SHA"):
            hashname = line[1:-1]
            shared_secret_len = 0
            shared_info_len = 0
            key_data_len = 0
        elif line.startswith("[shared secret length"):
            shared_secret_len = int(line[1:-1].split("=")[1].strip())
        elif line.startswith("[SharedInfo length"):
            shared_info_len = int(line[1:-1].split("=")[1].strip())
        elif line.startswith("[key data length"):
            key_data_len = int(line[1:-1].split("=")[1].strip())
        elif line.startswith("COUNT"):
            count = int(line.split("=")[1].strip())
            vector["hash"] = hashname
            vector["count"] = count
            vector["shared_secret_length"] = shared_secret_len
            vector["sharedinfo_length"] = shared_info_len
            vector["key_data_length"] = key_data_len
        elif line.startswith("Z"):
            vector["Z"] = line.split("=")[1].strip()
            assert ((shared_secret_len + 7) // 8) * 2 == len(vector["Z"])
        elif line.startswith("SharedInfo"):
            if shared_info_len != 0:
                vector["sharedinfo"] = line.split("=")[1].strip()
                silen = len(vector["sharedinfo"])
                assert ((shared_info_len + 7) // 8) * 2 == silen
        elif line.startswith("key_data"):
            vector["key_data"] = line.split("=")[1].strip()
            assert ((key_data_len + 7) // 8) * 2 == len(vector["key_data"])
            vectors.append(vector)
            vector = {}

    return vectors


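# Illustrative sketch of the bracketed length headers plus COUNT/Z/key_data
# lines that load_x963_vectors parses. The bit lengths and hex strings are
# made up, but kept consistent so the length asserts above hold.
def _example_load_x963_vectors():
    example = [
        "[SHA-1]",
        "[shared secret length = 16]",
        "[SharedInfo length = 0]",
        "[key data length = 16]",
        "COUNT = 0",
        "Z = 00ff",
        "key_data = 1234",
    ]
    assert load_x963_vectors(example) == [
        {
            "hash": "SHA-1",
            "count": 0,
            "shared_secret_length": 16,
            "sharedinfo_length": 0,
            "key_data_length": 16,
            "Z": "00ff",
            "key_data": "1234",
        }
    ]

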
def load_nist_kbkdf_vectors(vector_data):
    """
    Load NIST SP 800-108 KDF Vectors
    """
    vectors = []
    test_data = None
    tag = {}

    for line in vector_data:
        line = line.strip()

        if not line or line.startswith("#"):
            continue

        if line.startswith("[") and line.endswith("]"):
            tag_data = line[1:-1]
            name, value = [c.strip() for c in tag_data.split("=")]
            if value.endswith("_BITS"):
                value = int(value.split("_")[0])
                tag.update({name.lower(): value})
                continue

            tag.update({name.lower(): value.lower()})
        elif line.startswith("COUNT="):
            test_data = {}
            test_data.update(tag)
            vectors.append(test_data)
        elif line.startswith("L"):
            name, value = [c.strip() for c in line.split("=")]
            test_data[name.lower()] = int(value)
        else:
            name, value = [c.strip() for c in line.split("=")]
            test_data[name.lower()] = value.encode("ascii")

    return vectors


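# Illustrative sketch of the bracketed tag headers and COUNT blocks parsed by
# load_nist_kbkdf_vectors; the PRF name and hex strings are placeholders.
def _example_load_nist_kbkdf_vectors():
    example = [
        "[PRF=HMAC_SHA256]",
        "[CTRLOCATION=BEFORE_FIXED]",
        "[RLEN=8_BITS]",
        "COUNT=0",
        "L = 128",
        "KI = ab",
        "KO = cd",
    ]
    assert load_nist_kbkdf_vectors(example) == [
        {
            "prf": "hmac_sha256",
            "ctrlocation": "before_fixed",
            "rlen": 8,
            "l": 128,
            "ki": b"ab",
            "ko": b"cd",
        }
    ]

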
def load_ed25519_vectors(vector_data):
    data = []
    for line in vector_data:
        secret_key, public_key, message, signature, _ = line.split(":")
        # In the vectors the first element is secret key + public key
        secret_key = secret_key[0:64]
        # In the vectors the signature section is signature + message
        signature = signature[0:128]
        data.append(
            {
                "secret_key": secret_key,
                "public_key": public_key,
                "message": message,
                "signature": signature,
            }
        )
    return data


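# Illustrative sketch of one colon-separated ed25519 vector line: the first
# field is secret key + public key, the fourth is signature + message, and
# each line ends with a trailing colon. The hex below is filler of the right
# lengths, not a real key pair or signature.
def _example_load_ed25519_vectors():
    sk = "11" * 32
    pk = "22" * 32
    sig = "33" * 64
    line = "{0}{1}:{1}::{2}:".format(sk, pk, sig)
    assert load_ed25519_vectors([line]) == [
        {
            "secret_key": sk,
            "public_key": pk,
            "message": "",
            "signature": sig,
        }
    ]

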
def load_nist_ccm_vectors(vector_data):
    test_data = None
    section_data = None
    global_data = {}
    new_section = False
    data = []

    for line in vector_data:
        line = line.strip()

        # Blank lines and comments should be ignored
        if not line or line.startswith("#"):
            continue

        # Some of the CCM vectors define these lengths globally. They are
        # always at the top before the first section header (see: VADT, VNT,
        # VPT)
        if line.startswith(("Alen", "Plen", "Nlen", "Tlen")):
            name, value = [c.strip() for c in line.split("=")]
            global_data[name.lower()] = int(value)
            continue

        # section headers contain length data we might care about
        if line.startswith("["):
            new_section = True
            section_data = {}
            section = line[1:-1]
            items = [c.strip() for c in section.split(",")]
            for item in items:
                name, value = [c.strip() for c in item.split("=")]
                section_data[name.lower()] = int(value)
            continue

        name, value = [c.strip() for c in line.split("=")]

        if name.lower() in ("key", "nonce") and new_section:
            section_data[name.lower()] = value.encode("ascii")
            continue

        new_section = False

        # Payload is sometimes special because these vectors are absurd. Each
        # example may or may not have a payload. If it does not then the
        # previous example's payload should be used. We accomplish this by
        # writing it into the section_data. Because we update each example
        # with the section data it will be overwritten if a new payload value
        # is present. NIST should be ashamed of their vector creation.
        if name.lower() == "payload":
            section_data[name.lower()] = value.encode("ascii")

        # Result is a special token telling us if the test should pass/fail.
        # This is only present in the DVPT CCM tests
        if name.lower() == "result":
            if value.lower() == "pass":
                test_data["fail"] = False
            else:
                test_data["fail"] = True
            continue

        # COUNT is a special token that indicates a new block of data
        if name.lower() == "count":
            test_data = {}
            test_data.update(global_data)
            test_data.update(section_data)
            data.append(test_data)
            continue
        # For all other tokens we simply want the name, value stored in
        # the dictionary
        else:
            test_data[name.lower()] = value.encode("ascii")

    return data


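# Illustrative sketch of the global lengths, a section header, and one COUNT
# block as load_nist_ccm_vectors consumes them; all hex values are filler.
def _example_load_nist_ccm_vectors():
    example = [
        "Alen = 0",
        "[Plen = 4]",
        "Key = 00112233",
        "Nonce = aabbcc",
        "Count = 0",
        "Adata = 00",
        "Payload = 01020304",
        "CT = 0a0b0c0d",
    ]
    assert load_nist_ccm_vectors(example) == [
        {
            "alen": 0,
            "plen": 4,
            "key": b"00112233",
            "nonce": b"aabbcc",
            "adata": b"00",
            "payload": b"01020304",
            "ct": b"0a0b0c0d",
        }
    ]

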
class WycheproofTest(object):
    def __init__(self, testfiledata, testgroup, testcase):
        self.testfiledata = testfiledata
        self.testgroup = testgroup
        self.testcase = testcase

    def __repr__(self):
        return "<WycheproofTest({!r}, {!r}, {!r}, tcId={})>".format(
            self.testfiledata,
            self.testgroup,
            self.testcase,
            self.testcase["tcId"],
        )

    @property
    def valid(self):
        return self.testcase["result"] == "valid"

    @property
    def acceptable(self):
        return self.testcase["result"] == "acceptable"

    @property
    def invalid(self):
        return self.testcase["result"] == "invalid"

    def has_flag(self, flag):
        return flag in self.testcase["flags"]


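# Illustrative sketch of WycheproofTest with a minimal hand-written test case
# dictionary; real Wycheproof JSON carries many more fields per case.
def _example_wycheproof_test():
    testcase = {"tcId": 1, "result": "valid", "flags": []}
    test = WycheproofTest(
        {"algorithm": "AES-GCM"}, {"keySize": 128}, testcase
    )
    assert test.valid
    assert not test.invalid
    assert not test.has_flag("WeakNonce")

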
def load_wycheproof_tests(wycheproof, test_file):
    path = os.path.join(wycheproof, "testvectors", test_file)
    with open(path) as f:
        data = json.load(f)
        for group in data.pop("testGroups"):
            cases = group.pop("tests")
            for c in cases:
                yield WycheproofTest(data, group, c)