from contextlib import nullcontext as does_not_raise
from datetime import datetime
from io import StringIO
from itertools import cycle
from typing import Any, Callable, Generator, Iterable, Optional, Tuple, Union

from freezegun import freeze_time
from lava.utils.log_section import (
    DEFAULT_GITLAB_SECTION_TIMEOUTS,
    FALLBACK_GITLAB_SECTION_TIMEOUT,
    LogSectionType,
)
from lavacli.utils import flow_yaml as lava_yaml


def yaml_dump(data: dict[str, Any]) -> str:
    stream = StringIO()
    lava_yaml.dump(data, stream)
    return stream.getvalue()


def section_timeout(section_type: LogSectionType) -> int:
    return int(
        DEFAULT_GITLAB_SECTION_TIMEOUTS.get(
            section_type, FALLBACK_GITLAB_SECTION_TIMEOUT
        ).total_seconds()
    )


def create_lava_yaml_msg(
    dt: Callable = datetime.now, msg="test", lvl="target"
) -> dict[str, str]:
    return {"dt": str(dt()), "msg": msg, "lvl": lvl}


def generate_testsuite_result(
    name="test-mesa-ci", result="pass", metadata_extra=None, extra=None
):
    if metadata_extra is None:
        metadata_extra = {}
    if extra is None:
        extra = {}
    return {"metadata": {"result": result, **metadata_extra}, "name": name}


def jobs_logs_response(
    finished=False, msg=None, lvl="target", result=None
) -> Tuple[bool, str]:
    timed_msg = {"dt": str(datetime.now()), "msg": "New message", "lvl": lvl}
    if result:
        timed_msg["lvl"] = "target"
        timed_msg["msg"] = f"hwci: mesa: {result}"

    logs = [timed_msg] if msg is None else msg

    return finished, yaml_dump(logs)


def section_aware_message_generator(
    messages: dict[LogSectionType, Iterable[int]], result: Optional[str] = None
) -> Iterable[tuple[dict, Iterable[int]]]:
    default = [1]

    result_message_section = LogSectionType.TEST_CASE

    for section_type in LogSectionType:
        delay = messages.get(section_type, default)
        yield mock_lava_signal(section_type), delay
        if result and section_type == result_message_section:
            # To consider the job finished, the result `echo` should be produced
            # in the correct section
            yield create_lava_yaml_msg(msg=f"hwci: mesa: {result}"), delay


def message_generator():
    for section_type in LogSectionType:
        yield mock_lava_signal(section_type)


def level_generator():
    # Tests all known levels by default
    yield from cycle(("results", "feedback", "warning", "error", "debug", "target"))


def generate_n_logs(
    n=1,
    tick_fn: Union[Generator, Iterable[int], int] = 1,
    level_fn=level_generator,
    result="pass",
):
    """Simulate a log partitioned into n components"""
    level_gen = level_fn()

    if isinstance(tick_fn, Generator):
        tick_gen = tick_fn
    elif isinstance(tick_fn, Iterable):
        tick_gen = cycle(tick_fn)
    else:
        tick_gen = cycle((tick_fn,))

    with freeze_time(datetime.now()) as time_travel:
        tick_sec: int = next(tick_gen)
        while True:
            # Simulate a scenario where the target job is waiting to be started
            for _ in range(n - 1):
                level: str = next(level_gen)

                time_travel.tick(tick_sec)
                yield jobs_logs_response(finished=False, msg=[], lvl=level)

            time_travel.tick(tick_sec)
            yield jobs_logs_response(finished=True, result=result)


def to_iterable(tick_fn):
    if isinstance(tick_fn, Generator):
        return tick_fn
    elif isinstance(tick_fn, Iterable):
        return cycle(tick_fn)
    else:
        return cycle((tick_fn,))


def mock_logs(messages=None, result=None):
    if messages is None:
        messages = {}
    with freeze_time(datetime.now()) as time_travel:
        # Simulate a complete run driven by section_aware_message_generator
        for msg, tick_list in section_aware_message_generator(messages, result):
            for tick_sec in tick_list:
                yield jobs_logs_response(finished=False, msg=[msg])
                time_travel.tick(tick_sec)


def mock_lava_signal(type: LogSectionType) -> dict[str, str]:
    return {
        LogSectionType.TEST_CASE: create_lava_yaml_msg(
            msg="<STARTTC> case", lvl="debug"
        ),
        LogSectionType.TEST_SUITE: create_lava_yaml_msg(
            msg="<STARTRUN> suite", lvl="debug"
        ),
        LogSectionType.LAVA_POST_PROCESSING: create_lava_yaml_msg(
            msg="<LAVA_SIGNAL_ENDTC case>", lvl="target"
        ),
    }.get(type, create_lava_yaml_msg())
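

# Illustrative sketch, not part of the original helpers: one way a test could
# consume these mocks. The section choice and tick delays below are arbitrary
# example assumptions, and `example_mock_logs_usage` is a hypothetical name
# introduced only for illustration.
def example_mock_logs_usage() -> None:
    fake_responses = mock_logs(
        messages={LogSectionType.TEST_CASE: [1, 2]},  # assumed tick delays (seconds)
        result="pass",
    )
    for finished, raw_yaml in fake_responses:
        # Each item mirrors jobs_logs_response(): a job-finished flag plus a
        # YAML-encoded list of log dicts with "dt", "msg" and "lvl" keys.
        assert isinstance(finished, bool)
        assert isinstance(raw_yaml, str)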