from io import StringIO
from typing import TYPE_CHECKING, Any

from ruamel.yaml import YAML

from lava.utils.lava_farm import LavaFarm, get_lava_farm
from lava.utils.ssh_job_definition import (
    generate_docker_test,
    generate_dut_test,
    wrap_boot_action,
    wrap_final_deploy_action,
)
from lava.utils.uart_job_definition import (
    fastboot_boot_action,
    fastboot_deploy_actions,
    tftp_boot_action,
    tftp_deploy_actions,
    uart_test_actions,
)

if TYPE_CHECKING:
    from lava.lava_job_submitter import LAVAJobSubmitter

from .constants import FORCE_UART, JOB_PRIORITY, NUMBER_OF_ATTEMPTS_LAVA_BOOT


class LAVAJobDefinition:
    """
    This class is responsible for generating the YAML payload to submit a LAVA
    job.
    """

    def __init__(self, job_submitter: "LAVAJobSubmitter") -> None:
        self.job_submitter: "LAVAJobSubmitter" = job_submitter

    def has_ssh_support(self) -> bool:
        if FORCE_UART:
            return False

        # Only Collabora's farm supports running a docker container as a LAVA
        # action, which is required to follow the job in an SSH session.
        current_farm = get_lava_farm()

        return current_farm == LavaFarm.COLLABORA

    def generate_lava_yaml_payload(self) -> dict[str, Any]:
        """
        Generates a YAML payload for submitting a LAVA job, based on the job
        submitter's arguments.

        Returns:
            a dictionary containing the values generated by the
            `generate_metadata` method and the actions for the LAVA job
            submission.
        """
        args = self.job_submitter
        values = self.generate_metadata()
        nfsrootfs = {
            "url": f"{args.rootfs_url_prefix}/lava-rootfs.tar.zst",
            "compression": "zstd",
        }

        init_stage1_steps = self.init_stage1_steps()
        artifact_download_steps = self.artifact_download_steps()

        deploy_actions = []
        boot_action = []
        test_actions = uart_test_actions(args, init_stage1_steps, artifact_download_steps)

        if args.boot_method == "fastboot":
            deploy_actions = fastboot_deploy_actions(self, nfsrootfs)
            boot_action = fastboot_boot_action(args)
        else:  # tftp
            deploy_actions = tftp_deploy_actions(self, nfsrootfs)
            boot_action = tftp_boot_action(args)
        if self.has_ssh_support():
            wrap_final_deploy_action(deploy_actions[-1])
            # SSH jobs use namespaces to differentiate between the DUT and the
            # docker container. Every LAVA action needs an explicit namespace
            # when we are not using the default one.
            for deploy_action in deploy_actions:
                deploy_action["namespace"] = "dut"
            wrap_boot_action(boot_action)
            test_actions = (
                generate_dut_test(args, init_stage1_steps),
                generate_docker_test(args, artifact_download_steps),
            )

        values["actions"] = [
            *[{"deploy": d} for d in deploy_actions],
            {"boot": boot_action},
            *[{"test": t} for t in test_actions],
        ]

        return values
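    # Illustrative sketch of the payload shape assembled above (the field
    # contents come from the deploy/boot/test helpers, so treat the details as
    # assumptions rather than canonical output):
    #
    #   actions:
    #     - deploy: { ... }   # one entry per deploy action; namespace "dut" over SSH
    #     - boot: { ... }
    #     - test: { ... }     # one UART test, or DUT + docker tests over SSH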
    def generate_lava_job_definition(self) -> str:
        """
        Generates a LAVA job definition in YAML format and returns it as a string.

        Returns:
            a string representation of the job definition generated by analysing
            the job submitter's arguments and environment variables
        """
        job_stream = StringIO()
        yaml = YAML()
        yaml.width = 4096
        yaml.dump(self.generate_lava_yaml_payload(), job_stream)
        return job_stream.getvalue()
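    # Usage sketch (hypothetical caller; the submission transport is an
    # assumption, not part of this class):
    #
    #   definition = LAVAJobDefinition(job_submitter).generate_lava_job_definition()
    #   # e.g. hand the YAML string to a LAVA server, such as via XML-RPC:
    #   # proxy.scheduler.jobs.submit(definition)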
    def generate_metadata(self) -> dict[str, Any]:
        # General metadata and permissions
        values = {
            "job_name": f"{self.job_submitter.project_name}: {self.job_submitter.pipeline_info}",
            "device_type": self.job_submitter.device_type,
            "visibility": {"group": [self.job_submitter.visibility_group]},
            "priority": JOB_PRIORITY,
            "context": {"extra_nfsroot_args": " init=/init rootwait usbcore.quirks=0bda:8153:k"},
            "timeouts": {
                "job": {"minutes": self.job_submitter.job_timeout_min},
                "actions": {
                    "depthcharge-retry": {
                        # Could take between 1 and 1.5 min in slower boots
                        "minutes": 4
                    },
                    "depthcharge-start": {
                        # Should take less than 1 min.
                        "minutes": 1,
                    },
                    "depthcharge-action": {
                        # This timeout covers the entire depthcharge timing,
                        # including retries
                        "minutes": 5 * NUMBER_OF_ATTEMPTS_LAVA_BOOT,
                    },
                },
            },
        }

        if self.job_submitter.lava_tags:
            values["tags"] = self.job_submitter.lava_tags.split(",")

        return values
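    # A minimal sketch of the resulting metadata, assuming illustrative
    # submitter values (project "mesa", a 30-minute job timeout):
    #
    #   job_name: "mesa: <pipeline info>"
    #   device_type: <device type>
    #   visibility: {group: [<visibility group>]}
    #   priority: <JOB_PRIORITY>
    #   timeouts:
    #     job: {minutes: 30}
    #     actions: {depthcharge-retry: ..., depthcharge-start: ..., depthcharge-action: ...}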
    def attach_kernel_and_dtb(self, deploy_field):
        if self.job_submitter.kernel_image_type:
            deploy_field["kernel"]["type"] = self.job_submitter.kernel_image_type
        if self.job_submitter.dtb_filename:
            deploy_field["dtb"] = {
                "url": f"{self.job_submitter.kernel_url_prefix}/"
                f"{self.job_submitter.dtb_filename}.dtb"
            }

    def attach_external_modules(self, deploy_field):
        if self.job_submitter.kernel_external:
            deploy_field["modules"] = {
                "url": f"{self.job_submitter.kernel_url_prefix}/modules.tar.zst",
                "compression": "zstd",
            }
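    # Sketch of the combined effect of the two attach_* helpers on a deploy
    # action, assuming a kernel image type, a dtb filename and external modules
    # are all set (values are illustrative):
    #
    #   deploy_field["kernel"]["type"] = "image"
    #   deploy_field["dtb"] = {"url": "<kernel URL prefix>/<dtb filename>.dtb"}
    #   deploy_field["modules"] = {"url": "<kernel URL prefix>/modules.tar.zst",
    #                              "compression": "zstd"}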
    def artifact_download_steps(self):
        """
        This function is responsible for setting up the SSH server on the DUT
        and for exporting the first boot environment to a file.
        """
        # Put the JWT pre-processing and Mesa download within the init-stage1.sh
        # file, as we do with the non-SSH version.
        download_steps = [
            "set -ex",
            "curl -L --retry 4 -f --retry-all-errors --retry-delay 60 "
            f"{self.job_submitter.job_rootfs_overlay_url} | tar -xz -C /",
            f"mkdir -p {self.job_submitter.ci_project_dir}",
            f"curl -L --retry 4 -f --retry-all-errors --retry-delay 60 {self.job_submitter.build_url} | "
            f"tar --zstd -x -C {self.job_submitter.ci_project_dir}",
        ]

        # If the JWT file is provided, we will use it to authenticate with the
        # cloud storage provider and will hide it from the job output in GitLab.
        if self.job_submitter.jwt_file:
            with open(self.job_submitter.jwt_file) as jwt_file:
                download_steps += [
                    "set +x  # HIDE_START",
                    f'echo -n "{jwt_file.read()}" > "{self.job_submitter.jwt_file}"',
                    "set -x  # HIDE_END",
                    f'echo "export CI_JOB_JWT_FILE={self.job_submitter.jwt_file}" >> /set-job-env-vars.sh',
                ]
        else:
            download_steps += [
                "echo Could not find jwt file, disabling S3 requests...",
                "sed -i '/S3_RESULTS_UPLOAD/d' /set-job-env-vars.sh",
            ]

        return download_steps
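    # The returned list is a plain shell script body. A rough sketch of the
    # happy path with a JWT file present (URLs and paths are placeholders):
    #
    #   set -ex
    #   curl -L --retry 4 ... <rootfs overlay URL> | tar -xz -C /
    #   mkdir -p <ci project dir>
    #   curl -L --retry 4 ... <build URL> | tar --zstd -x -C <ci project dir>
    #   set +x  # HIDE_START
    #   echo -n "<token>" > <jwt file path>
    #   set -x  # HIDE_END
    #   echo "export CI_JOB_JWT_FILE=<jwt file path>" >> /set-job-env-vars.sh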
    def init_stage1_steps(self) -> list[str]:
        run_steps = []
        # job execution script:
        #   - inline .gitlab-ci/common/init-stage1.sh
        #   - fetch and unpack per-pipeline build artifacts from build job
        #   - fetch and unpack per-job environment from lava-submit.sh
        #   - exec .gitlab-ci/common/init-stage2.sh

        with open(self.job_submitter.first_stage_init, "r") as init_sh:
            run_steps += [x.rstrip() for x in init_sh if not x.startswith("#") and x.rstrip()]
        # We cannot distribute the Adreno 660 shader firmware inside the rootfs,
        # since its license isn't bundled inside the repository.
        if self.job_submitter.device_type == "sm8350-hdk":
            run_steps.append(
                "curl -L --retry 4 -f --retry-all-errors --retry-delay 60 "
                + "https://github.com/allahjasif1990/hdk888-firmware/raw/main/a660_zap.mbn "
                + '-o "/lib/firmware/qcom/sm8350/a660_zap.mbn"'
            )

        return run_steps
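    # Illustrative sketch of init_stage1_steps() output for the sm8350-hdk case
    # above (the init-stage1.sh contents are elided, not real output):
    #
    #   ["<non-comment, non-blank lines of init-stage1.sh>",
    #    "curl -L --retry 4 ... a660_zap.mbn -o /lib/firmware/qcom/sm8350/a660_zap.mbn"]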