/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "acr_r361.h"

#include <engine/falcon.h>
#include <core/msgqueue.h>
#include <subdev/pmu.h>
#include <engine/sec2.h>

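/*
 * Fill the bootloader descriptor of a generic LS falcon (used here for FECS
 * and GPCCS): code and data DMA addresses are computed from the ucode image's
 * position inside the WPR region.
 */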
static void
acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr,
                               const struct ls_ucode_img *img, u64 wpr_addr,
                               void *_desc)
{
        struct acr_r361_flcn_bl_desc *desc = _desc;
        const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
        u64 base, addr_code, addr_data;

        base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
        addr_code = base + pdesc->app_resident_code_offset;
        addr_data = base + pdesc->app_resident_data_offset;

        desc->ctx_dma = FALCON_DMAIDX_UCODE;
        desc->code_dma_base = u64_to_flcn64(addr_code);
        desc->non_sec_code_off = pdesc->app_resident_code_offset;
        desc->non_sec_code_size = pdesc->app_resident_code_size;
        desc->code_entry_point = pdesc->app_imem_entry;
        desc->data_dma_base = u64_to_flcn64(addr_data);
        desc->data_size = pdesc->app_resident_data_size;
}

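/*
 * Fill the bootloader descriptor used to run the high-secure ACR binary
 * itself; code and data offsets are taken from the HS load header.
 */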
void
acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
                             u64 offset)
{
        struct acr_r361_flcn_bl_desc *bl_desc = _bl_desc;

        bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
        bl_desc->code_dma_base = u64_to_flcn64(offset);
        bl_desc->non_sec_code_off = hdr->non_sec_code_off;
        bl_desc->non_sec_code_size = hdr->non_sec_code_size;
        bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
        bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
        bl_desc->code_entry_point = 0;
        bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
        bl_desc->data_size = hdr->data_size;
}

static const struct acr_r352_lsf_func
acr_r361_ls_fecs_func_0 = {
        .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
        .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
};

const struct acr_r352_ls_func
acr_r361_ls_fecs_func = {
        .load = acr_ls_ucode_load_fecs,
        .version_max = 0,
        .version = {
                &acr_r361_ls_fecs_func_0,
        }
};

static const struct acr_r352_lsf_func
acr_r361_ls_gpccs_func_0 = {
        .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
        .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
        /* GPCCS will be loaded using PRI */
        .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};

const struct acr_r352_ls_func
acr_r361_ls_gpccs_func = {
        .load = acr_ls_ucode_load_gpccs,
        .version_max = 0,
        .version = {
                &acr_r361_ls_gpccs_func_0,
        }
};

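/*
 * Bootloader descriptor layout shared by the PMU and SEC2 falcons, which
 * take boot arguments (argc/argv) in addition to the code/data DMA addresses.
 */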
struct acr_r361_pmu_bl_desc {
        u32 reserved;
        u32 dma_idx;
        struct flcn_u64 code_dma_base;
        u32 total_code_size;
        u32 code_size_to_load;
        u32 code_entry_point;
        struct flcn_u64 data_dma_base;
        u32 data_size;
        struct flcn_u64 overlay_dma_base;
        u32 argc;
        u32 argv;
};

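/*
 * Fill the PMU bootloader descriptor. The command line is placed at the end
 * of the PMU falcon's data memory, so point argv there.
 */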
static void
acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr,
                              const struct ls_ucode_img *img, u64 wpr_addr,
                              void *_desc)
{
        const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
        const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
        struct acr_r361_pmu_bl_desc *desc = _desc;
        u64 base, addr_code, addr_data;
        u32 addr_args;

        base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
        addr_code = base + pdesc->app_resident_code_offset;
        addr_data = base + pdesc->app_resident_data_offset;
        addr_args = pmu->falcon->data.limit;
        addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;

        desc->dma_idx = FALCON_DMAIDX_UCODE;
        desc->code_dma_base = u64_to_flcn64(addr_code);
        desc->total_code_size = pdesc->app_size;
        desc->code_size_to_load = pdesc->app_resident_code_size;
        desc->code_entry_point = pdesc->app_imem_entry;
        desc->data_dma_base = u64_to_flcn64(addr_data);
        desc->data_size = pdesc->app_resident_data_size;
        desc->overlay_dma_base = u64_to_flcn64(addr_code);
        desc->argc = 1;
        desc->argv = addr_args;
}

static const struct acr_r352_lsf_func
acr_r361_ls_pmu_func_0 = {
        .generate_bl_desc = acr_r361_generate_pmu_bl_desc,
        .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
};

const struct acr_r352_ls_func
acr_r361_ls_pmu_func = {
        .load = acr_ls_ucode_load_pmu,
        .post_run = acr_ls_pmu_post_run,
        .version_max = 0,
        .version = {
                &acr_r361_ls_pmu_func_0,
        }
};

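/*
 * Fill the SEC2 bootloader descriptor. The layout is the same as for the PMU,
 * but the boot arguments live at the beginning of the SEC2's EMEM rather
 * than at the end of its data memory.
 */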
static void
acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr,
                               const struct ls_ucode_img *img, u64 wpr_addr,
                               void *_desc)
{
        const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
        const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
        struct acr_r361_pmu_bl_desc *desc = _desc;
        u64 base, addr_code, addr_data;
        u32 addr_args;

        base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
        /* For some reason we should not add app_resident_code_offset here */
        addr_code = base;
        addr_data = base + pdesc->app_resident_data_offset;
        addr_args = sec->falcon->data.limit;
        addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;

        desc->dma_idx = FALCON_SEC2_DMAIDX_UCODE;
        desc->code_dma_base = u64_to_flcn64(addr_code);
        desc->total_code_size = pdesc->app_size;
        desc->code_size_to_load = pdesc->app_resident_code_size;
        desc->code_entry_point = pdesc->app_imem_entry;
        desc->data_dma_base = u64_to_flcn64(addr_data);
        desc->data_size = pdesc->app_resident_data_size;
        desc->overlay_dma_base = u64_to_flcn64(addr_code);
        desc->argc = 1;
        /* args are stored at the beginning of EMEM */
        desc->argv = 0x01000000;
}

const struct acr_r352_lsf_func
acr_r361_ls_sec2_func_0 = {
        .generate_bl_desc = acr_r361_generate_sec2_bl_desc,
        .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
};

static const struct acr_r352_ls_func
acr_r361_ls_sec2_func = {
        .load = acr_ls_ucode_load_sec2,
        .post_run = acr_ls_sec2_post_run,
        .version_max = 0,
        .version = {
                &acr_r361_ls_sec2_func_0,
        }
};

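/*
 * Hook table for the r361 ACR: WPR layout and header handling are inherited
 * from r352, while the HS and LS bootloader descriptors use the r361 formats
 * defined above.
 */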
const struct acr_r352_func
acr_r361_func = {
        .fixup_hs_desc = acr_r352_fixup_hs_desc,
        .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
        .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
        .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
        .ls_fill_headers = acr_r352_ls_fill_headers,
        .ls_write_wpr = acr_r352_ls_write_wpr,
        .ls_func = {
                [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
                [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
                [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
                [NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
        },
};

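/* Instantiate an r361 ACR, with the PMU acting as the boot falcon. */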
struct nvkm_acr *
acr_r361_new(unsigned long managed_falcons)
{
        return acr_r352_new_(&acr_r361_func, NVKM_SECBOOT_FALCON_PMU,
                             managed_falcons);
}