/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "priv.h"

#include <core/gpuobj.h>

/*
 * The BL header format used by GM20B's firmware is slightly different
 * from that of GM200. Fix the differences here.
 */
struct gm20b_flcn_bl_desc {
	u32 reserved[4];
	u32 signature[4];
	u32 ctx_dma;
	u32 code_dma_base;
	u32 non_sec_code_off;
	u32 non_sec_code_size;
	u32 sec_code_off;
	u32 sec_code_size;
	u32 code_entry_point;
	u32 data_dma_base;
	u32 data_size;
};
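
/*
 * Note: unlike on GM200, code_dma_base and data_dma_base above are 32 bits
 * wide and hold the DMA address right-shifted by 8 (i.e. 256-byte aligned),
 * as computed by gm20b_secboot_fixup_bl_desc() below.
 */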

static int
gm20b_secboot_prepare_blobs(struct gm200_secboot *gsb)
{
	struct nvkm_subdev *subdev = &gsb->base.subdev;
	int acr_size;
	int ret;

	ret = gm20x_secboot_prepare_blobs(gsb);
	if (ret)
		return ret;

	acr_size = gsb->acr_load_blob->size;
	/*
	 * On Tegra the WPR region is set by the bootloader. It is illegal for
	 * the HS blob to be larger than this region.
	 */
	if (acr_size > gsb->wpr_size) {
		nvkm_error(subdev, "WPR region too small for FW blob!\n");
		nvkm_error(subdev, "required: %dB\n", acr_size);
		nvkm_error(subdev, "WPR size: %dB\n", gsb->wpr_size);
		return -ENOSPC;
	}

	return 0;
}

/**
 * gm20b_secboot_fixup_bl_desc - adapt BL descriptor to format used by GM20B FW
 *
 * The only format difference to address is that DMA addresses are 32 bits
 * wide and 256-byte aligned.
 */
static void
gm20b_secboot_fixup_bl_desc(const struct gm200_flcn_bl_desc *desc, void *ret)
{
	struct gm20b_flcn_bl_desc *gdesc = ret;
	u64 addr;

	memcpy(gdesc->reserved, desc->reserved, sizeof(gdesc->reserved));
	memcpy(gdesc->signature, desc->signature, sizeof(gdesc->signature));
	gdesc->ctx_dma = desc->ctx_dma;
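	/* pack the 64-bit code DMA address into its 32-bit, 256B-aligned form */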
	addr = desc->code_dma_base.hi;
	addr <<= 32;
	addr |= desc->code_dma_base.lo;
	gdesc->code_dma_base = lower_32_bits(addr >> 8);
	gdesc->non_sec_code_off = desc->non_sec_code_off;
	gdesc->non_sec_code_size = desc->non_sec_code_size;
	gdesc->sec_code_off = desc->sec_code_off;
	gdesc->sec_code_size = desc->sec_code_size;
	gdesc->code_entry_point = desc->code_entry_point;
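	/* same conversion for the data DMA address */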
	addr = desc->data_dma_base.hi;
	addr <<= 32;
	addr |= desc->data_dma_base.lo;
	gdesc->data_dma_base = lower_32_bits(addr >> 8);
	gdesc->data_size = desc->data_size;
}

static void
gm20b_secboot_fixup_hs_desc(struct gm200_secboot *gsb,
			    struct hsflcn_acr_desc *desc)
{
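	/* point the HS descriptor at the LS ucode blob */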
	desc->ucode_blob_base = gsb->ls_blob->addr;
	desc->ucode_blob_size = gsb->ls_blob->size;

	desc->wpr_offset = 0;
}

static const struct gm200_secboot_func
gm20b_secboot_func = {
	.bl_desc_size = sizeof(struct gm20b_flcn_bl_desc),
	.fixup_bl_desc = gm20b_secboot_fixup_bl_desc,
	.fixup_hs_desc = gm20b_secboot_fixup_hs_desc,
	.prepare_blobs = gm20b_secboot_prepare_blobs,
};


#ifdef CONFIG_ARCH_TEGRA
#define TEGRA_MC_BASE				0x70019000
#define MC_SECURITY_CARVEOUT2_CFG0		0xc58
#define MC_SECURITY_CARVEOUT2_BOM_0		0xc5c
#define MC_SECURITY_CARVEOUT2_BOM_HI_0		0xc60
#define MC_SECURITY_CARVEOUT2_SIZE_128K		0xc64
#define TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED	(1 << 1)
/**
 * gm20b_tegra_read_wpr() - read the WPR registers on Tegra
 *
 * On dGPU, we can manage the WPR region ourselves, but on Tegra the WPR region
 * is reserved from system memory by the bootloader and irreversibly locked.
 * This function reads the address and size of the pre-configured WPR region.
 */
static int
gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
{
	struct nvkm_secboot *sb = &gsb->base;
	void __iomem *mc;
	u32 cfg;

	mc = ioremap(TEGRA_MC_BASE, 0xd00);
	if (!mc) {
		nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n");
		return -ENOMEM;
	}
	gsb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) |
	      ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32);
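	/* the carveout size register is expressed in 128KiB units */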
	gsb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K)
		<< 17;
	cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0);
	iounmap(mc);

	/* Check that WPR settings are valid */
	if (gsb->wpr_size == 0) {
		nvkm_error(&sb->subdev, "WPR region is empty\n");
		return -EINVAL;
	}

	if (!(cfg & TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED)) {
		nvkm_error(&sb->subdev, "WPR region not locked\n");
		return -EINVAL;
	}

	return 0;
}
#else
static int
gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
{
	nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n");
	return -EINVAL;
}
#endif

static int
gm20b_secboot_init(struct nvkm_secboot *sb)
{
	struct gm200_secboot *gsb = gm200_secboot(sb);
	int ret;

	ret = gm20b_tegra_read_wpr(gsb);
	if (ret)
		return ret;

	return gm200_secboot_init(sb);
}

static const struct nvkm_secboot_func
gm20b_secboot = {
	.dtor = gm200_secboot_dtor,
	.init = gm20b_secboot_init,
	.reset = gm200_secboot_reset,
	.start = gm200_secboot_start,
	.managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS),
	.boot_falcon = NVKM_SECBOOT_FALCON_PMU,
};

int
gm20b_secboot_new(struct nvkm_device *device, int index,
		  struct nvkm_secboot **psb)
{
	int ret;
	struct gm200_secboot *gsb;

	gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
	if (!gsb) {
		*psb = NULL;
		return -ENOMEM;
	}
	*psb = &gsb->base;

	ret = nvkm_secboot_ctor(&gm20b_secboot, device, index, &gsb->base);
	if (ret)
		return ret;

	gsb->func = &gm20b_secboot_func;

	return 0;
}

MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");