/*
 * bootstub 32 bit entry setting routines
 *
 * Copyright (C) 2008-2010 Intel Corporation.
 * Author: Alek Du <alek.du@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

#include "types.h"
#include "bootstub.h"
#include "bootparam.h"
#include "spi-uart.h"
#include "ssp-uart.h"
#include "mb.h"
#include "sfi.h"
#include "bootimg.h"

#include <stdint.h>
#include <stddef.h>
#include "imr_toc.h"

#define PAGE_SIZE_MASK 0xFFF
#define MASK_1K 0x3FF
#define PAGE_ALIGN_FWD(x) (((x) + PAGE_SIZE_MASK) & ~PAGE_SIZE_MASK)
#define PAGE_ALIGN_BACK(x) ((x) & ~PAGE_SIZE_MASK)

#define IMR_START_ADDRESS(x) (((x) & 0xFFFFFFFC) << 8)
#define IMR_END_ADDRESS(x) (((x) == 0) ? (x) : ((((x) & 0xFFFFFFFC) << 8) | MASK_1K))

#define IMR6_START_ADDRESS IMR_START_ADDRESS(*((u32 *)0xff108160))
#define IMR6_END_ADDRESS IMR_END_ADDRESS(*((u32 *)0xff108164))
#define IMR7_START_ADDRESS IMR_START_ADDRESS(*((u32 *)0xff108170))
#define IMR7_END_ADDRESS IMR_END_ADDRESS(*((u32 *)0xff108174))

#define FATAL_HANG() { asm("cli"); while (1) { asm("nop"); } }

extern int no_uart_used;

extern imr_toc_t imr6_toc;
static u32 imr7_size;

static u32 sps_load_adrs;

static memory_map_t mb_mmap[E820MAX];
u32 mb_magic, mb_info;

struct gdt_ptr {
        u16 len;
        u32 ptr;
} __attribute__((packed));

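/*
 * Minimal freestanding string/memory helpers. The bootstub runs before any
 * C library is available, so it carries its own memcpy/memset/strnlen/
 * strnchr/strncmp. memcpy and memset work on 32-bit words first and then
 * handle the remaining bytes one at a time.
 */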
static void *memcpy(void *dest, const void *src, size_t count)
{
        char *tmp = dest;
        const char *s = src;
        size_t _count = count / 4;

        while (_count--) {
                *(long *)tmp = *(long *)s;
                tmp += 4;
                s += 4;
        }
        count %= 4;
        while (count--)
                *tmp++ = *s++;
        return dest;
}

static void *memset(void *s, unsigned char c, size_t count)
{
        char *xs = s;
        size_t _count = count / 4;
        unsigned long _c = c << 24 | c << 16 | c << 8 | c;

        while (_count--) {
                *(long *)xs = _c;
                xs += 4;
        }
        count %= 4;
        while (count--)
                *xs++ = c;
        return s;
}

static size_t strnlen(const char *s, size_t maxlen)
{
        const char *es = s;
        while (*es && maxlen) {
                es++;
                maxlen--;
        }

        return (es - s);
}

static const char *strnchr(const char *s, int c, size_t maxlen)
{
        int i;
        for (i = 0; i < maxlen && *s != c; s++, i++)
                ;
        return s;
}

int strncmp(const char *cs, const char *ct, size_t count)
{
        unsigned char c1, c2;

        while (count) {
                c1 = *cs++;
                c2 = *ct++;
                if (c1 != c2)
                        return c1 < c2 ? -1 : 1;
                if (!c1)
                        break;
                count--;
        }
        return 0;
}

static inline int is_image_aosp(unsigned char *magic)
{
        return !strncmp((char *)magic, (char *)BOOT_MAGIC, sizeof(BOOT_MAGIC)-1);
}

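/*
 * Fill in the boot_params the kernel expects: clear the legacy video
 * fields, report a placeholder memory size (SFI overrides it later), copy
 * the kernel's setup_header, and mark the loader as unknown (0xff) with
 * the Intel MID hardware_subarch.
 */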
static void setup_boot_params(struct boot_params *bp, struct setup_header *sh)
{
        bp->screen_info.orig_video_mode = 0;
        bp->screen_info.orig_video_lines = 0;
        bp->screen_info.orig_video_cols = 0;
        bp->alt_mem_k = 128*1024; // hard coded 128M mem here, since SFI will override it
        memcpy(&bp->hdr, sh, sizeof (struct setup_header));
        bp->hdr.type_of_loader = 0xff; // bootstub is an unknown bootloader for the kernel :)
        bp->hdr.hardware_subarch = X86_SUBARCH_MRST;
}

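/*
 * Prepare a plain bzImage boot: pick up the kernel, command line and
 * initramfs either from the AOSP boot image header or from the fixed
 * bootstub offsets, copy the command line behind the boot_params area so
 * the kernel image cannot overwrite it, relocate the initramfs near the
 * top of the reported memory, and return the 512-byte-aligned address
 * just past the end-of-setup signature, i.e. where the 32-bit kernel is
 * expected to start.
 */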
static u32 bzImage_setup(struct boot_params *bp, struct setup_header *sh)
{
        void *cmdline = (void *)BOOT_CMDLINE_OFFSET;
        struct boot_img_hdr *aosp = (struct boot_img_hdr *)AOSP_HEADER_ADDRESS;
        size_t cmdline_len;
        u8 *initramfs, *ptr;

        if (is_image_aosp(aosp->magic)) {
                ptr = (u8 *)aosp->kernel_addr;
                cmdline_len = strnlen((const char *)aosp->cmdline, sizeof(aosp->cmdline));

                /*
                 * Copy the command line to be after bootparams so that it won't be
                 * overwritten by the kernel executable.
                 */
                memset(cmdline, 0, sizeof(aosp->cmdline));
                memcpy(cmdline, (const void *)aosp->cmdline, cmdline_len);

                bp->hdr.ramdisk_size = aosp->ramdisk_size;

                initramfs = (u8 *)aosp->ramdisk_addr;
        } else {
                ptr = (u8 *)BZIMAGE_OFFSET;
                cmdline_len = strnlen((const char *)CMDLINE_OFFSET, CMDLINE_SIZE);
                /*
                 * Copy the command line to be after bootparams so that it won't be
                 * overwritten by the kernel executable.
                 */
                memset(cmdline, 0, CMDLINE_SIZE);
                memcpy(cmdline, (const void *)CMDLINE_OFFSET, cmdline_len);

                bp->hdr.ramdisk_size = *(u32 *)INITRD_SIZE_OFFSET;

                initramfs = (u8 *)BZIMAGE_OFFSET + *(u32 *)BZIMAGE_SIZE_OFFSET;
        }

        bp->hdr.cmd_line_ptr = BOOT_CMDLINE_OFFSET;
        bp->hdr.cmdline_size = cmdline_len;
        bp->hdr.ramdisk_image = (bp->alt_mem_k*1024 - bp->hdr.ramdisk_size) & 0xFFFFF000;

        if (*initramfs) {
                bs_printk("Relocating initramfs to high memory ...\n");
                memcpy((u8 *)bp->hdr.ramdisk_image, initramfs, bp->hdr.ramdisk_size);
        } else {
                bs_printk("Won't relocate initramfs, are you in SLE?\n");
        }

        while (1) {
                if (*(u32 *)ptr == SETUP_SIGNATURE && *(u32 *)(ptr + 4) == 0)
                        break;
                ptr++;
        }
        ptr += 4;
        return (((unsigned int)ptr + 511) / 512) * 512;
}

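/*
 * CPUID helper. EBX is parked in EDI around the instruction and swapped
 * back afterwards, so the CPUID EBX result is returned through EDI while
 * the caller's EBX is preserved (useful if this is ever built as PIC code,
 * where EBX is reserved).
 */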
static inline void cpuid(u32 op, u32 regs[4])
{
        __asm__ volatile (
                "mov %%ebx, %%edi\n"
                "cpuid\n"
                "xchg %%edi, %%ebx\n"
                : "=a"(regs[0]), "=D"(regs[1]), "=c"(regs[2]), "=d"(regs[3])
                : "a"(op)
        );
}

enum cpuid_regs {
        CR_EAX = 0,
        CR_ECX,
        CR_EDX,
        CR_EBX
};

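/*
 * Identify the Intel MID SoC generation from CPUID leaf 1 (family/model
 * bits masked with CPUID_MASK) so later code can pick the right UART and
 * memory-map handling.
 */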
int mid_identify_cpu(void)
{
        u32 regs[4];

        cpuid(1, regs);

        switch (regs[CR_EAX] & CPUID_MASK) {
        case PENWELL_FAMILY:
                return MID_CPU_CHIP_PENWELL;
        case CLOVERVIEW_FAMILY:
                return MID_CPU_CHIP_CLOVERVIEW;
        case VALLEYVIEW2_FAMILY:
                return MID_CPU_CHIP_VALLEYVIEW2;
        case TANGIER_FAMILY:
                return MID_CPU_CHIP_TANGIER;
        case ANNIEDALE_FAMILY:
                return MID_CPU_CHIP_ANNIEDALE;
        default:
                return MID_CPU_CHIP_OTHER;
        }
}

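/*
 * Choose the early-console transport for bs_printk based on the detected
 * SoC, unless a type was already stored at SPI_TYPE (bootstub() does this
 * for AOSP images): Penwell/Cloverview use SPI_1, Tangier/Anniedale use
 * SPI_2, and anything else disables the UART output entirely.
 */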
static void setup_spi(void)
{
        if (!(*(int *)SPI_TYPE)) {
                switch (mid_identify_cpu()) {
                case MID_CPU_CHIP_PENWELL:
                        *(int *)SPI_TYPE = SPI_1;
                        bs_printk("PNW detected\n");
                        break;
                case MID_CPU_CHIP_CLOVERVIEW:
                        *(int *)SPI_TYPE = SPI_1;
                        bs_printk("CLV detected\n");
                        break;
                case MID_CPU_CHIP_TANGIER:
                        *(int *)SPI_TYPE = SPI_2;
                        bs_printk("MRD detected\n");
                        break;
                case MID_CPU_CHIP_ANNIEDALE:
                        *(int *)SPI_TYPE = SPI_2;
                        bs_printk("ANN detected\n");
                        break;
                case MID_CPU_CHIP_VALLEYVIEW2:
                case MID_CPU_CHIP_OTHER:
                default:
                        no_uart_used = 1;
                }
        }
}

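/*
 * Load a minimal flat-memory GDT (4 GB code and data segments at base 0)
 * and a null IDT; the kernel installs its own descriptor tables later.
 */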
static void setup_gdt(void)
{
        static const u64 boot_gdt[] __attribute__((aligned(16))) = {
                /* CS: code, read/execute, 4 GB, base 0 */
                [GDT_ENTRY_BOOT_CS] = GDT_ENTRY(0xc09b, 0, 0xfffff),
                /* DS: data, read/write, 4 GB, base 0 */
                [GDT_ENTRY_BOOT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff),
        };
        static struct gdt_ptr gdt;

        gdt.len = sizeof(boot_gdt) - 1;
        gdt.ptr = (u32)&boot_gdt;

        asm volatile("lgdtl %0" : : "m" (gdt));
}

static void setup_idt(void)
{
        static const struct gdt_ptr null_idt = {0, 0};
        asm volatile("lidtl %0" : : "m" (null_idt));
}

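/*
 * If a VXE firmware image was appended to the boot payload, copy it to its
 * slot in IMR6 (isolated memory region 6) as described by the imr6_toc
 * entry; hang fatally if it does not fit.
 */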
static void vxe_fw_setup(void)
{
        u8 *vxe_fw_image;
        u32 vxe_fw_size;
        u32 vxe_fw_load_adrs;

        vxe_fw_size = *(u32 *)VXE_FW_SIZE_OFFSET;
        /* do we have a VXE FW image? */
        if (vxe_fw_size == 0)
                return;

        /* Do we have enough room to load the image? */
        if (vxe_fw_size > imr6_toc.entries[IMR_TOC_ENTRY_VXE_FW].size) {
                bs_printk("FATAL ERROR: VXE FW image size is too large for IMR\n");
                FATAL_HANG();
        }

        vxe_fw_image = (u8 *)(
                BZIMAGE_OFFSET
                + *(u32 *)BZIMAGE_SIZE_OFFSET
                + *(u32 *)INITRD_SIZE_OFFSET
        );

        vxe_fw_load_adrs = IMR6_START_ADDRESS + imr6_toc.entries[IMR_TOC_ENTRY_VXE_FW].start_offset;
        memcpy((u8 *)vxe_fw_load_adrs, vxe_fw_image, vxe_fw_size);
}

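/*
 * Copy a table of contents to the start of an IMR after sanity-checking
 * the destination, the TOC pointer, and that the TOC actually fits; any
 * inconsistency is fatal.
 */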
static void load_imr_toc(u32 imr, u32 imrsize, imr_toc_t *toc, u32 tocsize)
{
        if (imr == 0 || imrsize == 0 || toc == NULL || tocsize == 0 || imrsize < tocsize) {
                bs_printk("FATAL ERROR: TOC size is too large for IMR\n");
                FATAL_HANG();
        }
        memcpy((u8 *)imr, (u8 *)toc, tocsize);
}

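/*
 * If a Xen image was appended to the boot payload, set up a multiboot
 * launch for it: check for the multiboot magic in its first 8192 bytes,
 * point the multiboot command line at the part of the boot command line
 * after the '$' separator (appending any capfreq= option found in the boot
 * command line), describe the dom0 kernel, initrd and platform-services
 * image as multiboot modules, pass the e820-derived memory map, relocate
 * Xen to the start of IMR7, and return that address as the jump target.
 * Returns 0 when there is no valid Xen image, so the caller falls back to
 * a plain bzImage boot.
 */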
static u32 xen_multiboot_setup(void)
{
        u32 *magic, *xen_image, i;
        char *src, *dst;
        u32 xen_size;
        u32 xen_jump_adrs;
        static module_t modules[3];
        static multiboot_info_t mb = {
                .flags = MBI_CMDLINE | MBI_MODULES | MBI_MEMMAP | MBI_DRIVES,
                .mmap_addr = (u32)mb_mmap,
                .mods_count = 3,
                .mods_addr = (u32)modules,
        };

        xen_size = *(u32 *)XEN_SIZE_OFFSET;
        /* do we have a xen image? */
        if (xen_size == 0) {
                return 0;
        }

        /* Compute the actual offset of the Xen image */
        xen_image = (u32 *)(
                BZIMAGE_OFFSET
                + *(u32 *)BZIMAGE_SIZE_OFFSET
                + *(u32 *)INITRD_SIZE_OFFSET
                + *(u32 *)VXE_FW_SIZE_OFFSET
                + *(u32 *)SEC_PLAT_SVCS_SIZE_OFFSET
        );

        /* the multiboot signature should be located in the first 8192 bytes */
        for (magic = xen_image; magic < xen_image + 2048; magic++)
                if (*magic == MULTIBOOT_HEADER_MAGIC)
                        break;
        if (*magic != MULTIBOOT_HEADER_MAGIC) {
                return 0;
        }

        mb.cmdline = (u32)strnchr((char *)CMDLINE_OFFSET, '$', CMDLINE_SIZE) + 1;
        dst = (char *)mb.cmdline + strnlen((const char *)mb.cmdline, CMDLINE_SIZE) - 1;
        *dst = ' ';
        dst++;
        src = (char *)CMDLINE_OFFSET;
        for (i = 0; i < strnlen((const char *)CMDLINE_OFFSET, CMDLINE_SIZE); i++) {
                if (!strncmp(src, "capfreq=", 8)) {
                        while (*src != ' ' && *src != 0) {
                                *dst = *src;
                                dst++;
                                src++;
                        }
                        break;
                }
                src++;
        }

        /* fill in the multiboot module information: dom0 kernel + initrd + Platform Services Image */
        modules[0].mod_start = BZIMAGE_OFFSET;
        modules[0].mod_end = BZIMAGE_OFFSET + *(u32 *)BZIMAGE_SIZE_OFFSET;
        modules[0].string = CMDLINE_OFFSET;

        modules[1].mod_start = modules[0].mod_end;
        modules[1].mod_end = modules[1].mod_start + *(u32 *)INITRD_SIZE_OFFSET;
        modules[1].string = 0;

        modules[2].mod_start = sps_load_adrs;
        modules[2].mod_end = modules[2].mod_start + *(u32 *)SEC_PLAT_SVCS_SIZE_OFFSET;
        modules[2].string = 0;

        mb.drives_addr = IMR6_START_ADDRESS + imr6_toc.entries[IMR_TOC_ENTRY_XEN_EXTRA].start_offset;
        mb.drives_length = imr6_toc.entries[IMR_TOC_ENTRY_XEN_EXTRA].size;

        for (i = 0; i < E820MAX; i++)
                if (!mb_mmap[i].size)
                        break;
        mb.mmap_length = i * sizeof(memory_map_t);

        /* relocate xen to start address */
        if (xen_size > imr7_size) {
                bs_printk("FATAL ERROR: Xen image size is too large for IMR\n");
                FATAL_HANG();
        }
        xen_jump_adrs = IMR7_START_ADDRESS;
        memcpy((u8 *)xen_jump_adrs, xen_image, xen_size);

        mb_info = (u32)&mb;
        mb_magic = MULTIBOOT_BOOTLOADER_MAGIC;

        return (u32)xen_jump_adrs;
}

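/*
 * If a platform services (SPS) image was appended to the boot payload,
 * copy it, page-aligned, to IMR7_START_ADDRESS + imr7_size - sps_size and
 * shrink imr7_size so the later Xen size check only sees the remaining
 * space; hang fatally if the image does not fit.
 */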
static void sec_plat_svcs_setup(void)
{
        u8 *sps_image;
        u32 sps_size;

        sps_size = PAGE_ALIGN_FWD(*(u32 *)SEC_PLAT_SVCS_SIZE_OFFSET);
        /* do we have a SPS image? */
        if (sps_size == 0)
                return;

        /* Do we have enough room to load the image? */
        if (sps_size > imr7_size) {
                bs_printk("FATAL ERROR: SPS image size is too large for IMR\n");
                FATAL_HANG();
        }

        sps_image = (u8 *)(
                BZIMAGE_OFFSET
                + *(u32 *)BZIMAGE_SIZE_OFFSET
                + *(u32 *)INITRD_SIZE_OFFSET
                + *(u32 *)VXE_FW_SIZE_OFFSET
        );

        /*
         * Load SPS image (with assumed CHAABI Mailboxes suffixed)
         * at bottom of IMR7.
         * Must be page-aligned or Xen will panic.
         */
        sps_load_adrs = PAGE_ALIGN_BACK(IMR7_START_ADDRESS + imr7_size - sps_size);
        memcpy((u8 *)sps_load_adrs, sps_image, sps_size);

        /* reduce remaining size for Xen image size check */
        imr7_size -= sps_size;
}

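/*
 * Main C entry of the bootstub. It locates the kernel setup_header (AOSP
 * boot image or raw bzImage layout), loads descriptor tables, picks the
 * debug UART, builds the memory map (via get_e820_by_bios() on
 * ValleyView2, from SFI tables otherwise), reserves IMR6/IMR7 and loads
 * the optional VXE firmware and platform services images on chips that
 * use them, then prepares either a Xen multiboot launch or a plain
 * bzImage launch. The returned value is the 32-bit entry address the
 * caller jumps to.
 */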
int bootstub(void)
{
        u32 jmp;
        struct boot_img_hdr *aosp = (struct boot_img_hdr *)AOSP_HEADER_ADDRESS;
        struct boot_params *bp = (struct boot_params *)BOOT_PARAMS_OFFSET;
        struct setup_header *sh;
        u32 imr_size;
        int nr_entries;

        if (is_image_aosp(aosp->magic)) {
                sh = (struct setup_header *)((unsigned int)aosp->kernel_addr + 0x1F1);
                /* disable the bs_printk through SPI/UART */
                *(int *)SPI_UART_SUPPRESSION = 1;
                *(int *)SPI_TYPE = SPI_2;
        } else
                sh = (struct setup_header *)SETUP_HEADER_OFFSET;

        setup_idt();
        setup_gdt();
        setup_spi();
        bs_printk("Bootstub Version: 1.4 ...\n");

        memset(bp, 0, sizeof (struct boot_params));

        if (mid_identify_cpu() == MID_CPU_CHIP_VALLEYVIEW2) {
                nr_entries = get_e820_by_bios(bp->e820_map);
                bp->e820_entries = (nr_entries > 0) ? nr_entries : 0;
        } else {
                sfi_setup_mmap(bp, mb_mmap);
        }

        if ((mid_identify_cpu() != MID_CPU_CHIP_TANGIER) && (mid_identify_cpu() != MID_CPU_CHIP_ANNIEDALE)) {
                if ((IMR6_END_ADDRESS > IMR6_START_ADDRESS) && (IMR7_END_ADDRESS > IMR7_START_ADDRESS)) {
                        imr_size = PAGE_ALIGN_FWD(IMR6_END_ADDRESS - IMR6_START_ADDRESS);
                        load_imr_toc(IMR6_START_ADDRESS, imr_size, &imr6_toc, sizeof(imr6_toc));
                        vxe_fw_setup();
                        sfi_add_e820_entry(bp, mb_mmap, IMR6_START_ADDRESS, imr_size, E820_RESERVED);

                        imr7_size = PAGE_ALIGN_FWD(IMR7_END_ADDRESS - IMR7_START_ADDRESS);
                        sec_plat_svcs_setup();
                        sfi_add_e820_entry(bp, mb_mmap, IMR7_START_ADDRESS, imr7_size, E820_RESERVED);
                } else {
                        *(u32 *)XEN_SIZE_OFFSET = 0; /* Don't allow Xen to boot */
                }
        } else {
                *(u32 *)XEN_SIZE_OFFSET = 0; /* Don't allow Xen to boot */
        }

        setup_boot_params(bp, sh);

        jmp = xen_multiboot_setup();
        if (!jmp) {
                bs_printk("Using bzImage to boot\n");
                jmp = bzImage_setup(bp, sh);
        } else
                bs_printk("Using multiboot image to boot\n");

        bs_printk("Jump to kernel 32bit entry\n");
        return jmp;
}

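/*
 * Early console output: route the string to the SPI or SSP UART selected
 * by setup_spi(), or drop it when SPI_UART_SUPPRESSION is set.
 */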
void bs_printk(const char *str)
{
        if (*(int *)SPI_UART_SUPPRESSION)
                return;

        switch (*(int *)SPI_TYPE) {
        case SPI_1:
                bs_spi_printk(str);
                break;
        case SPI_2:
                bs_ssp_printk(str);
                break;
        }
}