/*
 * bootstub 32-bit entry setup routines
 *
 * Copyright (C) 2008-2010 Intel Corporation.
 * Author: Alek Du <alek.du@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

#include "types.h"
#include "bootstub.h"
#include "bootparam.h"
#include "spi-uart.h"
#include "ssp-uart.h"
#include "mb.h"
#include "sfi.h"
#include <bootimg.h>

#include <stdint.h>
#include <stddef.h>
#include "imr_toc.h"

#define PAGE_SIZE_MASK		0xFFF
#define MASK_1K			0x3FF
#define PAGE_ALIGN_FWD(x)	(((x) + PAGE_SIZE_MASK) & ~PAGE_SIZE_MASK)
#define PAGE_ALIGN_BACK(x)	((x) & ~PAGE_SIZE_MASK)

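/*
 * The IMR (Isolated Memory Region) range registers below appear to encode
 * 1 KB-aligned addresses: bits [31:2] of the register value, shifted left
 * by 8, give the physical address, and a non-zero end register is ORed with
 * MASK_1K so the range covers its whole last 1 KB block.  The register
 * addresses (0xff1081xx) are taken as-is from the macros that follow.
 */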
#define IMR_START_ADDRESS(x)	(((x) & 0xFFFFFFFC) << 8)
#define IMR_END_ADDRESS(x)	(((x) == 0) ? (x) : ((((x) & 0xFFFFFFFC) << 8) | MASK_1K))

#define IMR6_START_ADDRESS	IMR_START_ADDRESS(*((u32 *)0xff108160))
#define IMR6_END_ADDRESS	IMR_END_ADDRESS(*((u32 *)0xff108164))
#define IMR7_START_ADDRESS	IMR_START_ADDRESS(*((u32 *)0xff108170))
#define IMR7_END_ADDRESS	IMR_END_ADDRESS(*((u32 *)0xff108174))

#define FATAL_HANG()	{ asm("cli"); while (1) { asm("nop"); } }

extern int no_uart_used;

extern imr_toc_t imr6_toc;
static u32 imr7_size;

static u32 sps_load_adrs;

static memory_map_t mb_mmap[E820MAX];
u32 mb_magic, mb_info;

struct gdt_ptr {
	u16 len;
	u32 ptr;
} __attribute__((packed));

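/*
 * The bootstub is freestanding (no C library), so minimal word-at-a-time
 * memcpy/memset and bounded string helpers are defined locally.
 */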
static void *memcpy(void *dest, const void *src, size_t count)
{
	char *tmp = dest;
	const char *s = src;
	size_t _count = count / 4;

	while (_count--) {
		*(long *)tmp = *(long *)s;
		tmp += 4;
		s += 4;
	}
	count %= 4;
	while (count--)
		*tmp++ = *s++;
	return dest;
}

static void *memset(void *s, unsigned char c, size_t count)
{
	char *xs = s;
	size_t _count = count / 4;
	unsigned long _c = c << 24 | c << 16 | c << 8 | c;

	while (_count--) {
		*(long *)xs = _c;
		xs += 4;
	}
	count %= 4;
	while (count--)
		*xs++ = c;
	return s;
}

static size_t strnlen(const char *s, size_t maxlen)
{
	const char *es = s;
	while (*es && maxlen) {
		es++;
		maxlen--;
	}

	return (es - s);
}

static const char *strnchr(const char *s, int c, size_t maxlen)
{
	int i;
	for (i = 0; i < maxlen && *s != c; s++, i++)
		;
	return s;
}

int strncmp(const char *cs, const char *ct, size_t count)
{
	unsigned char c1, c2;

	while (count) {
		c1 = *cs++;
		c2 = *ct++;
		if (c1 != c2)
			return c1 < c2 ? -1 : 1;
		if (!c1)
			break;
		count--;
	}
	return 0;
}

static inline int is_image_aosp(unsigned char *magic)
{
	return !strncmp((char *)magic, (char *)BOOT_MAGIC, sizeof(BOOT_MAGIC)-1);
}

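/*
 * Fill in the boot_params ("zero page") the kernel's 32-bit entry expects:
 * clear the legacy video fields, copy the setup_header, and mark the loader
 * type as unknown with the MRST hardware subarch.
 */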
static void setup_boot_params(struct boot_params *bp, struct setup_header *sh)
{
	bp->screen_info.orig_video_mode = 0;
	bp->screen_info.orig_video_lines = 0;
	bp->screen_info.orig_video_cols = 0;
	bp->alt_mem_k = 128*1024; // hard-coded 128 MB of memory here; SFI will override it
	memcpy(&bp->hdr, sh, sizeof (struct setup_header));
	bp->hdr.type_of_loader = 0xff; // bootstub is an unknown bootloader to the kernel :)
	bp->hdr.hardware_subarch = X86_SUBARCH_MRST;
}

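/*
 * Prepare a direct bzImage boot: copy the kernel command line to just after
 * the boot params so the decompressed kernel cannot overwrite it, record the
 * initramfs location/size (relocating it near the top of memory unless this
 * is a ramdump build), then scan forward for SETUP_SIGNATURE to find where
 * the real-mode setup portion of the image ends.  The 512-byte-aligned
 * address just past it is returned as the kernel's 32-bit entry point.
 */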
static u32 bzImage_setup(struct boot_params *bp, struct setup_header *sh)
{
	void *cmdline = (void *)BOOT_CMDLINE_OFFSET;
	struct boot_img_hdr *aosp = (struct boot_img_hdr *)AOSP_HEADER_ADDRESS;
	size_t cmdline_len, extra_cmdline_len;
	u8 *initramfs, *ptr;

	if (is_image_aosp(aosp->magic)) {
		ptr = (u8*)aosp->kernel_addr;
		cmdline_len = strnlen((const char *)aosp->cmdline, sizeof(aosp->cmdline));
		extra_cmdline_len = strnlen((const char *)aosp->extra_cmdline, sizeof(aosp->extra_cmdline));

		/*
		 * Copy the command line + extra command line to be after bootparams
		 * so that it won't be overwritten by the kernel executable.
		 */
		memset(cmdline, 0, sizeof(aosp->cmdline) + sizeof(aosp->extra_cmdline));
		memcpy(cmdline, (const void *)aosp->cmdline, cmdline_len);
		memcpy(cmdline + cmdline_len, (const void *)aosp->extra_cmdline, extra_cmdline_len);

		bp->hdr.ramdisk_size = aosp->ramdisk_size;

		initramfs = (u8 *)aosp->ramdisk_addr;
	} else {
		ptr = (u8*)BZIMAGE_OFFSET;
		cmdline_len = strnlen((const char *)CMDLINE_OFFSET, CMDLINE_SIZE);
		/*
		 * Copy the command line to be after bootparams so that it won't be
		 * overwritten by the kernel executable.
		 */
		memset(cmdline, 0, CMDLINE_SIZE);
		memcpy(cmdline, (const void *)CMDLINE_OFFSET, cmdline_len);

		bp->hdr.ramdisk_size = *(u32 *)INITRD_SIZE_OFFSET;

		initramfs = (u8 *)BZIMAGE_OFFSET + *(u32 *)BZIMAGE_SIZE_OFFSET;
	}

	bp->hdr.cmd_line_ptr = BOOT_CMDLINE_OFFSET;
	bp->hdr.cmdline_size = cmdline_len;
#ifndef BUILD_RAMDUMP
	bp->hdr.ramdisk_image = (bp->alt_mem_k*1024 - bp->hdr.ramdisk_size) & 0xFFFFF000;

	if (*initramfs) {
		bs_printk("Relocating initramfs to high memory ...\n");
		memcpy((u8*)bp->hdr.ramdisk_image, initramfs, bp->hdr.ramdisk_size);
	} else {
		bs_printk("Won't relocate initramfs, are you in SLE?\n");
	}
#else
	bp->hdr.ramdisk_image = (u32) initramfs;
#endif

	while (1) {
		if (*(u32 *)ptr == SETUP_SIGNATURE && *(u32 *)(ptr+4) == 0)
			break;
		ptr++;
	}
	ptr += 4;
	return (((unsigned int)ptr+511)/512)*512;
}

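/*
 * CPUID helper.  EBX is saved/restored via EDI around the CPUID instruction,
 * presumably so the inline asm stays safe when the compiler reserves EBX
 * (e.g. for PIC code).  Note that the output constraints place the EBX
 * result in regs[1]; only regs[CR_EAX] is consumed below.
 */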
static inline void cpuid(u32 op, u32 regs[4])
{
	__asm__ volatile (
		"mov %%ebx, %%edi\n"
		"cpuid\n"
		"xchg %%edi, %%ebx\n"
		: "=a"(regs[0]), "=D"(regs[1]), "=c"(regs[2]), "=d"(regs[3])
		: "a"(op)
	);
}

enum cpuid_regs {
	CR_EAX = 0,
	CR_ECX,
	CR_EDX,
	CR_EBX
};

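/* Identify the SoC family from CPUID leaf 1 (EAX masked with CPUID_MASK). */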
int mid_identify_cpu(void)
{
	u32 regs[4];

	cpuid(1, regs);

	switch (regs[CR_EAX] & CPUID_MASK) {

	case PENWELL_FAMILY:
		return MID_CPU_CHIP_PENWELL;
	case CLOVERVIEW_FAMILY:
		return MID_CPU_CHIP_CLOVERVIEW;
	case VALLEYVIEW2_FAMILY:
		return MID_CPU_CHIP_VALLEYVIEW2;
	case TANGIER_FAMILY:
		return MID_CPU_CHIP_TANGIER;
	case ANNIEDALE_FAMILY:
		return MID_CPU_CHIP_ANNIEDALE;
	default:
		return MID_CPU_CHIP_OTHER;
	}
}

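/*
 * Pick the debug UART flavour for bs_printk() based on the detected SoC,
 * unless one was already selected; VLV2 and unknown chips get no UART.
 */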
static void setup_spi(void)
{
	if (!(*(int *)SPI_TYPE)) {
		switch (mid_identify_cpu()) {

		case MID_CPU_CHIP_PENWELL:
			*(int *)SPI_TYPE = SPI_1;
			bs_printk("PNW detected\n");
			break;

		case MID_CPU_CHIP_CLOVERVIEW:
			*(int *)SPI_TYPE = SPI_1;
			bs_printk("CLV detected\n");
			break;

		case MID_CPU_CHIP_TANGIER:
			*(int *)SPI_TYPE = SPI_2;
			bs_printk("MRD detected\n");
			break;

		case MID_CPU_CHIP_ANNIEDALE:
			*(int *)SPI_TYPE = SPI_2;
			bs_printk("ANN detected\n");
			break;

		case MID_CPU_CHIP_VALLEYVIEW2:
		case MID_CPU_CHIP_OTHER:
		default:
			no_uart_used = 1;
		}
	}
}

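/* Load a flat GDT: 4 GB code and data segments with base 0 for 32-bit protected mode. */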
static void setup_gdt(void)
{
	static const u64 boot_gdt[] __attribute__((aligned(16))) = {
		/* CS: code, read/execute, 4 GB, base 0 */
		[GDT_ENTRY_BOOT_CS] = GDT_ENTRY(0xc09b, 0, 0xfffff),
		/* DS: data, read/write, 4 GB, base 0 */
		[GDT_ENTRY_BOOT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff),
	};
	static struct gdt_ptr gdt;

	gdt.len = sizeof(boot_gdt)-1;
	gdt.ptr = (u32)&boot_gdt;

	asm volatile("lgdtl %0" : : "m" (gdt));
}

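/* Load an empty (NULL) IDT descriptor; the stub does not expect interrupts or exceptions. */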
static void setup_idt(void)
{
	static const struct gdt_ptr null_idt = {0, 0};
	asm volatile("lidtl %0" : : "m" (null_idt));
}

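/*
 * If a VXE firmware image was appended after the kernel and initrd, copy it
 * into IMR6 at the offset given by the IMR6 TOC entry, hanging fatally if it
 * does not fit.
 */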
static void vxe_fw_setup(void)
{
	u8 *vxe_fw_image;
	u32 vxe_fw_size;
	u32 vxe_fw_load_adrs;

	vxe_fw_size = *(u32*)VXE_FW_SIZE_OFFSET;
	/* do we have a VXE FW image? */
	if (vxe_fw_size == 0)
		return;

	/* Do we have enough room to load the image? */
	if (vxe_fw_size > imr6_toc.entries[IMR_TOC_ENTRY_VXE_FW].size) {
		bs_printk("FATAL ERROR: VXE FW image size is too large for IMR\n");
		FATAL_HANG();
	}

	vxe_fw_image = (u8 *)(
		BZIMAGE_OFFSET
		+ *(u32 *)BZIMAGE_SIZE_OFFSET
		+ *(u32 *)INITRD_SIZE_OFFSET
		);

	vxe_fw_load_adrs = IMR6_START_ADDRESS + imr6_toc.entries[IMR_TOC_ENTRY_VXE_FW].start_offset;
	memcpy((u8 *)vxe_fw_load_adrs, vxe_fw_image, vxe_fw_size);
}

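/* Copy an IMR table of contents to the start of the given IMR, with basic sanity checks. */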
static void load_imr_toc(u32 imr, u32 imrsize, imr_toc_t *toc, u32 tocsize)
{
	if (imr == 0 || imrsize == 0 || toc == NULL || tocsize == 0 || imrsize < tocsize) {
		bs_printk("FATAL ERROR: TOC size is too large for IMR\n");
		FATAL_HANG();
	}
	memcpy((u8 *)imr, (u8 *)toc, tocsize);
}

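/*
 * If a Xen image was appended to the OS image, build the multiboot info for
 * it: reuse the portion of the command line after the '$' separator (plus
 * any capfreq= argument from the first portion), describe the dom0 kernel,
 * initrd and SPS image as multiboot modules, point the memory map at the
 * SFI-derived e820 copy, and relocate Xen into IMR7.  Returns the address
 * Xen was relocated to (used as the jump target), or 0 to fall back to a
 * plain bzImage boot.
 */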
static u32 xen_multiboot_setup(void)
{
	u32 *magic, *xen_image, i;
	char *src, *dst;
	u32 xen_size;
	u32 xen_jump_adrs;
	static module_t modules[3];
	static multiboot_info_t mb = {
		.flags = MBI_CMDLINE | MBI_MODULES | MBI_MEMMAP | MBI_DRIVES,
		.mmap_addr = (u32)mb_mmap,
		.mods_count = 3,
		.mods_addr = (u32)modules,
	};

	xen_size = *(u32 *)XEN_SIZE_OFFSET;
	/* do we have a xen image? */
	if (xen_size == 0) {
		return 0;
	}

	/* Compute the actual offset of the Xen image */
	xen_image = (u32*)(
		BZIMAGE_OFFSET
		+ *(u32 *)BZIMAGE_SIZE_OFFSET
		+ *(u32 *)INITRD_SIZE_OFFSET
		+ *(u32 *)VXE_FW_SIZE_OFFSET
		+ *(u32 *)SEC_PLAT_SVCS_SIZE_OFFSET
		);

	/* the multiboot signature should be located in the first 8192 bytes */
	for (magic = xen_image; magic < xen_image + 2048; magic++)
		if (*magic == MULTIBOOT_HEADER_MAGIC)
			break;
	if (*magic != MULTIBOOT_HEADER_MAGIC) {
		return 0;
	}

	mb.cmdline = (u32)strnchr((char *)CMDLINE_OFFSET, '$', CMDLINE_SIZE) + 1;
	dst = (char *)mb.cmdline + strnlen((const char *)mb.cmdline, CMDLINE_SIZE) - 1;
	*dst = ' ';
	dst++;
	src = (char *)CMDLINE_OFFSET;
	for (i = 0; i < strnlen((const char *)CMDLINE_OFFSET, CMDLINE_SIZE); i++) {
		if (!strncmp(src, "capfreq=", 8)) {
			while (*src != ' ' && *src != 0) {
				*dst = *src;
				dst++;
				src++;
			}
			break;
		}
		src++;
	}

	/* fill in the multiboot module information: dom0 kernel + initrd + Platform Services Image */
	modules[0].mod_start = BZIMAGE_OFFSET;
	modules[0].mod_end = BZIMAGE_OFFSET + *(u32 *)BZIMAGE_SIZE_OFFSET;
	modules[0].string = CMDLINE_OFFSET;

	modules[1].mod_start = modules[0].mod_end;
	modules[1].mod_end = modules[1].mod_start + *(u32 *)INITRD_SIZE_OFFSET;
	modules[1].string = 0;

	modules[2].mod_start = sps_load_adrs;
	modules[2].mod_end = modules[2].mod_start + *(u32 *)SEC_PLAT_SVCS_SIZE_OFFSET;
	modules[2].string = 0;

	mb.drives_addr = IMR6_START_ADDRESS + imr6_toc.entries[IMR_TOC_ENTRY_XEN_EXTRA].start_offset;
	mb.drives_length = imr6_toc.entries[IMR_TOC_ENTRY_XEN_EXTRA].size;

	for (i = 0; i < E820MAX; i++)
		if (!mb_mmap[i].size)
			break;
	mb.mmap_length = i * sizeof(memory_map_t);

	/* relocate xen to start address */
	if (xen_size > imr7_size) {
		bs_printk("FATAL ERROR: Xen image size is too large for IMR\n");
		FATAL_HANG();
	}
	xen_jump_adrs = IMR7_START_ADDRESS;
	memcpy((u8 *)xen_jump_adrs, xen_image, xen_size);

	mb_info = (u32)&mb;
	mb_magic = MULTIBOOT_BOOTLOADER_MAGIC;

	return (u32)xen_jump_adrs;
}

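/*
 * Load the optional Security Platform Services (SPS) image into the highest
 * page-aligned slot of IMR7 and shrink the remaining IMR7 size used for the
 * Xen image size check accordingly.
 */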
static void sec_plat_svcs_setup(void)
{
	u8 *sps_image;
	u32 sps_size;

	sps_size = PAGE_ALIGN_FWD(*(u32*)SEC_PLAT_SVCS_SIZE_OFFSET);
	/* do we have a SPS image? */
	if (sps_size == 0)
		return;

	/* Do we have enough room to load the image? */
	if (sps_size > imr7_size) {
		bs_printk("FATAL ERROR: SPS image size is too large for IMR\n");
		FATAL_HANG();
	}

	sps_image = (u8 *)(
		BZIMAGE_OFFSET
		+ *(u32 *)BZIMAGE_SIZE_OFFSET
		+ *(u32 *)INITRD_SIZE_OFFSET
		+ *(u32 *)VXE_FW_SIZE_OFFSET
		);

	/*
	 * Load the SPS image (with the CHAABI mailboxes assumed to be suffixed)
	 * at the end of IMR7; the load address must be page-aligned or Xen
	 * will panic.
	 */
	sps_load_adrs = PAGE_ALIGN_BACK(IMR7_START_ADDRESS + imr7_size - sps_size);
	memcpy((u8 *)sps_load_adrs, sps_image, sps_size);

	/* reduce remaining size for Xen image size check */
	imr7_size -= sps_size;
}

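/*
 * C entry point of the bootstub, called from the platform's assembly startup
 * code.  Detects the image format (AOSP boot image vs. raw bzImage stack),
 * sets up the descriptor tables, debug UART and e820 map, loads the optional
 * VXE/SPS/Xen payloads into their IMRs, and returns the 32-bit address to
 * jump to (Xen or the kernel's protected-mode entry).
 */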
int bootstub(void)
{
	u32 jmp;
	struct boot_img_hdr *aosp = (struct boot_img_hdr *)AOSP_HEADER_ADDRESS;
	struct boot_params *bp = (struct boot_params *)BOOT_PARAMS_OFFSET;
	struct setup_header *sh;
	u32 imr_size;
	int nr_entries;

	if (is_image_aosp(aosp->magic)) {
		sh = (struct setup_header *)((unsigned int)aosp->kernel_addr + \
			(unsigned int)offsetof(struct boot_params, hdr));
		/* disable the bs_printk through SPI/UART */
		*(int *)SPI_UART_SUPPRESSION = 1;
		*(int *)SPI_TYPE = SPI_2;
	} else
		sh = (struct setup_header *)SETUP_HEADER_OFFSET;

	setup_idt();
	setup_gdt();
	setup_spi();
	bs_printk("Bootstub Version: 1.4 ...\n");

	memset(bp, 0, sizeof (struct boot_params));

	if (mid_identify_cpu() == MID_CPU_CHIP_VALLEYVIEW2) {
		nr_entries = get_e820_by_bios(bp->e820_map);
		bp->e820_entries = (nr_entries > 0) ? nr_entries : 0;
	} else {
		sfi_setup_mmap(bp, mb_mmap);
	}

	if ((mid_identify_cpu() != MID_CPU_CHIP_TANGIER) && (mid_identify_cpu() != MID_CPU_CHIP_ANNIEDALE)) {
		if ((IMR6_END_ADDRESS > IMR6_START_ADDRESS) && (IMR7_END_ADDRESS > IMR7_START_ADDRESS)) {
			imr_size = PAGE_ALIGN_FWD(IMR6_END_ADDRESS - IMR6_START_ADDRESS);
			load_imr_toc(IMR6_START_ADDRESS, imr_size, &imr6_toc, sizeof(imr6_toc));
			vxe_fw_setup();
			sfi_add_e820_entry(bp, mb_mmap, IMR6_START_ADDRESS, imr_size, E820_RESERVED);

			imr7_size = PAGE_ALIGN_FWD(IMR7_END_ADDRESS - IMR7_START_ADDRESS);
			sec_plat_svcs_setup();
			sfi_add_e820_entry(bp, mb_mmap, IMR7_START_ADDRESS, imr7_size, E820_RESERVED);
		} else {
			*(u32 *)XEN_SIZE_OFFSET = 0; /* Don't allow Xen to boot */
		}
	} else {
		*(u32 *)XEN_SIZE_OFFSET = 0; /* Don't allow Xen to boot */
	}

	setup_boot_params(bp, sh);

	jmp = xen_multiboot_setup();
	if (!jmp) {
		bs_printk("Using bzImage to boot\n");
		jmp = bzImage_setup(bp, sh);
	} else
		bs_printk("Using multiboot image to boot\n");

	bs_printk("Jump to kernel 32bit entry\n");
	return jmp;
}

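/* Write a string to the selected SPI/SSP debug UART, unless output is suppressed. */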
void bs_printk(const char *str)
{
	if (*(int *)SPI_UART_SUPPRESSION)
		return;

	switch (*(int *)SPI_TYPE) {

	case SPI_1:
		bs_spi_printk(str);
		break;

	case SPI_2:
		bs_ssp_printk(str);
		break;
	}
}