/arch/x86/kernel/ |
D | doublefault.c | 22 unsigned long gdt, tss; in doublefault_fn() local
                    25 gdt = gdt_desc.address; in doublefault_fn()
                    27 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size); in doublefault_fn()
                    29 if (ptr_ok(gdt)) { in doublefault_fn()
                    30 gdt += GDT_ENTRY_TSS << 3; in doublefault_fn()
                    31 tss = get_desc_base((struct desc_struct *)gdt); in doublefault_fn()
|
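The doublefault_fn() references above read the GDT base out of a saved descriptor-table register and then decode the base address of the TSS descriptor at offset GDT_ENTRY_TSS << 3. A minimal user-space sketch of that decoding step, assuming a raw_desc layout that mirrors the architectural 8-byte segment descriptor (all names here are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Architectural layout of an 8-byte x86 segment descriptor:
 * the base address is split across three fields. */
struct raw_desc {
	uint16_t limit0;
	uint16_t base0;
	uint8_t  base1;
	uint8_t  type_s_dpl_p;   /* type:4, S:1, DPL:2, P:1 */
	uint8_t  limit1_flags;   /* limit[19:16]:4, AVL, L, D/B, G */
	uint8_t  base2;
};

/* Rough equivalent of get_desc_base(): reassemble the 32-bit base. */
static uint32_t desc_base(const struct raw_desc *d)
{
	return (uint32_t)d->base0 |
	       ((uint32_t)d->base1 << 16) |
	       ((uint32_t)d->base2 << 24);
}

int main(void)
{
	/* Fake GDT with a TSS-like descriptor at a made-up index 8,
	 * base 0x00c0ffee, just to exercise the decoder. */
	struct raw_desc gdt[16] = {0};
	gdt[8].base0 = 0xffee;
	gdt[8].base1 = 0xc0;
	gdt[8].base2 = 0x00;

	/* Index << 3 turns a selector index into a byte offset. */
	const struct raw_desc *tss = (const struct raw_desc *)
		((const char *)gdt + (8 << 3));
	printf("tss base = %#x\n", (unsigned)desc_base(tss));
	return 0;
}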
D | setup_percpu.c | 157 struct desc_struct gdt; in setup_percpu_segment() local
                     159 pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF, in setup_percpu_segment()
                     161 gdt.s = 1; in setup_percpu_segment()
                     163 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S); in setup_percpu_segment()
|
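setup_percpu_segment() above packs a descriptor whose base is per_cpu_offset(cpu) and whose limit is 0xFFFFF with page granularity. As a rough sketch of how a base, a 20-bit limit and the access bits fit into the two halves of an 8-byte descriptor, here is a stand-alone pack_desc() helper (illustrative only; it is not the kernel's pack_descriptor()):

#include <stdint.h>

/* Pack an x86 segment descriptor from base, 20-bit limit and flags.
 *   low dword : limit[15:0] | base[15:0] << 16
 *   high dword: base[23:16] | type<<8 | S<<12 | DPL<<13 | P<<15 |
 *               limit[19:16]<<16 | AVL<<20 | L<<21 | D<<22 | G<<23 |
 *               base[31:24]<<24
 */
static uint64_t pack_desc(uint32_t base, uint32_t limit,
			  unsigned type, unsigned s, unsigned dpl,
			  unsigned p, unsigned avl, unsigned l,
			  unsigned d, unsigned g)
{
	uint32_t lo = (limit & 0xffff) | ((base & 0xffff) << 16);
	uint32_t hi = ((base >> 16) & 0xff) |
		      ((type & 0xf) << 8)  | ((s & 1) << 12) |
		      ((dpl & 3) << 13)    | ((p & 1) << 15) |
		      (((limit >> 16) & 0xf) << 16) |
		      ((avl & 1) << 20) | ((l & 1) << 21) |
		      ((d & 1) << 22)   | ((g & 1) << 23) |
		      (base & 0xff000000);
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* Flat writable data segment: base 0, limit 0xFFFFF, 4 KiB
	 * granularity, 32-bit -> the classic 0x00cf92000000ffff. */
	uint64_t d = pack_desc(0, 0xfffff, 0x2, 1, 0, 1, 0, 0, 1, 1);
	return d == 0x00cf92000000ffffULL ? 0 : 1;
}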
D | apm_32.c | 606 struct desc_struct *gdt; in __apm_bios_call() local
               611 gdt = get_cpu_gdt_table(cpu); in __apm_bios_call()
               612 save_desc_40 = gdt[0x40 / 8]; in __apm_bios_call()
               613 gdt[0x40 / 8] = bad_bios_desc; in __apm_bios_call()
               622 gdt[0x40 / 8] = save_desc_40; in __apm_bios_call()
               682 struct desc_struct *gdt; in __apm_bios_call_simple() local
               687 gdt = get_cpu_gdt_table(cpu); in __apm_bios_call_simple()
               688 save_desc_40 = gdt[0x40 / 8]; in __apm_bios_call_simple()
               689 gdt[0x40 / 8] = bad_bios_desc; in __apm_bios_call_simple()
               697 gdt[0x40 / 8] = save_desc_40; in __apm_bios_call_simple()
               [all …]
|
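__apm_bios_call() and __apm_bios_call_simple() both park a safe descriptor in GDT slot 0x40/8 for the duration of the firmware call, because some APM BIOSes hard-wire that selector, and then put the original entry back. A trivial sketch of that save/patch/restore pattern, with hypothetical names and an arbitrary placeholder descriptor value:

#include <stdint.h>

static uint64_t gdt[64];        /* stand-in for this CPU's GDT */

/* Placeholder for what the kernel calls bad_bios_desc (a small data
 * segment over the BIOS data area); the value here is arbitrary and
 * only marks the slot as "patched" in this sketch. */
static const uint64_t patched_desc = 0x1234;

static void firmware_call(void)
{
	/* the protected-mode BIOS call would happen here */
}

int main(void)
{
	/* Save the entry that selector 0x40 indexes, install the safe
	 * descriptor, make the call, then restore the original entry. */
	uint64_t save_desc_40 = gdt[0x40 / 8];

	gdt[0x40 / 8] = patched_desc;
	firmware_call();
	gdt[0x40 / 8] = save_desc_40;
	return 0;
}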
D | head_32.S | 458 movl %eax,%ss # after changing gdt.
|
/arch/x86/purgatory/ |
D | setup-x86_64.S | 20 lgdt gdt(%rip)
                     39 gdt: /* 0x00 unusable segment
                     43 .word gdt_end - gdt - 1
                     44 .quad gdt
|
D | entry64.S | 21 lgdt gdt(%rip)
                85 gdt: label
                90 .word gdt_end - gdt - 1
                91 .quad gdt
|
/arch/x86/boot/ |
D | pm.c | 84 static struct gdt_ptr gdt; in setup_gdt() local
           86 gdt.len = sizeof(boot_gdt)-1; in setup_gdt()
           87 gdt.ptr = (u32)&boot_gdt + (ds() << 4); in setup_gdt()
           89 asm volatile("lgdtl %0" : : "m" (gdt)); in setup_gdt()
|
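setup_gdt() in pm.c builds the operand of lgdt: a packed structure holding a 16-bit limit (size of the table minus one) followed by the table's linear address. A user-space sketch of that structure, assuming a 32-bit base as in the boot code (the real function also adds ds() << 4 because it still runs under real-mode segmentation, which this sketch omits):

#include <stdint.h>

/* The lgdt operand: 16-bit limit, then the linear base address.
 * It must be packed; there is no padding between the two fields. */
struct gdt_ptr {
	uint16_t len;   /* sizeof(table) - 1 */
	uint32_t ptr;   /* linear address of the table */
} __attribute__((packed));

static uint64_t boot_gdt[4];   /* stand-in for the real boot GDT */

static struct gdt_ptr make_gdt_ptr(void)
{
	struct gdt_ptr gp;

	gp.len = sizeof(boot_gdt) - 1;
	/* Truncating to 32 bits mirrors the 32-bit boot environment;
	 * it is only meaningful there, not in this user-space sketch. */
	gp.ptr = (uint32_t)(uintptr_t)boot_gdt;
	return gp;
}

int main(void)
{
	struct gdt_ptr gp = make_gdt_ptr();

	/* Actually loading it requires CPL 0, so only the instruction
	 * is shown:  asm volatile("lgdtl %0" : : "m" (gp));          */
	return gp.len == sizeof(boot_gdt) - 1 ? 0 : 1;
}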
/arch/x86/include/asm/ |
D | desc.h | 43 struct desc_struct gdt[GDT_ENTRIES]; member
             50 return per_cpu(gdt_page, cpu).gdt; in get_cpu_gdt_table()
             130 native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type) in native_write_gdt_entry() argument
             137 default: size = sizeof(*gdt); break; in native_write_gdt_entry()
             140 memcpy(&gdt[entry], desc, size); in native_write_gdt_entry()
             247 struct desc_struct *gdt = get_cpu_gdt_table(cpu); in native_load_tls() local
             251 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; in native_load_tls()
|
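desc.h shows the two common operations on the per-CPU GDT copy: native_write_gdt_entry() memcpy()s a descriptor into a slot, and native_load_tls() copies a task's three TLS descriptors in starting at GDT_ENTRY_TLS_MIN. A self-contained sketch of both, with invented sizes and slot numbers standing in for GDT_ENTRIES and GDT_ENTRY_TLS_MIN:

#include <stdint.h>
#include <string.h>

#define MY_GDT_ENTRIES  32   /* illustrative, not GDT_ENTRIES */
#define MY_TLS_MIN      12   /* illustrative, not GDT_ENTRY_TLS_MIN */
#define MY_TLS_ENTRIES  3

/* One GDT page per CPU, as in struct gdt_page, modelled here as a
 * plain array of raw 8-byte descriptors. */
struct fake_gdt_page {
	uint64_t gdt[MY_GDT_ENTRIES];
};

static struct fake_gdt_page cpu_gdt[4];   /* pretend per-cpu storage */

/* Analogue of native_write_gdt_entry() for the common 8-byte case:
 * copy the descriptor into the requested slot. */
static void write_gdt_entry(uint64_t *gdt, int entry, const void *desc)
{
	memcpy(&gdt[entry], desc, sizeof(gdt[entry]));
}

/* Analogue of native_load_tls(): copy a thread's three TLS descriptors
 * into the per-cpu GDT starting at the TLS slot. */
static void load_tls(uint64_t *gdt, const uint64_t tls_array[MY_TLS_ENTRIES])
{
	for (int i = 0; i < MY_TLS_ENTRIES; i++)
		gdt[MY_TLS_MIN + i] = tls_array[i];
}

int main(void)
{
	/* DPL-3 flat data segment as a sample TLS descriptor. */
	uint64_t tls[MY_TLS_ENTRIES] = { 0x00cff2000000ffffULL, 0, 0 };

	write_gdt_entry(cpu_gdt[0].gdt, 2, &tls[0]);
	load_tls(cpu_gdt[0].gdt, tls);
	return 0;
}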
D | lguest.h | 66 struct desc_struct gdt[GDT_ENTRIES]; member
|
/arch/x86/boot/compressed/ |
D | head_64.S | 122 leal gdt(%ebp), %eax
                123 movl %eax, gdt+2(%ebp)
                124 lgdt gdt(%ebp)
                442 gdt: label
                443 .word gdt_end - gdt
                444 .long gdt
|
D | eboot.c | 1071 struct desc_ptr *gdt = NULL; in efi_main() local
              1102 sizeof(*gdt), (void **)&gdt); in efi_main()
              1108 gdt->size = 0x800; in efi_main()
              1109 status = efi_low_alloc(sys_table, gdt->size, 8, in efi_main()
              1110 (unsigned long *)&gdt->address); in efi_main()
              1141 memset((char *)gdt->address, 0x0, gdt->size); in efi_main()
              1142 desc = (struct desc_struct *)gdt->address; in efi_main()
              1195 asm volatile ("lgdt %0" : : "m" (*gdt)); in efi_main()
|
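efi_main() allocates a 0x800-byte table, zeroes it and fills in flat descriptors before executing lgdt and jumping into the kernel. A sketch of the kind of flat code/data entries such a table holds; the slot numbers and the 64-bit code entry below are illustrative, since the real layout has to match the kernel's fixed selectors:

#include <stdint.h>
#include <string.h>

/* Classic flat descriptors: base 0, limit 0xFFFFF with 4 KiB
 * granularity, present, DPL 0. */
#define FLAT_CODE32  0x00cf9a000000ffffULL  /* exec/read, D=1 */
#define FLAT_DATA    0x00cf92000000ffffULL  /* read/write */
#define FLAT_CODE64  0x00af9a000000ffffULL  /* L=1: 64-bit code */

int main(void)
{
	uint64_t gdt[0x800 / 8];   /* same 0x800-byte size as above */

	memset(gdt, 0, sizeof(gdt));   /* entry 0 stays the null descriptor */
	gdt[1] = FLAT_CODE32;
	gdt[2] = FLAT_DATA;
	gdt[3] = FLAT_CODE64;

	/* The EFI stub would then load the table with lgdt and far-jump
	 * into the kernel's startup code. */
	return 0;
}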
/arch/x86/realmode/rm/ |
D | trampoline_64.S | 116 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
                      133 .short tr_gdt_end - tr_gdt - 1 # gdt limit
|
/arch/x86/kernel/acpi/ |
D | wakeup_32.S | 22 # reload the gdt, as we need the full 32 bit address
                  31 # and restore the stack ... but you need gdt for this to work
|
/arch/x86/xen/ |
D | smp.c | 403 struct desc_struct *gdt; in cpu_initialize_context() local
            415 gdt = get_cpu_gdt_table(cpu); in cpu_initialize_context()
            436 BUG_ON((unsigned long)gdt & ~PAGE_MASK); in cpu_initialize_context()
            438 gdt_mfn = arbitrary_virt_to_mfn(gdt); in cpu_initialize_context()
            439 make_lowmem_page_readonly(gdt); in cpu_initialize_context()
|
D | enlighten.c | 697 struct desc_struct *gdt; in load_TLS_descriptor() local
                  706 gdt = get_cpu_gdt_table(cpu); in load_TLS_descriptor()
                  707 maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]); in load_TLS_descriptor()
|
/arch/x86/lguest/ |
D | boot.c | 334 struct desc_struct *gdt = (void *)desc->address; in lguest_load_gdt() local
             337 hcall(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b, 0); in lguest_load_gdt()
|
/arch/x86/include/uapi/asm/ |
D | kvm.h | 149 struct kvm_dtable gdt, idt; member
|
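The kvm_dtable members gdt and idt carry the guest's descriptor-table registers across the KVM_GET_SREGS/KVM_SET_SREGS ioctls (the x86.c references further down show the kernel side of that copy). A minimal user-space sketch of reading the guest GDTR through that interface; error handling is reduced to the bare minimum, and since the VM has no memory or code the values are simply whatever KVM initialized:

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0)
		return 1;

	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
	struct kvm_sregs sregs;

	/* sregs.gdt / sregs.idt are the struct kvm_dtable members. */
	if (vcpu >= 0 && ioctl(vcpu, KVM_GET_SREGS, &sregs) == 0)
		printf("guest GDTR: base=%#llx limit=%#x\n",
		       (unsigned long long)sregs.gdt.base, sregs.gdt.limit);
	return 0;
}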
/arch/x86/kernel/cpu/ |
D | common.c | 98 DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct gdt_page, gdt_page) = { .gdt = {
|
/arch/x86/kvm/ |
D | vmx.c | 2033 struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); in reload_tss() local
            2036 descs = (void *)gdt->address; in reload_tss()
            2097 struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); in segment_base() local
            2105 table_base = gdt->address; in segment_base()
            2328 struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); in vmx_vcpu_load() local
            2338 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */ in vmx_vcpu_load()
|
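reload_tss() in vmx.c fetches the host GDT pointer so it can clear the busy flag in the TSS descriptor before reloading TR, since ltr faults on a descriptor that is already marked busy. A small sketch of just that bit manipulation on a raw 8-byte descriptor (the sample value is illustrative):

#include <assert.h>
#include <stdint.h>

/* The 4-bit descriptor type lives in bits 40..43.  A TSS descriptor is
 * type 0x9 when available and 0xB once loaded into TR ("busy"); the
 * difference is bit 41 of the descriptor. */
#define DESC_TYPE_SHIFT 40
#define TSS_BUSY_BIT    (1ULL << (DESC_TYPE_SHIFT + 1))

static uint64_t clear_tss_busy(uint64_t desc)
{
	return desc & ~TSS_BUSY_BIT;
}

int main(void)
{
	/* Available TSS descriptor (access byte 0x89, limit 0x67),
	 * then marked busy as the CPU would after loading TR. */
	uint64_t busy_tss = 0x0000890000000067ULL | TSS_BUSY_BIT;

	/* After clearing the busy bit the type is 0x9 again and TR
	 * could be reloaded with ltr (a privileged instruction, so it
	 * is not executed here). */
	assert(((clear_tss_busy(busy_tss) >> DESC_TYPE_SHIFT) & 0xf) == 0x9);
	return 0;
}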
D | svm.c | 752 struct desc_struct *gdt; in svm_hardware_enable() local
            774 gdt = (struct desc_struct *)gdt_descr.address; in svm_hardware_enable()
            775 sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); in svm_hardware_enable()
|
D | x86.c | 7185 sregs->gdt.limit = dt.size; in kvm_arch_vcpu_ioctl_get_sregs()
            7186 sregs->gdt.base = dt.address; in kvm_arch_vcpu_ioctl_get_sregs()
            7275 dt.size = sregs->gdt.limit; in kvm_arch_vcpu_ioctl_set_sregs()
            7276 dt.address = sregs->gdt.base; in kvm_arch_vcpu_ioctl_set_sregs()
|
/arch/x86/events/ |
D | core.c | 2332 desc = raw_cpu_ptr(gdt_page.gdt) + idx; in get_segment_base()
|