/arch/x86/boot/compressed/
efi_stub_32.S
     49  popl %ecx
     50  movl %ecx, saved_return_addr(%edx)
     52  popl %ecx
     53  movl %ecx, efi_rt_function_ptr(%edx)
     58  call *%ecx
     70  movl efi_rt_function_ptr(%edx), %ecx
     71  pushl %ecx
     76  movl saved_return_addr(%edx), %ecx
     77  pushl %ecx
head_32.S
     77  popl %ecx
     78  movl %ecx, efi32_config(%esi)    /* Handle */
     79  popl %ecx
     80  movl %ecx, efi32_config+8(%esi)  /* EFI System table pointer */
     91  popl %ecx
     93  pushl %ecx
     98  popl %ecx
    105  movl %ecx, efi32_config(%esi)    /* Handle */
    194  movl $(_bss - startup_32), %ecx
    195  shrl $2, %ecx
    [all …]
/arch/x86/math-emu/
reg_round.S
    124  movl PARAM4,%ecx
    144  movl %ecx,%esi
    145  andl CW_PC,%ecx
    146  cmpl PR_64_BITS,%ecx
    149  cmpl PR_53_BITS,%ecx
    152  cmpl PR_24_BITS,%ecx
    158  cmpl PR_RESERVED_BITS,%ecx
    172  movl %esi,%ecx
    173  andl CW_RC,%ecx
    174  cmpl RC_RND,%ecx
    [all …]
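The comparisons above classify the x87 control word's precision-control and rounding-control fields. A minimal C sketch of the same decoding, using the standard control-word layout (PC in bits 8-9, RC in bits 10-11); the constant names mirror the emulator's but are restated here as assumptions:

    /* Sketch: decoding the control-word fields the asm above tests.
     * Masks follow the documented x87 layout; names are illustrative. */
    #include <stdint.h>

    #define CW_PC       0x0300  /* precision control */
    #define CW_RC       0x0c00  /* rounding control  */
    #define PR_24_BITS  0x0000
    #define PR_53_BITS  0x0200
    #define PR_64_BITS  0x0300
    #define RC_RND      0x0000  /* round to nearest  */

    static int rounds_to_nearest(uint16_t cw)
    {
        return (cw & CW_RC) == RC_RND;
    }

    static int precision_bits(uint16_t cw)
    {
        switch (cw & CW_PC) {
        case PR_24_BITS: return 24;
        case PR_53_BITS: return 53;
        case PR_64_BITS: return 64;
        default:         return -1;  /* reserved encoding */
        }
    }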
wm_sqrt.S
     90  movl SIGL(%esi),%ecx
     99  rcrl $1,%ecx
    107  movl %ecx,FPU_fsqrt_arg_1
    113  movl $0xaaaaaaaa,%ecx
    114  mull %ecx
    127  movl FPU_fsqrt_arg_2,%ecx  /* ms word */
    136  shrl %ecx                  /* Doing this first will prevent a divide */
    139  movl %ecx,%edx             /* msw of the arg / 2 */
    144  movl %ecx,%edx
    149  movl %ecx,%edx
    [all …]
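The surrounding routine halves the argument, forms an initial estimate, and refines it with divides, which is the classic Newton-Raphson recurrence for square roots. A hedged integer illustration of that recurrence (not the emulator's fixed-point code):

    /* Sketch: Newton-Raphson square root, the refine-by-divide pattern
     * visible in wm_sqrt.S, shown here on plain 32-bit integers. */
    #include <stdint.h>

    static uint32_t isqrt(uint32_t n)
    {
        uint32_t x, prev;

        if (n < 2)
            return n;

        x = n;                       /* crude initial estimate */
        do {
            prev = x;
            x = (x + n / x) / 2;     /* Newton step: x' = (x + n/x)/2 */
        } while (x < prev);          /* stops once it no longer shrinks */

        return prev;
    }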
mul_Xsig.S
     34  movl PARAM2,%ecx
     41  mull %ecx      /* msl of b */
     45  mull %ecx      /* msl of b */
     51  mull %ecx      /* msl of b */
     74  movl PARAM2,%ecx
     81  mull 4(%ecx)   /* msl of b */
     85  mull (%ecx)    /* lsl of b */
     91  mull 4(%ecx)   /* msl of b */
     97  mull (%ecx)    /* lsl of b */
    103  mull 4(%ecx)   /* msl of b */
    [all …]
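Each mull here forms a 32x32-to-64 partial product against a limb of b, and the results are summed with carries. A C sketch of the same scheme for a 96-bit significand times a 32-bit value, keeping the upper 96 bits of the 128-bit product; the struct layout is assumed, not the emulator's definition:

    /* Sketch: accumulate 32x32->64 partial products with carries,
     * dropping the low 32 bits of the 128-bit result. */
    #include <stdint.h>

    struct xsig { uint32_t lsw, midw, msw; };

    static void mul32_xsig(struct xsig *x, uint32_t mul)
    {
        uint64_t p0 = (uint64_t)x->lsw  * mul;
        uint64_t p1 = (uint64_t)x->midw * mul + (p0 >> 32);  /* + carry */
        uint64_t p2 = (uint64_t)x->msw  * mul + (p1 >> 32);  /* + carry */

        x->lsw  = (uint32_t)p1;        /* low 32 bits of p0 discarded */
        x->midw = (uint32_t)p2;
        x->msw  = (uint32_t)(p2 >> 32);
    }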
reg_u_sub.S
     45  movl PARAM6,%ecx
     46  subl PARAM7,%ecx   /* exp1 - exp2 */
    155  xorl %ecx,%ecx
    156  subl %edx,%ecx
    157  movl %ecx,%edx
    158  movl SIGL(%esi),%ecx
    159  sbbl %ebx,%ecx
    160  movl %ecx,%ebx
    161  movl SIGH(%esi),%ecx
    162  sbbl %eax,%ecx
    [all …]
round_Xsig.S
     51  bsrl %edx,%ecx     /* get the required shift in %ecx */
     52  subl $31,%ecx
     53  negl %ecx
     54  subl %ecx,-4(%ebp)
    122  bsrl %edx,%ecx     /* get the required shift in %ecx */
    123  subl $31,%ecx
    124  negl %ecx
    125  subl %ecx,-4(%ebp)
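The bsrl/subl/negl triple computes 31 - bsr(msw), i.e. the number of leading zero bits, and the subl against -4(%ebp) lowers the exponent by the shift applied. In C terms (simplified, assuming a non-zero top word):

    /* Sketch: normalize a significand so its top bit is set, adjusting
     * the exponent to compensate, as the asm above does. */
    #include <stdint.h>

    static void normalize(uint64_t *sig, int *exp)
    {
        uint32_t msw = (uint32_t)(*sig >> 32);

        if (msw && !(msw & 0x80000000u)) {
            int shift = __builtin_clz(msw);  /* == 31 - bsr(msw) */
            *sig <<= shift;
            *exp  -= shift;                  /* the subl %ecx,-4(%ebp) */
        }
    }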
reg_u_mul.S
     68  xorl %ecx,%ecx
     86  adcl $0,%ecx
     91  adcl %edx,%ecx
    111  testl $0x80000000,%ecx
    118  rcll $1,%ecx
    130  movl %ecx,%eax
/arch/x86/lib/
copy_user_nocache_64.S
     23  movl %edi,%ecx
     24  andl $7,%ecx
     26  subl $8,%ecx
     27  negl %ecx
     28  subl %ecx,%edx
     33  decl %ecx
     37  103: addl %ecx,%edx   /* ecx is zerorest also */
     56  movl %edx,%ecx
     58  shrl $6,%ecx
     78  decl %ecx
    [all …]
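Lines 23-28 are a standard alignment prologue: compute how many bytes remain until the destination is 8-byte aligned, copy them singly, then move the bulk in 64-byte blocks (the shrl $6 converts the remaining length to a block count). A C sketch of that shape, leaving out the fault-fixup labels and non-temporal stores the real routine adds; it assumes len is at least 8:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static void copy_aligned(char *dst, const char *src, size_t len)
    {
        size_t head = (uintptr_t)dst & 7;

        if (head) {
            head = 8 - head;        /* bytes until dst is 8-byte aligned */
            len -= head;
            while (head--)
                *dst++ = *src++;
        }

        for (size_t n = len >> 6; n; n--) {  /* 64-byte blocks */
            memcpy(dst, src, 64);
            dst += 64;
            src += 64;
        }
        memcpy(dst, src, len & 63);          /* tail */
    }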
atomic64_cx8_32.S
     28  movl %ecx, %edx
     37  read64 %ecx
     77  movl %ecx, %ebp
     79  read64 %ecx
     82  movl %edx, %ecx
     84  \insc\()l %edi, %ecx
     91  movl %ecx, %edx
    112  movl %edx, %ecx
    114  \insc\()l $0, %ecx
    121  movl %ecx, %edx
    [all …]
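This file builds 64-bit atomics for 32-bit x86 around cmpxchg8b: read the old value, compute the new one (the \insc\()l macro argument supplies the add/adc pair), and retry until the compare-exchange succeeds. The same loop in portable C11, as an illustration rather than the kernel's implementation:

    #include <stdatomic.h>
    #include <stdint.h>

    static uint64_t atomic64_add_return(_Atomic uint64_t *v, uint64_t i)
    {
        uint64_t old = atomic_load(v);

        /* retry until no other CPU modified the value in between;
         * on failure, old is reloaded with the current value */
        while (!atomic_compare_exchange_weak(v, &old, old + i))
            ;
        return old + i;
    }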
copy_user_64.S
     50  movl %edi,%ecx
     51  andl $7,%ecx
     53  subl $8,%ecx
     54  negl %ecx
     55  subl %ecx,%edx
     60  decl %ecx
     64  103: addl %ecx,%edx   /* ecx is zerorest also */
    108  movl %edx,%ecx
    138  movl %edx,%ecx
    140  shrl $6,%ecx
    [all …]
checksum_32.S
     59  movl 16(%esp),%ecx   # Function arg: int len
     67  dec %ecx
     76  subl $2, %ecx        # Alignment uses up two bytes.
     78  addl $2, %ecx        # ecx was < 2.  Deal with it.
     85  movl %ecx, %edx
     86  shrl $5, %ecx
    106  dec %ecx
    109  2: movl %edx, %ecx
    118  4: andl $3, %ecx
    120  cmpl $2, %ecx
    [all …]
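The routine accumulates the Internet ones'-complement checksum: align, sum 32-bit words with carries (the shrl $5 sets up an unrolled 32-bytes-per-iteration loop), then fold the carries back into 16 bits. A simplified C sketch of the accumulate-and-fold idea:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static uint16_t csum_fold32(const uint8_t *buf, size_t len)
    {
        uint64_t sum = 0;

        while (len >= 4) {
            uint32_t w;
            memcpy(&w, buf, 4);   /* avoid unaligned-access traps */
            sum += w;
            buf += 4;
            len -= 4;
        }
        while (len--)
            sum += *buf++;        /* simplified tail; not byte-order exact */

        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);  /* end-around carry */

        return (uint16_t)sum;
    }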
/arch/x86/include/asm/
mwait.h
     18  static inline void __monitor(const void *eax, unsigned long ecx,
     23          :: "a" (eax), "c" (ecx), "d"(edx));
     26  static inline void __mwait(unsigned long eax, unsigned long ecx)
     30          :: "a" (eax), "c" (ecx));
     33  static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
     38          :: "a" (eax), "c" (ecx));
     51  static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
     62          __mwait(eax, ecx);
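The pattern these helpers support: arm the address monitor, re-check the wakeup condition so a store between the check and the mwait is not lost, then mwait. A self-contained sketch using the same .byte encodings as the header; the flag variable is hypothetical:

    /* monitor = 0f 01 c8, mwait = 0f 01 c9, as in mwait.h */
    static volatile unsigned long need_wakeup;

    static void idle_wait(void)
    {
        /* arm the monitor: eax = address, ecx/edx = extensions/hints */
        asm volatile(".byte 0x0f, 0x01, 0xc8"
                     :: "a"(&need_wakeup), "c"(0ul), "d"(0ul));

        if (!need_wakeup)    /* re-check to avoid a lost wakeup */
            /* mwait: eax = hints, ecx = extensions */
            asm volatile(".byte 0x0f, 0x01, 0xc9"
                         :: "a"(0ul), "c"(0ul));
    }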
virtext.h
     30      unsigned long ecx = cpuid_ecx(1);
     31      return test_bit(5, &ecx);  /* CPUID.1:ECX.VMX[bit 5] -> VT */
     86      uint32_t eax, ebx, ecx, edx;
     94      cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
    101      cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
    102      if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
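cpu_has_svm() first confirms that extended leaf 0x80000001 exists, then tests its ECX feature bit. The same probe in standalone C, assuming the conventional SVM bit position (SVM_CPUID_FEATURE_SHIFT == 2):

    #include <stdint.h>
    #include <cpuid.h>   /* GCC/clang __get_cpuid */

    static int has_svm(void)
    {
        uint32_t eax, ebx, ecx, edx;

        /* does the CPU implement leaf 0x80000001 at all? */
        if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) ||
            eax < 0x80000001)
            return 0;

        __get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
        return !!(ecx & (1u << 2));   /* SVM feature bit */
    }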
/arch/x86/crypto/
salsa20-i586-asm_32.S
     41  movl 4(%edx),%ecx
     49  movl %ecx,168(%esp)
     51  movl 16(%edx),%ecx
     61  movl %ecx,180(%esp)
     63  movl 28(%edx),%ecx
     73  movl %ecx,192(%esp)
     75  movl 40(%edx),%ecx
     85  movl %ecx,204(%esp)
     87  movl 52(%edx),%ecx
     97  movl %ecx,216(%esp)
    [all …]
/arch/x86/realmode/rm/
reboot.S
     38  movl $MSR_EFER, %ecx
     58  movl $16, %ecx
     59  movl %ecx, %ds
     60  movl %ecx, %es
     61  movl %ecx, %fs
     62  movl %ecx, %gs
     63  movl %ecx, %ss
     95  xorl %ecx, %ecx
    100  movl %ecx, %cr3
/arch/x86/um/
checksum_32.S
     56  movl 16(%esp),%ecx   # Function arg: int len
     60  subl $2, %ecx        # Alignment uses up two bytes.
     62  addl $2, %ecx        # ecx was < 2.  Deal with it.
     69  movl %ecx, %edx
     70  shrl $5, %ecx
     90  dec %ecx
     93  2: movl %edx, %ecx
    102  4: andl $3, %ecx
    104  cmpl $2, %ecx
    109  shll $16,%ecx
    [all …]
/arch/x86/kernel/cpu/
vmware.c
     40  #define VMWARE_PORT(cmd, eax, ebx, ecx, edx)              \
     42          "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :      \
     50      uint32_t eax, ebx, ecx, edx;
     51      VMWARE_PORT(GETVERSION, eax, ebx, ecx, edx);
     58      uint32_t eax, ebx, ecx, edx;
     60      VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
     80      uint32_t eax, ebx, ecx, edx;
     82      VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
    134      uint32_t eax, ebx, ecx, edx;
    135      VMWARE_PORT(GETVCPU_INFO, eax, ebx, ecx, edx);
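VMWARE_PORT wraps the VMware "backdoor" protocol: an inl on the hypervisor port with a magic value in EAX and the command in ECX, with results returned in the same four registers. A sketch using the widely documented magic and port values, stated here as assumptions:

    #include <stdint.h>

    #define VMWARE_MAGIC     0x564D5868u  /* "VMXh", assumed */
    #define VMWARE_PORT_NUM  0x5658       /* "VX", assumed   */

    static void vmware_call(uint32_t cmd, uint32_t *eax, uint32_t *ebx,
                            uint32_t *ecx, uint32_t *edx)
    {
        asm volatile("inl (%%dx)"
                     : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx)
                     : "0"(VMWARE_MAGIC), "1"(~0u),
                       "2"(cmd), "3"(VMWARE_PORT_NUM)
                     : "memory");
    }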
topology.c
     20  #define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff)
     32      unsigned int eax, ebx, ecx, edx, sub_index;
     40      cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
     45      if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
     63          cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);
     68          if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
     75      } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
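detect_extended_topology() walks CPUID leaf 0xB sub-leaf by sub-leaf; ECX[15:8] reports each level's type (1 = SMT, 2 = core, 0 = invalid/end of list). A standalone sketch of the walk:

    #include <stdint.h>
    #include <cpuid.h>

    #define LEVEL_TYPE(ecx) (((ecx) >> 8) & 0xff)

    static void walk_topology(void)
    {
        uint32_t eax, ebx, ecx, edx;
        unsigned int sub = 0;

        do {
            __get_cpuid_count(0x0b, sub, &eax, &ebx, &ecx, &edx);
            /* eax[4:0]: shift to strip this level from the x2APIC id
             * ebx[15:0]: logical processors at this level
             * edx:       this CPU's x2APIC id */
            sub++;
        } while (LEVEL_TYPE(ecx) != 0);   /* 0 == invalid: end of levels */
    }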
/arch/x86/platform/efi/
efi_stub_32.S
     53  popl %ecx
     54  movl %ecx, efi_rt_function_ptr
     76  jmp *%ecx
    106  movl (%edx), %ecx
    107  pushl %ecx
    113  movl (%edx), %ecx
    114  pushl %ecx
/arch/x86/kernel/
relocate_kernel_32.S
     64  movl 20+16(%esp), %ecx   /* cpu_has_pae */
    120  testl %ecx, %ecx
    160  xorl %ecx, %ecx
    217  movl 4(%esp), %ecx
    222  movl %ecx, %ebx
    226  movl (%ebx), %ecx
    229  testl $0x1, %ecx         /* is it a destination page */
    231  movl %ecx, %edi
    235  testl $0x2, %ecx         /* is it an indirection page */
    237  movl %ecx, %ebx
    [all …]
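The tail of this listing is swap_pages walking the kexec indirection list: each entry is a page address with low flag bits saying whether it names a copy destination, another indirection page, a source page to copy, or the end of the list. A C rendering; the 0x1 and 0x2 tests are visible above, while 0x4 (done) and 0x8 (source) are assumed from kexec's IND_* convention:

    #include <stdint.h>
    #include <string.h>

    #define IND_DESTINATION 0x1
    #define IND_INDIRECTION 0x2
    #define IND_DONE        0x4   /* assumed */
    #define IND_SOURCE      0x8   /* assumed */

    static void process_list(unsigned long *entry)
    {
        char *dst = 0;

        for (;;) {
            unsigned long e = *entry++;
            void *page = (void *)(e & ~0xfUL);   /* strip flag bits */

            if (e & IND_DESTINATION)
                dst = page;                      /* next copy target */
            else if (e & IND_INDIRECTION)
                entry = page;                    /* continue in new list */
            else if (e & IND_SOURCE) {
                memcpy(dst, page, 4096);         /* copy one page */
                dst += 4096;
            } else if (e & IND_DONE)
                break;
        }
    }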
/arch/x86/boot/
cpucheck.c
    125      u32 ecx = MSR_K7_HWCR;
    128      asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
    130      asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
    140      u32 ecx = MSR_VIA_FCR;
    143      asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
    145      asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
    152      u32 ecx = 0x80860004;
    156      asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
    157      asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
    161      asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
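Each block performs a read-modify-write of an MSR through rdmsr/wrmsr, with the MSR index in ECX and the 64-bit value split across EDX:EAX. The same asm wrapped in small helpers (illustrative; the boot code inlines it):

    #include <stdint.h>

    static inline uint64_t rdmsr64(uint32_t msr)
    {
        uint32_t lo, hi;

        asm volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
        return ((uint64_t)hi << 32) | lo;
    }

    static inline void wrmsr64(uint32_t msr, uint64_t val)
    {
        asm volatile("wrmsr" : : "a"((uint32_t)val),
                                 "d"((uint32_t)(val >> 32)),
                                 "c"(msr));
    }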
pmjump.S
     53  movl %ecx, %ds
     54  movl %ecx, %es
     55  movl %ecx, %fs
     56  movl %ecx, %gs
     57  movl %ecx, %ss
     67  xorl %ecx, %ecx
/arch/x86/kernel/cpu/microcode/
core_early.c
     34  #define CPUID_IS(a, b, c, ebx, ecx, edx) \
     35      (!((ebx ^ (a)) | (edx ^ (b)) | (ecx ^ (c))))
     49      u32 ebx, ecx = 0, edx;
     51      native_cpuid(&eax, &ebx, &ecx, &edx);
     53      if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
     56      if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
     65      u32 ebx, ecx = 0, edx;
     68      native_cpuid(&eax, &ebx, &ecx, &edx);
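CPUID_IS XOR-compares the three registers that CPUID leaf 0 fills with the vendor string, EBX, EDX, ECX in that order, against packed four-byte constants. The equivalent, more explicit check:

    #include <stdint.h>
    #include <string.h>
    #include <cpuid.h>

    static int is_intel(void)
    {
        uint32_t eax, ebx, ecx, edx;
        char vendor[12];

        __get_cpuid(0, &eax, &ebx, &ecx, &edx);
        memcpy(vendor + 0, &ebx, 4);   /* "Genu" */
        memcpy(vendor + 4, &edx, 4);   /* "ineI" */
        memcpy(vendor + 8, &ecx, 4);   /* "ntel" */
        return !memcmp(vendor, "GenuineIntel", 12);
    }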
/arch/x86/kernel/acpi/
cstate.c
     63      unsigned int ecx;
     76      unsigned int eax, ebx, ecx, edx;
     81      cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
     98      if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
     99          !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
    132      percpu_entry->states[cx->index].ecx = 0;
    140      percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
    162      percpu_entry->states[cx->index].ecx);
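acpi_processor_ffh_cstate_probe_cpu() queries the MWAIT CPUID leaf (5) and requires both the extensions bit and the interrupts-break-mwait bit in ECX. A standalone sketch; the bit positions (0x1 and 0x2) are assumed from the CPUID5_ECX_* names:

    #include <stdint.h>
    #include <cpuid.h>

    #define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1   /* assumed */
    #define CPUID5_ECX_INTERRUPT_BREAK      0x2   /* assumed */

    static int mwait_cstate_usable(void)
    {
        uint32_t eax, ebx, ecx, edx;

        if (!__get_cpuid(5, &eax, &ebx, &ecx, &edx))
            return 0;   /* MWAIT leaf not implemented */

        return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) &&
               (ecx & CPUID5_ECX_INTERRUPT_BREAK);
    }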