/arch/x86/boot/compressed/ |
D | efi_stub_32.S |
    49   popl %ecx
    50   movl %ecx, saved_return_addr(%edx)
    52   popl %ecx
    53   movl %ecx, efi_rt_function_ptr(%edx)
    58   call *%ecx
    70   movl efi_rt_function_ptr(%edx), %ecx
    71   pushl %ecx
    76   movl saved_return_addr(%edx), %ecx
    77   pushl %ecx
|
D | head_32.S |
    129  movl $(_bss - startup_32), %ecx
    130  shrl $2, %ecx
    151  leal _ebss(%ebx), %ecx
    152  subl %edi, %ecx
    153  shrl $2, %ecx
    160  leal _egot(%ebx), %ecx
    162  cmpl %ecx, %edx
    202  movl (%edi), %ecx
    203  testl %ecx, %ecx
    205  addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
|
/arch/x86/math-emu/ |
D | reg_round.S |
    124  movl PARAM4,%ecx
    144  movl %ecx,%esi
    145  andl CW_PC,%ecx
    146  cmpl PR_64_BITS,%ecx
    149  cmpl PR_53_BITS,%ecx
    152  cmpl PR_24_BITS,%ecx
    158  cmpl PR_RESERVED_BITS,%ecx
    172  movl %esi,%ecx
    173  andl CW_RC,%ecx
    174  cmpl RC_RND,%ecx
    [all …]
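reg_round.S is selecting behaviour from the x87 control word: CW_PC/CW_RC mask out the precision-control and rounding-control fields, and the result is compared against PR_64_BITS/PR_53_BITS/PR_24_BITS and RC_RND. A minimal C sketch of that field decode, assuming the standard x87 control-word layout (PC in bits 8-9, RC in bits 10-11); the helper name is illustrative, not the emulator's own:

    #include <stdint.h>

    #define CW_PC      0x0300u   /* precision control, bits 8-9   */
    #define CW_RC      0x0c00u   /* rounding control,  bits 10-11 */

    #define PR_24_BITS 0x0000u   /* single precision   */
    #define PR_53_BITS 0x0200u   /* double precision   */
    #define PR_64_BITS 0x0300u   /* extended precision */
    #define RC_RND     0x0000u   /* round to nearest   */

    /* Illustrative helper: significand width selected by the control word. */
    static int precision_bits(uint16_t cw)
    {
        switch (cw & CW_PC) {
        case PR_64_BITS: return 64;
        case PR_53_BITS: return 53;
        case PR_24_BITS: return 24;
        default:         return -1;  /* reserved encoding */
        }
    }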
|
D | wm_sqrt.S |
    90   movl SIGL(%esi),%ecx
    99   rcrl $1,%ecx
    107  movl %ecx,FPU_fsqrt_arg_1
    113  movl $0xaaaaaaaa,%ecx
    114  mull %ecx
    127  movl FPU_fsqrt_arg_2,%ecx  /* ms word */
    136  shrl %ecx  /* Doing this first will prevent a divide */
    139  movl %ecx,%edx  /* msw of the arg / 2 */
    144  movl %ecx,%edx
    149  movl %ecx,%edx
    [all …]
|
D | mul_Xsig.S |
    34   movl PARAM2,%ecx
    41   mull %ecx  /* msl of b */
    45   mull %ecx  /* msl of b */
    51   mull %ecx  /* msl of b */
    74   movl PARAM2,%ecx
    81   mull 4(%ecx)  /* msl of b */
    85   mull (%ecx)   /* lsl of b */
    91   mull 4(%ecx)  /* msl of b */
    97   mull (%ecx)   /* lsl of b */
    103  mull 4(%ecx)  /* msl of b */
    [all …]
|
D | reg_u_sub.S |
    45   movl PARAM6,%ecx
    46   subl PARAM7,%ecx  /* exp1 - exp2 */
    155  xorl %ecx,%ecx
    156  subl %edx,%ecx
    157  movl %ecx,%edx
    158  movl SIGL(%esi),%ecx
    159  sbbl %ebx,%ecx
    160  movl %ecx,%ebx
    161  movl SIGH(%esi),%ecx
    162  sbbl %eax,%ecx
    [all …]
|
D | round_Xsig.S |
    51   bsrl %edx,%ecx  /* get the required shift in %ecx */
    52   subl $31,%ecx
    53   negl %ecx
    54   subl %ecx,-4(%ebp)
    122  bsrl %edx,%ecx  /* get the required shift in %ecx */
    123  subl $31,%ecx
    124  negl %ecx
    125  subl %ecx,-4(%ebp)
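Both hit groups (lines 51-54 and 122-125) derive a normalisation shift with bsrl: shift = 31 - bsr(msw), i.e. the number of leading zero bits in the most significant word, which is then subtracted from the saved exponent. Roughly, in C (a sketch only):

    #include <stdint.h>

    /* Illustrative: how far a 32-bit most-significant word must be shifted
     * left so its top bit lands in bit 31 -- the quantity the asm derives
     * with bsrl/subl/negl before adjusting the exponent. */
    static int norm_shift(uint32_t msw)
    {
        return msw ? __builtin_clz(msw) : 32;   /* clz(x) == 31 - bsr(x) */
    }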
|
D | reg_u_mul.S |
    68   xorl %ecx,%ecx
    86   adcl $0,%ecx
    91   adcl %edx,%ecx
    111  testl $0x80000000,%ecx
    118  rcll $1,%ecx
    130  movl %ecx,%eax
|
/arch/x86/lib/ |
D | atomic64_cx8_32.S |
    28   movl %ecx, %edx
    37   read64 %ecx
    77   movl %ecx, %ebp
    79   read64 %ecx
    82   movl %edx, %ecx
    84   \insc\()l %edi, %ecx
    91   movl %ecx, %edx
    112  movl %edx, %ecx
    114  \insc\()l $0, %ecx
    121  movl %ecx, %edx
    [all …]
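atomic64_cx8_32.S builds 64-bit atomics for 32-bit kernels on top of cmpxchg8b (the _cx8 suffix), which compares %edx:%eax with the 8-byte memory operand and, on a match, stores %ecx:%ebx into it. A hedged, 32-bit-only sketch of that primitive in GNU C inline assembly (illustrative, not the kernel's helper):

    /* Compile for 32-bit x86 (e.g. gcc -m32). Returns the value previously in *ptr. */
    static inline unsigned long long
    cmpxchg64_cx8(unsigned long long *ptr, unsigned long long old, unsigned long long newv)
    {
        unsigned long long prev = old;              /* %edx:%eax = expected value */

        asm volatile("lock; cmpxchg8b %1"           /* if *ptr == %edx:%eax:      */
                     : "+A" (prev), "+m" (*ptr)     /*   *ptr = %ecx:%ebx         */
                     : "b" ((unsigned int) newv),          /* low 32 bits of newv  */
                       "c" ((unsigned int)(newv >> 32))    /* high 32 bits of newv */
                     : "memory", "cc");
        return prev;                    /* equals 'old' iff the swap happened */
    }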
|
D | copy_user_nocache_64.S |
    21   movl %edi,%ecx
    22   andl $7,%ecx
    24   subl $8,%ecx
    25   negl %ecx
    26   subl %ecx,%edx
    31   decl %ecx
    35   103: addl %ecx,%edx  /* ecx is zerorest also */
    56   movl %edx,%ecx
    58   shrl $6,%ecx
    78   decl %ecx
    [all …]
|
D | copy_user_64.S |
    48   movl %edi,%ecx
    49   andl $7,%ecx
    51   subl $8,%ecx
    52   negl %ecx
    53   subl %ecx,%edx
    58   decl %ecx
    62   103: addl %ecx,%edx  /* ecx is zerorest also */
    109  movl %edx,%ecx
    138  movl %edx,%ecx
    140  shrl $6,%ecx
    [all …]
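Lines 48-53 (and the matching code in copy_user_nocache_64.S above) compute the byte-wise head of the copy: %ecx = dst & 7, then 8 minus that, subtracted from the remaining length so the bulk loop starts on an 8-byte-aligned destination. A rough C rendering of that prologue (illustrative; the real routines use unrolled wide moves and exception tables):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative: byte-wise head that aligns the destination to 8 bytes,
     * followed by the bulk body -- the arithmetic mirrored by lines 48-53. */
    static void copy_align8(void *dst, const void *src, size_t len)
    {
        unsigned char *d = dst;
        const unsigned char *s = src;
        size_t head = (uintptr_t)d & 7;

        if (head) {
            head = 8 - head;          /* subl $8 / negl in the asm */
            if (head > len)
                head = len;
            len -= head;              /* subl %ecx,%edx            */
            while (head--)
                *d++ = *s++;
        }
        /* ...bulk copy of 'len' bytes with wider, unrolled moves follows... */
        while (len--)
            *d++ = *s++;
    }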
|
D | checksum_32.S |
    58   movl 16(%esp),%ecx  # Function arg: int len
    66   dec %ecx
    75   subl $2, %ecx  # Alignment uses up two bytes.
    77   addl $2, %ecx  # ecx was < 2. Deal with it.
    84   movl %ecx, %edx
    85   shrl $5, %ecx
    105  dec %ecx
    108  2: movl %edx, %ecx
    117  4: andl $3, %ecx
    119  cmpl $2, %ecx
    [all …]
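checksum_32.S is the hand-unrolled Internet (ones'-complement) checksum: %ecx tracks the remaining length, and the shrl $5 hit turns it into a count of 32-byte blocks for the unrolled inner loop. For reference, an unoptimised C version of the same fold (a sketch, not the kernel's csum_partial):

    #include <stddef.h>
    #include <stdint.h>

    /* Plain ones'-complement sum over 16-bit words, as in RFC 1071. */
    static uint32_t csum_partial_ref(const void *buf, size_t len, uint32_t sum)
    {
        const uint8_t *p = buf;

        while (len >= 2) {
            sum += (uint32_t)p[0] | ((uint32_t)p[1] << 8);  /* little-endian load */
            len -= 2;
            p += 2;
        }
        if (len)                      /* trailing odd byte */
            sum += p[0];

        while (sum >> 16)             /* fold carries back into the low 16 bits */
            sum = (sum & 0xffff) + (sum >> 16);
        return sum;
    }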
|
/arch/x86/kernel/ |
D | reboot_32.S |
    27   movl %ebx, %ecx
    28   shrl $4, %ecx
    48   movl $16, %ecx
    49   movl %ecx, %ds
    50   movl %ecx, %es
    51   movl %ecx, %fs
    52   movl %ecx, %gs
    53   movl %ecx, %ss
    81   xorl %ecx, %ecx
    86   movl %ecx, %cr3
|
D | relocate_kernel_32.S |
    64   movl 20+16(%esp), %ecx  /* cpu_has_pae */
    120  testl %ecx, %ecx
    160  xorl %ecx, %ecx
    217  movl 4(%esp), %ecx
    222  movl %ecx, %ebx
    226  movl (%ebx), %ecx
    229  testl $0x1, %ecx  /* is it a destination page */
    231  movl %ecx, %edi
    235  testl $0x2, %ecx  /* is it an indirection page */
    237  movl %ecx, %ebx
    [all …]
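From line 217 on this is the kexec page-list walk: each list entry is a page address with flag bits in its low bits, and the testl $0x1 / testl $0x2 hits check for destination and indirection entries. A hedged C sketch of that walk, using the IND_* flag values from the kexec headers and assuming identity-mapped addresses as in the relocation stub:

    #include <stdint.h>

    #define IND_DESTINATION 0x1   /* entry names the next destination page    */
    #define IND_INDIRECTION 0x2   /* entry points at the next page of entries */
    #define IND_DONE        0x4   /* end of the list                          */
    #define IND_SOURCE      0x8   /* entry names a source page to copy        */

    /* Illustrative walk of a kexec indirection list. */
    static void walk_pages(unsigned long *entry)
    {
        unsigned long *dst = 0;

        for (;;) {
            unsigned long e = *entry++;

            if (e & IND_DONE)
                break;
            else if (e & IND_INDIRECTION)
                entry = (unsigned long *)(e & ~0xfUL);   /* follow to next page */
            else if (e & IND_DESTINATION)
                dst = (unsigned long *)(e & ~0xfUL);
            else if (e & IND_SOURCE) {
                /* copy one 4 KiB page from the source to the current destination */
                const unsigned long *src = (const unsigned long *)(e & ~0xfUL);
                for (unsigned int i = 0; i < 4096 / sizeof(long); i++)
                    dst[i] = src[i];
                dst += 4096 / sizeof(long);
            }
        }
    }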
|
D | head_32.S |
    88   movl pa(stack_start),%ecx
    106  leal -__PAGE_OFFSET(%ecx),%esp
    114  movl $pa(__bss_stop),%ecx
    115  subl %edi,%ecx
    116  shrl $2,%ecx
    127  movl $(PARAM_SIZE/4),%ecx
    135  movl $(COMMAND_LINE_SIZE/4),%ecx
    172  leal PDE_IDENT_ATTR(%edi),%ecx  /* Create PMD entry */
    173  movl %ecx,(%edx)  /* Store PMD entry */
    176  movl $512,%ecx
    [all …]
|
/arch/x86/crypto/ |
D | salsa20-i586-asm_32.S |
    40   movl 4(%edx),%ecx
    48   movl %ecx,168(%esp)
    50   movl 16(%edx),%ecx
    60   movl %ecx,180(%esp)
    62   movl 28(%edx),%ecx
    72   movl %ecx,192(%esp)
    74   movl 40(%edx),%ecx
    84   movl %ecx,204(%esp)
    86   movl 52(%edx),%ecx
    96   movl %ecx,216(%esp)
    [all …]
|
/arch/x86/platform/efi/ |
D | efi_stub_32.S |
    53   popl %ecx
    54   movl %ecx, efi_rt_function_ptr
    76   jmp *%ecx
    106  movl (%edx), %ecx
    107  pushl %ecx
    113  movl (%edx), %ecx
    114  pushl %ecx
|
/arch/x86/um/ |
D | checksum_32.S |
    55   movl 16(%esp),%ecx  # Function arg: int len
    59   subl $2, %ecx  # Alignment uses up two bytes.
    61   addl $2, %ecx  # ecx was < 2. Deal with it.
    68   movl %ecx, %edx
    69   shrl $5, %ecx
    89   dec %ecx
    92   2: movl %edx, %ecx
    101  4: andl $3, %ecx
    103  cmpl $2, %ecx
    108  shll $16,%ecx
    [all …]
|
/arch/x86/kernel/cpu/ |
D | topology.c |
    20   #define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff)  argument
    32   unsigned int eax, ebx, ecx, edx, sub_index;  in detect_extended_topology()  local
    40   cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);  in detect_extended_topology()
    45   if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))  in detect_extended_topology()
    63   cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);  in detect_extended_topology()
    68   if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {  in detect_extended_topology()
    75   } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);  in detect_extended_topology()
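detect_extended_topology() iterates CPUID leaf 0xB: for each sub-leaf, ECX[15:8] gives the level type (1 = SMT, 2 = core, 0 = invalid/end) and EAX[4:0] the APIC-ID shift for that level, with the logical-processor count in EBX. A user-space sketch of the same enumeration using GCC's <cpuid.h> (the kernel uses its own cpuid_count() helper):

    #include <stdio.h>
    #include <cpuid.h>

    #define LEAFB_SUBTYPE(ecx)  (((ecx) >> 8) & 0xff)   /* 0=invalid, 1=SMT, 2=core */

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        for (unsigned int sub = 0; ; sub++) {
            if (!__get_cpuid_count(0xb, sub, &eax, &ebx, &ecx, &edx))
                break;                          /* leaf 0xb not supported       */
            if (LEAFB_SUBTYPE(ecx) == 0)        /* INVALID_TYPE: end of levels  */
                break;
            printf("level %u: type %u, shift %u, logical CPUs %u\n",
                   sub, LEAFB_SUBTYPE(ecx), eax & 0x1f, ebx & 0xffff);
        }
        return 0;
    }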
|
D | vmware.c |
    37   #define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \  argument
    39   "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \
    47   uint32_t eax, ebx, ecx, edx;  in __vmware_platform()  local
    48   VMWARE_PORT(GETVERSION, eax, ebx, ecx, edx);  in __vmware_platform()
    55   uint32_t eax, ebx, ecx, edx;  in vmware_get_tsc_khz()  local
    57   VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);  in vmware_get_tsc_khz()
    77   uint32_t eax, ebx, ecx, edx;  in vmware_platform_setup()  local
    79   VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);  in vmware_platform_setup()
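VMWARE_PORT() is the VMware "backdoor" call: EAX holds the magic 0x564D5868, ECX the command, DX the port 0x5658, and an inl is intercepted by the hypervisor, which answers in the registers. A hedged sketch of the GETVERSION probe (command 10 in the usual backdoor numbering); the in instruction needs I/O privilege or a virtualised port, so treat it as illustrative rather than something to run directly:

    #include <stdint.h>

    #define VMWARE_MAGIC           0x564D5868u   /* "VMXh" */
    #define VMWARE_PORT_NUM        0x5658u       /* "VX"   */
    #define VMWARE_CMD_GETVERSION  10u

    /* Returns non-zero if the backdoor answered with the magic in EBX. */
    static int vmware_backdoor_getversion(uint32_t *version)
    {
        uint32_t eax, ebx, ecx, edx;

        asm volatile("inl %%dx, %%eax"
                     : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                     : "0" (VMWARE_MAGIC), "1" (~0u),
                       "2" (VMWARE_CMD_GETVERSION), "3" (VMWARE_PORT_NUM)
                     : "memory");
        (void)ecx; (void)edx;                 /* unused reply registers here */
        if (ebx != VMWARE_MAGIC)
            return 0;
        *version = eax;
        return 1;
    }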
|
/arch/x86/include/asm/ |
D | virtext.h |
    29   unsigned long ecx = cpuid_ecx(1);  in cpu_has_vmx()  local
    30   return test_bit(5, &ecx);  /* CPUID.1:ECX.VMX[bit 5] -> VT */  in cpu_has_vmx()
    85   uint32_t eax, ebx, ecx, edx;  in cpu_has_svm()  local
    93   cpuid(0x80000000, &eax, &ebx, &ecx, &edx);  in cpu_has_svm()
    100  cpuid(0x80000001, &eax, &ebx, &ecx, &edx);  in cpu_has_svm()
    101  if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {  in cpu_has_svm()
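cpu_has_vmx() tests CPUID.1:ECX bit 5 (VMX), and cpu_has_svm() tests CPUID.0x80000001:ECX bit 2 (SVM) after confirming the extended leaf exists. The same checks from user space with GCC's <cpuid.h> (a sketch; the kernel additionally checks the CPU vendor):

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            printf("VMX: %s\n", (ecx & (1u << 5)) ? "yes" : "no");   /* CPUID.1:ECX[5] */

        if (__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
            printf("SVM: %s\n", (ecx & (1u << 2)) ? "yes" : "no");   /* 0x80000001:ECX[2] */

        return 0;
    }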
|
/arch/x86/boot/ |
D | cpucheck.c |
    203  u32 ecx = MSR_K7_HWCR;  in check_cpu()  local
    206  asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));  in check_cpu()
    208  asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));  in check_cpu()
    218  u32 ecx = MSR_VIA_FCR;  in check_cpu()  local
    221  asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));  in check_cpu()
    223  asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));  in check_cpu()
    230  u32 ecx = 0x80860004;  in check_cpu()  local
    234  asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));  in check_cpu()
    235  asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));  in check_cpu()
    239  asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));  in check_cpu()
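check_cpu() reads and rewrites model-specific registers with raw rdmsr/wrmsr: the MSR index travels in ECX and the 64-bit value is split across EDX:EAX. A minimal sketch of that inline-assembly pattern (ring 0 only; from user space MSRs are normally reached via /dev/cpu/*/msr instead):

    #include <stdint.h>

    /* rdmsr: ECX selects the MSR, the result comes back in EDX:EAX. Ring 0 only. */
    static inline uint64_t rdmsr(uint32_t msr)
    {
        uint32_t lo, hi;

        asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
        return ((uint64_t)hi << 32) | lo;
    }

    /* wrmsr: EDX:EAX is written to the MSR selected by ECX. Ring 0 only. */
    static inline void wrmsr(uint32_t msr, uint64_t val)
    {
        asm volatile("wrmsr" : : "c" (msr),
                     "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)));
    }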
|
D | pmjump.S |
    53   movl %ecx, %ds
    54   movl %ecx, %es
    55   movl %ecx, %fs
    56   movl %ecx, %gs
    57   movl %ecx, %ss
    67   xorl %ecx, %ecx
|
/arch/x86/power/ |
D | hibernate_asm_32.S |
    30   movl mmu_cr4_features, %ecx
    36   andl $~(X86_CR4_PGE), %ecx
    37   movl %ecx, %cr4;  # turn off PGE
    51   movl $1024, %ecx
    64   movl mmu_cr4_features, %ecx
    66   movl %ecx, %cr4;  # turn PGE back on
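Lines 30-37 and 64-66 clear and later restore CR4.PGE around the page-table switch: global TLB entries survive a CR3 reload, so toggling PGE is what forces them out. Roughly, in kernel-mode C (X86_CR4_PGE is bit 7; purely illustrative, ring 0 only):

    #define X86_CR4_PGE (1ul << 7)

    /* Flush global TLB entries by toggling CR4.PGE (ring 0 only). */
    static inline void flush_tlb_global(void)
    {
        unsigned long cr4;

        asm volatile("mov %%cr4, %0" : "=r" (cr4));
        asm volatile("mov %0, %%cr4" : : "r" (cr4 & ~X86_CR4_PGE) : "memory");  /* PGE off: flush */
        asm volatile("mov %0, %%cr4" : : "r" (cr4) : "memory");                 /* PGE back on */
    }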
|
/arch/x86/kernel/acpi/ |
D | cstate.c |
    63   unsigned int ecx;  member
    76   unsigned int eax, ebx, ecx, edx;  in acpi_processor_ffh_cstate_probe_cpu()  local
    81   cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);  in acpi_processor_ffh_cstate_probe_cpu()
    96   if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||  in acpi_processor_ffh_cstate_probe_cpu()
    97   !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {  in acpi_processor_ffh_cstate_probe_cpu()
    130  percpu_entry->states[cx->index].ecx = 0;  in acpi_processor_ffh_cstate_probe()
    138  percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;  in acpi_processor_ffh_cstate_probe()
    183  percpu_entry->states[cx->index].ecx);  in acpi_processor_ffh_cstate_enter()
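acpi_processor_ffh_cstate_probe_cpu() reads CPUID leaf 5 (MONITOR/MWAIT): ECX bit 0 advertises the MWAIT extensions and bit 1 the interrupts-as-break-events capability that the per-state ecx value (MWAIT_ECX_INTERRUPT_BREAK) later requests. The same check from user space (a sketch with GCC's <cpuid.h>; the constant names mirror the kernel's):

    #include <stdio.h>
    #include <cpuid.h>

    #define CPUID_MWAIT_LEAF                 5
    #define CPUID5_ECX_EXTENSIONS_SUPPORTED  0x1   /* MWAIT extensions enumerated */
    #define CPUID5_ECX_INTERRUPT_BREAK       0x2   /* interrupts break MWAIT even if masked */

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx))
            return 1;
        printf("MWAIT extensions: %s, interrupt break-event: %s\n",
               (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ? "yes" : "no",
               (ecx & CPUID5_ECX_INTERRUPT_BREAK) ? "yes" : "no");
        return 0;
    }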
|