| /kernel/liteos_m/targets/riscv_nuclei_gd32vf103_soc_gcc/SoC/gd32vf103/Common/Include/ |
| D | gd32vf103_rcu.h | 520 …_MUL2 CFG0_PLLMF(0) /*!< PLL source clock multiply by 2 */ 521 …_MUL3 CFG0_PLLMF(1) /*!< PLL source clock multiply by 3 */ 522 …_MUL4 CFG0_PLLMF(2) /*!< PLL source clock multiply by 4 */ 523 …_MUL5 CFG0_PLLMF(3) /*!< PLL source clock multiply by 5 */ 524 …_MUL6 CFG0_PLLMF(4) /*!< PLL source clock multiply by 6 */ 525 …_MUL7 CFG0_PLLMF(5) /*!< PLL source clock multiply by 7 */ 526 …_MUL8 CFG0_PLLMF(6) /*!< PLL source clock multiply by 8 */ 527 …_MUL9 CFG0_PLLMF(7) /*!< PLL source clock multiply by 9 */ 528 …MUL10 CFG0_PLLMF(8) /*!< PLL source clock multiply by 10 */ 529 …MUL11 CFG0_PLLMF(9) /*!< PLL source clock multiply by 11 */ [all …]
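
The pattern in these defines is RCU_PLL_MULn = CFG0_PLLMF(n - 2): the value written into the PLLMF field of RCU_CFG0 is the multiplication factor minus two. A minimal sketch of that encoding, assuming a field offset of 18 purely for illustration (the header's CFG0_PLLMF() macro owns the real bit layout):

```c
/* Sketch of the RCU_PLL_MULx encoding; the PLLMF field offset below is an
 * assumption for illustration, not taken from the header. */
#include <stdint.h>
#include <stdio.h>

#define PLLMF_SHIFT   18U                               /* assumed offset  */
#define CFG0_PLLMF(v) ((uint32_t)(v) << PLLMF_SHIFT)

/* RCU_PLL_MULn expands to CFG0_PLLMF(n - 2), so multiply-by-6 is code 4. */
#define RCU_PLL_MUL(n) CFG0_PLLMF((n) - 2U)

int main(void)
{
    printf("MUL6 selector = 0x%08x\n", RCU_PLL_MUL(6)); /* 0x00100000 */
    return 0;
}
```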
|
| /kernel/linux/linux-5.10/tools/perf/pmu-events/arch/x86/amdzen1/ |
| D | floating-point.json | 94 "BriefDescription": "Multiply Ops.", 95 … Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Multiply Ops.", 115 "BriefDescription": "Double precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.", 116 …from 0 to 64. This event can count above 15. Double precision multiply-add FLOPS. Multiply-add cou… 129 "BriefDescription": "Double precision multiply FLOPS.", 130 … per cycle can vary from 0 to 64. This event can count above 15. Double precision multiply FLOPS.", 143 "BriefDescription": "Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.", 144 …from 0 to 64. This event can count above 15. Single precision multiply-add FLOPS. Multiply-add cou… 157 "BriefDescription": "Single-precision multiply FLOPS.", 158 … per cycle can vary from 0 to 64. This event can count above 15. Single-precision multiply FLOPS.",
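
Per the descriptions above, a retired multiply-add is counted as 2 FLOPS while a plain multiply counts as 1, so a FLOP total is a weighted sum of the two counters. A small sketch of that weighting; the variable names are hypothetical, only the 2x rule comes from the event descriptions:

```c
/* Weighting rule from the event descriptions: multiply-add counts twice. */
#include <stdint.h>
#include <stdio.h>

static uint64_t total_flops(uint64_t mul_ops, uint64_t mac_ops)
{
    return mul_ops + 2 * mac_ops;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)total_flops(1000, 500)); /* 2000 */
    return 0;
}
```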
|
| /kernel/linux/linux-5.10/arch/parisc/math-emu/ |
| D | fmpyfadd.c | 15 * Double Floating-point Multiply Fused Add 16 * Double Floating-point Multiply Negate Fused Add 17 * Single Floating-point Multiply Fused Add 18 * Single Floating-point Multiply Negate Fused Add 41 * Double Floating-point Multiply Fused Add 68 * set sign bit of result of multiply in dbl_fmpyfadd() 75 * Generate multiply exponent in dbl_fmpyfadd() 100 * sign opposite of the multiply result in dbl_fmpyfadd() 178 * invalid since multiply operands are in dbl_fmpyfadd() 191 * sign opposite of the multiply result in dbl_fmpyfadd() [all …]
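
The comment headings above ("set sign bit of result of multiply", "Generate multiply exponent") correspond to the two easy parts of an IEEE-754 multiply: the product's sign is the XOR of the operand signs, and its biased exponent is the sum of the operand exponents minus the bias. A hedged sketch for doubles, not the emulator's actual code:

```c
/* Sketch only: sign and exponent of an IEEE-754 double multiply, assuming
 * the usual 1/11/52 field layout. */
#include <stdint.h>
#include <stdio.h>

#define DBL_BIAS 1023

static uint64_t mul_sign(uint64_t a, uint64_t b)
{
    return (a ^ b) & (1ULL << 63);       /* sign of product = XOR of signs */
}

static int mul_exponent(uint64_t a, uint64_t b)
{
    int ea = (int)((a >> 52) & 0x7FF);
    int eb = (int)((b >> 52) & 0x7FF);
    /* biased exponent of the product, before any normalization adjustment */
    return ea + eb - DBL_BIAS;
}

int main(void)
{
    uint64_t two      = 0x4000000000000000ULL;   /*  2.0 */
    uint64_t neg_half = 0xBFE0000000000000ULL;   /* -0.5 */

    printf("sign set: %d, exponent: %d\n",
           mul_sign(two, neg_half) != 0, mul_exponent(two, neg_half));
    return 0;
}
```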
|
| /kernel/linux/linux-4.19/arch/parisc/math-emu/ |
| D | fmpyfadd.c | 28 * Double Floating-point Multiply Fused Add 29 * Double Floating-point Multiply Negate Fused Add 30 * Single Floating-point Multiply Fused Add 31 * Single Floating-point Multiply Negate Fused Add 54 * Double Floating-point Multiply Fused Add 81 * set sign bit of result of multiply in dbl_fmpyfadd() 88 * Generate multiply exponent in dbl_fmpyfadd() 113 * sign opposite of the multiply result in dbl_fmpyfadd() 191 * invalid since multiply operands are in dbl_fmpyfadd() 204 * sign opposite of the multiply result in dbl_fmpyfadd() [all …]
|
| /kernel/linux/linux-5.10/arch/m68k/include/asm/ |
| D | delay.h | 50 * multiply instruction. So we need to handle them a little differently. 51 * We use a bit of shifting and a single 32*32->32 multiply to get close. 109 * multiply instruction. So we need to handle them a little differently. 110 * We use a bit of shifting and a single 32*32->32 multiply to get close. 112 * multiply and shift.
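
The delay code avoids a 32*32->64 multiply by folding the microseconds-to-loops conversion into shifts plus a single 32*32->32 multiply. A sketch of the idea with made-up scaling (HZ = 100, 16.16 fixed point) rather than the header's real constants; in the header the reciprocal factor is a compile-time constant, so only the one multiply remains at run time:

```c
/* Sketch only: the header's real constants and shift counts differ. */
#include <stdint.h>
#include <stdio.h>

static uint32_t usecs_to_loops(uint32_t usecs, uint32_t loops_per_jiffy)
{
    /* loops per microsecond in 16.16 fixed point, assuming HZ = 100; in the
     * real header this factor is a compile-time constant. */
    uint32_t per_usec_fp = (uint32_t)(((uint64_t)loops_per_jiffy * 100 << 16)
                                      / 1000000);

    return (usecs * per_usec_fp) >> 16;  /* one 32x32->32 multiply + shift */
}

int main(void)
{
    /* loops_per_jiffy = 50000 at HZ = 100  ->  5 loops per microsecond */
    printf("%u\n", usecs_to_loops(10, 50000));   /* prints 50 */
    return 0;
}
```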
|
| D | hash.h | 13 * entirely, let's keep it simple and just use an optimized multiply 16 * The best way to do that appears to be to multiply by 0x8647 with 17 * shifts and adds, and use mulu.w to multiply the high half by 0x61C8.
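
The split the comment describes works because x * 0x61C88647 (mod 2^32) equals x * 0x8647 plus (x * 0x61C8) << 16, and only the low 16 bits of the second product survive the shift, so a 16x16->32 mulu.w covers it. A plain-C sketch of that identity (not the inline asm):

```c
/* Sketch of the low/high split of GOLDEN_RATIO_32 = 0x61C88647. */
#include <stdint.h>
#include <stdio.h>

static uint32_t hash_golden(uint32_t x)
{
    uint32_t lo = x * 0x8647u;            /* done with shifts/adds on 68000 */
    uint32_t hi = (uint16_t)x * 0x61C8u;  /* mulu.w: 16x16->32 multiply     */

    return lo + (hi << 16);
}

int main(void)
{
    uint32_t x = 123456789u;
    printf("%08x %08x\n", hash_golden(x), x * 0x61C88647u);  /* identical */
    return 0;
}
```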
|
| /kernel/linux/linux-4.19/arch/m68k/include/asm/ |
| D | delay.h | 50 * multiply instruction. So we need to handle them a little differently. 51 * We use a bit of shifting and a single 32*32->32 multiply to get close. 109 * multiply instruction. So we need to handle them a little differently. 110 * We use a bit of shifting and a single 32*32->32 multiply to get close. 112 * multiply and shift.
|
| D | hash.h | 13 * entirely, let's keep it simple and just use an optimized multiply 16 * The best way to do that appears to be to multiply by 0x8647 with 17 * shifts and adds, and use mulu.w to multiply the high half by 0x61C8.
|
| /kernel/linux/linux-4.19/arch/microblaze/lib/ |
| D | mulsi3.S | 5 * Multiply operation for 32 bit integers. 18 beqi r5, result_is_zero /* multiply by zero */ 19 beqi r6, result_is_zero /* multiply by zero */
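
__mulsi3 exists for MicroBlaze configurations without a hardware multiplier; the matched lines show its multiply-by-zero early exits. A C sketch of the same shift-and-add approach (the real routine is hand-written assembly):

```c
/* Software 32-bit multiply, including the result_is_zero fast path. */
#include <stdint.h>
#include <stdio.h>

static uint32_t mulsi3(uint32_t a, uint32_t b)
{
    uint32_t result = 0;

    if (a == 0 || b == 0)            /* multiply by zero: early out */
        return 0;

    while (b) {                      /* classic shift-and-add loop */
        if (b & 1)
            result += a;
        a <<= 1;
        b >>= 1;
    }
    return result;
}

int main(void)
{
    printf("%u\n", mulsi3(7, 6));    /* 42 */
    return 0;
}
```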
|
| /kernel/linux/linux-5.10/arch/microblaze/lib/ |
| D | mulsi3.S | 5 * Multiply operation for 32 bit integers. 18 beqi r5, result_is_zero /* multiply by zero */ 19 beqi r6, result_is_zero /* multiply by zero */
|
| /kernel/linux/linux-4.19/lib/mpi/ |
| D | mpih-mul.c | 50 /* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP), 74 /* Multiply by the first limb in V separately, as the result can be in mul_n_basecase() 89 /* For each iteration in the outer loop, multiply one limb from in mul_n_basecase() 113 * Multiply the least significant (size - 1) limbs with a recursive in mul_n() 226 /* Multiply by the first limb in V separately, as the result can be in mpih_sqr_n_basecase() 241 /* For each iteration in the outer loop, multiply one limb from in mpih_sqr_n_basecase() 262 * Multiply the least significant (size - 1) limbs with a recursive in mpih_sqr_n() 424 /* Multiply the natural numbers u (pointed to by UP, with USIZE limbs) 456 /* Multiply by the first limb in V separately, as the result can be in mpihelp_mul() 471 /* For each iteration in the outer loop, multiply one limb from in mpihelp_mul()
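
The basecase these comments describe is schoolbook long multiplication over limbs: the first limb of V initializes the product, and every further limb is a multiply-accumulate pass; the recursive (Karatsuba-style) path only kicks in above a size threshold. A sketch of the basecase with 32-bit limbs and a 64-bit intermediate; the structure only echoes lib/mpi, it is not that code:

```c
/* Schoolbook multiply of little-endian limb arrays: prod gets usize + vsize
 * limbs. Sketch only, not lib/mpi's mul_n_basecase(). */
#include <stdint.h>
#include <stdio.h>

static void mul_basecase(uint32_t *prod, const uint32_t *u, int usize,
                         const uint32_t *v, int vsize)
{
    /* Multiply by the first limb of V separately: prod starts out empty. */
    uint64_t carry = 0;
    for (int i = 0; i < usize; i++) {
        uint64_t t = (uint64_t)u[i] * v[0] + carry;
        prod[i] = (uint32_t)t;
        carry = t >> 32;
    }
    prod[usize] = (uint32_t)carry;

    /* For each remaining limb of V, multiply and accumulate into prod. */
    for (int j = 1; j < vsize; j++) {
        carry = 0;
        for (int i = 0; i < usize; i++) {
            uint64_t t = (uint64_t)u[i] * v[j] + prod[i + j] + carry;
            prod[i + j] = (uint32_t)t;
            carry = t >> 32;
        }
        prod[usize + j] = (uint32_t)carry;
    }
}

int main(void)
{
    uint32_t u[2] = { 0xFFFFFFFF, 0x1 };   /* 0x1FFFFFFFF */
    uint32_t v[1] = { 0x10 };
    uint32_t p[3] = { 0 };

    mul_basecase(p, u, 2, v, 1);
    printf("%08x %08x %08x\n", p[2], p[1], p[0]);  /* 00000000 0000001f fffffff0 */
    return 0;
}
```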
|
| /kernel/linux/linux-5.10/lib/mpi/ |
| D | mpih-mul.c | 37 /* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP), 61 /* Multiply by the first limb in V separately, as the result can be in mul_n_basecase() 76 /* For each iteration in the outer loop, multiply one limb from in mul_n_basecase() 100 * Multiply the least significant (size - 1) limbs with a recursive in mul_n() 213 /* Multiply by the first limb in V separately, as the result can be in mpih_sqr_n_basecase() 228 /* For each iteration in the outer loop, multiply one limb from in mpih_sqr_n_basecase() 249 * Multiply the least significant (size - 1) limbs with a recursive in mpih_sqr_n() 436 /* Multiply the natural numbers u (pointed to by UP, with USIZE limbs) 468 /* Multiply by the first limb in V separately, as the result can be in mpihelp_mul() 483 /* For each iteration in the outer loop, multiply one limb from in mpihelp_mul()
|
| /kernel/linux/linux-5.10/crypto/ |
| D | ecc.h | 232 * @left: vli number to multiply with @right 233 * @right: vli number to multiply with @left 246 * @x: scalar to multiply with @p 247 * @p: point to multiply with @x 248 * @y: scalar to multiply with @q 249 * @q: point to multiply with @y
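
Both the single scalar multiply and the combined x*P + y*Q form reduce to double-and-add over the group law. The sketch below uses integers under addition as a stand-in group so the loop structure is visible without curve arithmetic; the real crypto/ecc code is constant-time and works on actual curve points:

```c
/* Double-and-add skeleton with a stand-in group (integers under addition). */
#include <stdint.h>
#include <stdio.h>

static uint64_t scalar_mult(uint64_t scalar, uint64_t point)
{
    uint64_t acc = 0;                 /* group identity */

    while (scalar) {
        if (scalar & 1)
            acc += point;             /* group "add"    */
        point += point;               /* group "double" */
        scalar >>= 1;
    }
    return acc;
}

int main(void)
{
    uint64_t p = 7, q = 11, x = 3, y = 5;

    /* combined x*P + y*Q, as in the API the comments describe */
    printf("%llu\n",
           (unsigned long long)(scalar_mult(x, p) + scalar_mult(y, q)));
    return 0;
}
```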
|
| /kernel/linux/linux-5.10/arch/mips/lib/ |
| D | multi3.c | 14 /* multiply 64-bit values, low 64-bits returned */ 23 /* multiply 64-bit unsigned values, high 64-bits of 128-bit result returned */ 32 /* multiply 128-bit values, low 128-bits returned */
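
__multi3 assembles a 128-bit product out of 64-bit multiplies of the operand halves plus the cross terms. The same decomposition one level down, 64x64->128 built from 32x32->64 pieces, as a portable sketch:

```c
/* Full 64x64->128 product from four 32x32->64 partial products. */
#include <stdint.h>
#include <stdio.h>

static void mul_64x64_128(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
{
    uint64_t a_lo = (uint32_t)a, a_hi = a >> 32;
    uint64_t b_lo = (uint32_t)b, b_hi = b >> 32;

    uint64_t p0 = a_lo * b_lo;                 /* low  x low  */
    uint64_t p1 = a_lo * b_hi;                 /* cross terms */
    uint64_t p2 = a_hi * b_lo;
    uint64_t p3 = a_hi * b_hi;                 /* high x high */

    uint64_t mid = (p0 >> 32) + (uint32_t)p1 + (uint32_t)p2;

    *lo = (mid << 32) | (uint32_t)p0;
    *hi = p3 + (p1 >> 32) + (p2 >> 32) + (mid >> 32);
}

int main(void)
{
    uint64_t hi, lo;

    mul_64x64_128(0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL, &hi, &lo);
    printf("%016llx %016llx\n", (unsigned long long)hi,
           (unsigned long long)lo);   /* fffffffffffffffe 0000000000000001 */
    return 0;
}
```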
|
| /kernel/linux/linux-4.19/arch/mips/lib/ |
| D | multi3.c | 14 /* multiply 64-bit values, low 64-bits returned */ 23 /* multiply 64-bit unsigned values, high 64-bits of 128-bit result returned */ 32 /* multiply 128-bit values, low 128-bits returned */
|
| /kernel/linux/linux-5.10/include/linux/ |
| D | hash.h | 38 * which is very slightly easier to multiply by and makes no 80 /* 64x64-bit multiply is efficient on all 64-bit processors */ in hash_64_generic() 83 /* Hash 64 bits using only 32x32-bit multiply. */ in hash_64_generic()
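
The "64x64-bit multiply is efficient" comment refers to the one-multiply form of hash_64: multiply by GOLDEN_RATIO_64 and keep the top bits. A sketch of that path, using the constant from this header; the 32-bit fallback mentioned alongside it gets by with 32x32 multiplies instead:

```c
/* 64-bit hash path: one multiply, then keep the top `bits` bits. */
#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_64 0x61C8864680B583EBULL

static uint32_t hash_64(uint64_t val, unsigned int bits)
{
    return (uint32_t)((val * GOLDEN_RATIO_64) >> (64 - bits));
}

int main(void)
{
    printf("%u\n", hash_64(0xDEADBEEFULL, 10));   /* bucket in [0, 1023] */
    return 0;
}
```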
|
| /kernel/linux/linux-5.10/tools/include/linux/ |
| D | hash.h | 38 * which is very slightly easier to multiply by and makes no 80 /* 64x64-bit multiply is efficient on all 64-bit processors */ in hash_64_generic() 83 /* Hash 64 bits using only 32x32-bit multiply. */ in hash_64_generic()
|
| /kernel/linux/linux-4.19/tools/include/linux/ |
| D | hash.h | 38 * which is very slightly easier to multiply by and makes no 80 /* 64x64-bit multiply is efficient on all 64-bit processors */ in hash_64_generic() 83 /* Hash 64 bits using only 32x32-bit multiply. */ in hash_64_generic()
|
| /kernel/linux/linux-4.19/include/linux/ |
| D | hash.h | 38 * which is very slightly easier to multiply by and makes no 80 /* 64x64-bit multiply is efficient on all 64-bit processors */ in hash_64_generic() 83 /* Hash 64 bits using only 32x32-bit multiply. */ in hash_64_generic()
|
| /kernel/linux/linux-4.19/arch/m68k/fpsp040/ |
| D | binstr.S | 28 | A3. Multiply the fraction in d2:d3 by 8 using bit-field 32 | A4. Multiply the fraction in d4:d5 by 2 using shifts. The msb 87 | A3. Multiply d2:d3 by 8; extract msbs into d1. 95 | A4. Multiply d4:d5 by 2; add carry out to d1.
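
The multiply-by-8 and multiply-by-2 steps together implement a multiply by 10: each pass scales the remaining binary fraction by ten, and the bits that overflow the fraction are the next decimal digit. A plain-C illustration with a 32-bit fraction, not the 68040 FPSP assembly:

```c
/* Emit decimal digits of a binary fraction (value = frac / 2^32) by
 * repeated multiply-by-10, built as (x << 3) + (x << 1). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t frac = 0x1999999Au;     /* ~0.1 as frac / 2^32 */

    for (int i = 0; i < 6; i++) {
        uint64_t t = ((uint64_t)frac << 3) + ((uint64_t)frac << 1);

        putchar('0' + (int)(t >> 32));   /* carry out = next decimal digit */
        frac = (uint32_t)t;
    }
    putchar('\n');                       /* prints 100000, i.e. 0.100000 */
    return 0;
}
```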
|
| /kernel/linux/linux-5.10/arch/m68k/fpsp040/ |
| D | binstr.S | 28 | A3. Multiply the fraction in d2:d3 by 8 using bit-field 32 | A4. Multiply the fraction in d4:d5 by 2 using shifts. The msb 87 | A3. Multiply d2:d3 by 8; extract msbs into d1. 95 | A4. Multiply d4:d5 by 2; add carry out to d1.
|
| /kernel/linux/linux-5.10/arch/parisc/include/asm/ |
| D | hash.h | 6 * HP-PA only implements integer multiply in the FPU. However, for 19 * This is a multiply by GOLDEN_RATIO_32 = 0x61C88647 optimized for the 109 * Multiply by GOLDEN_RATIO_64 = 0x61C8864680B583EB using a heavily 112 * Without the final shift, the multiply proper is 19 instructions,
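
Because HP-PA has no integer multiply outside the FPU, the constant multiply by GOLDEN_RATIO_32 is open-coded from shifts and adds. One possible (unoptimized) expansion as a sketch; the header's hand-tuned sequence is different and shorter:

```c
/* One shift-and-add expansion of x * 0x61C88647 (mod 2^32). */
#include <stdint.h>
#include <stdio.h>

static uint32_t golden_mul(uint32_t x)
{
    uint32_t x3 = x + (x << 1);              /* 3x */
    uint32_t x7 = (x << 3) - x;              /* 7x */

    /* 0x61C88647 = (3<<29) + (7<<22) + (1<<19) + (1<<15) + (3<<9) + (1<<6) + 7 */
    return (x3 << 29) + (x7 << 22) + (x << 19) + (x << 15)
         + (x3 << 9) + (x << 6) + x7;
}

int main(void)
{
    uint32_t x = 0xCAFEF00Du;
    printf("%08x %08x\n", golden_mul(x), x * 0x61C88647u);   /* identical */
    return 0;
}
```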
|
| /kernel/linux/linux-4.19/arch/parisc/include/asm/ |
| D | hash.h | 6 * HP-PA only implements integer multiply in the FPU. However, for 19 * This is a multiply by GOLDEN_RATIO_32 = 0x61C88647 optimized for the 109 * Multiply by GOLDEN_RATIO_64 = 0x61C8864680B583EB using a heavily 112 * Without the final shift, the multiply proper is 19 instructions,
|
| /kernel/linux/linux-5.10/arch/sparc/include/asm/ |
| D | elf_64.h | 74 #define AV_SPARC_MUL32 0x00000100 /* 32x32 multiply is efficient */ 82 #define AV_SPARC_FMAF 0x00010000 /* fused multiply-add */ 87 #define AV_SPARC_FJFMAU 0x00200000 /* unfused multiply-add */ 88 #define AV_SPARC_IMA 0x00400000 /* integer multiply-add */
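
These AV_SPARC_* capability bits are advertised to user space through the ELF auxiliary vector, so a program can test for the fused multiply-add unit before relying on it. A user-space sketch, assuming Linux/SPARC and using the flag value shown above:

```c
/* Query the hardware-capability bits exported via the ELF auxiliary vector. */
#include <stdio.h>
#include <sys/auxv.h>

#define AV_SPARC_FMAF 0x00010000UL   /* value from elf_64.h above */

int main(void)
{
    unsigned long hwcap = getauxval(AT_HWCAP);

    printf("fused multiply-add: %s\n",
           (hwcap & AV_SPARC_FMAF) ? "available" : "not reported");
    return 0;
}
```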
|
| /kernel/linux/linux-4.19/arch/sparc/include/asm/ |
| D | elf_64.h | 74 #define AV_SPARC_MUL32 0x00000100 /* 32x32 multiply is efficient */ 82 #define AV_SPARC_FMAF 0x00010000 /* fused multiply-add */ 87 #define AV_SPARC_FJFMAU 0x00200000 /* unfused multiply-add */ 88 #define AV_SPARC_IMA 0x00400000 /* integer multiply-add */
|