| /kernel/linux/linux-4.19/arch/m68k/ifpsp060/src/ |
| D | itest.S |
    171  mov.l &0x88888888,%d3
    177  mulu.l %d1,%d2:%d3
    195  mov.l &0x00000000,%d3
    201  mulu.l %d1,%d2:%d3
    241  mov.l &0x00000003,%d3
    247  mulu.l %d1,%d2:%d3
    265  mov.l &0x00000004,%d3
    271  mulu.l %d1,%d2:%d3
    289  mov.l &0xffffffff,%d3
    295  mulu.l %d1,%d2:%d3
    [all …]
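The itest.S hits exercise the 64-bit form of mulu.l that the 060 integer support package has to emulate in software. As a reference point, here is a hedged C sketch of what mulu.l %d1,%d2:%d3 computes; the helper name is ours, and the operand roles are our reading of the instruction (the low register %d3 is multiplied by the source, and the full product lands in the %d2:%d3 pair):

    /* Sketch, not kernel code: 32x32 -> 64 unsigned multiply with the
     * result split across a high/low register pair, as the emulated
     * mulu.l produces it. */
    #include <stdint.h>

    static void mulu_l_sketch(uint32_t src, uint32_t *d2, uint32_t *d3)
    {
        uint64_t product = (uint64_t)*d3 * src;

        *d2 = (uint32_t)(product >> 32);   /* high longword */
        *d3 = (uint32_t)product;           /* low longword  */
    }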
|
| D | ilsp.S |
    342  mov.l %d7, %d3 # divisor
    345  swap %d3
    346  cmp.w %d2, %d3 # V1 = U1 ?
    353  divu.w %d3, %d1 # use quotient of mslw/msw
    364  lddadj1: mov.l %d7, %d3
    367  swap %d3
    368  mulu.w %d1, %d3 # V1q
    370  sub.l %d3, %d4 # U1U2 - V1q
    394  mov.l %d5, %d2 # now %d2,%d3 are trial*divisor
    395  mov.l %d6, %d3
    [all …]
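ilsp.S is the 060SP software divide, and the hits show the classic trial-quotient step: estimate a quotient digit from the top words with divu.w, multiply it back, subtract, and adjust if the estimate was off. As a frame of reference only, a hedged C sketch of what the 64/32-bit unsigned divide ultimately delivers (not of the word-by-word algorithm above); the names are ours, and we assume a non-zero divisor and a quotient that fits in 32 bits, cases the real routine has to detect and report separately:

    /* Sketch: 64-bit dividend held as a hi:lo register pair, divided by
     * a 32-bit divisor, yielding a 32-bit quotient and remainder. */
    #include <stdint.h>

    struct divu64 { uint32_t quot; uint32_t rem; };

    static struct divu64 divu_64_by_32(uint32_t hi, uint32_t lo, uint32_t divisor)
    {
        uint64_t dividend = ((uint64_t)hi << 32) | lo;
        struct divu64 r = {
            .quot = (uint32_t)(dividend / divisor),
            .rem  = (uint32_t)(dividend % divisor),
        };

        return r;
    }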
|
| /kernel/linux/linux-5.10/arch/m68k/ifpsp060/src/ |
| D | itest.S |
    171  mov.l &0x88888888,%d3
    177  mulu.l %d1,%d2:%d3
    195  mov.l &0x00000000,%d3
    201  mulu.l %d1,%d2:%d3
    241  mov.l &0x00000003,%d3
    247  mulu.l %d1,%d2:%d3
    265  mov.l &0x00000004,%d3
    271  mulu.l %d1,%d2:%d3
    289  mov.l &0xffffffff,%d3
    295  mulu.l %d1,%d2:%d3
    [all …]
|
| D | ilsp.S |
    342  mov.l %d7, %d3 # divisor
    345  swap %d3
    346  cmp.w %d2, %d3 # V1 = U1 ?
    353  divu.w %d3, %d1 # use quotient of mslw/msw
    364  lddadj1: mov.l %d7, %d3
    367  swap %d3
    368  mulu.w %d1, %d3 # V1q
    370  sub.l %d3, %d4 # U1U2 - V1q
    394  mov.l %d5, %d2 # now %d2,%d3 are trial*divisor
    395  mov.l %d6, %d3
    [all …]
|
| /kernel/linux/linux-4.19/arch/m68k/fpsp040/ |
| D | decbin.S |
    136  | (*) d3: offset pointer
    145  moveql #ESTRT,%d3 |counter to pick up digits
    154  bfextu %d4{%d3:#4},%d0 |get the digit and zero extend into d0
    156  addqb #4,%d3 |advance d3 to the next digit
    182  | (*) d3: offset pointer
    207  moveql #FSTRT,%d3 |counter to pick up digits
    211  bfextu %d4{%d3:#4},%d0 |get the digit and zero extend
    216  | then inc d1 (=2) to point to the next long word and reset d3 to 0
    220  addqb #4,%d3 |advance d3 to the next digit
    272  | (*) d3: offset pointer
    [all …]
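decbin.S walks a packed-decimal operand one 4-bit digit at a time, with %d3 as the bfextu bit offset that is advanced by 4 per digit and reset when it moves on to the next longword. A hedged C sketch of that digit walk over a single 32-bit longword (our naming; the real routine also strides across the remaining longwords and handles the exponent and sign separately):

    /* Sketch: pull the eight BCD digits out of one longword, most
     * significant digit first, folding each into a binary accumulator. */
    #include <stdint.h>

    static uint64_t accumulate_bcd_longword(uint64_t acc, uint32_t longword)
    {
        for (int offset = 0; offset < 32; offset += 4) {
            uint32_t digit = (longword >> (28 - offset)) & 0xf;  /* the bfextu */

            acc = acc * 10 + digit;
        }

        return acc;
    }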
|
| D | srem_mod.S |
    111  movew (%a0),%d3
    112  movew %d3,SignY(%a6)
    113  andil #0x00007FFF,%d3 | ...Y := |Y|
    117  movel 8(%a0),%d5 | ...(D3,D4,D5) is |Y|
    119  tstl %d3
    122  movel #0x00003FFE,%d3 | ...$3FFD + 1
    129  subil #32,%d3
    133  subl %d6,%d3 | ...(D3,D4,D5) is normalized
    140  subl %d6,%d3
    147  orl %d7,%d4 | ...(D3,D4,D5) normalized
    [all …]
|
| D | binstr.S |
    7  | Input: 64-bit binary integer in d2:d3, desired length (LEN) in
    26  | Copy the fraction in d2:d3 to d4:d5.
    28  | A3. Multiply the fraction in d2:d3 by 8 using bit-field
    35  | A5. Add using the carry the 64-bit quantities in d2:d3 and d4:d5
    36  | into d2:d3. D1 will contain the bcd digit formed.
    52  | d3: lower 32-bits of fraction for mul by 8
    81  | A2. Copy d2:d3 to d4:d5. Start loop.
    85  movel %d3,%d5 |to d4:d5
    87  | A3. Multiply d2:d3 by 8; extract msbs into d1.
    91  bfextu %d3{#0:#3},%d6 |copy 3 msbs of d3 into d6
    [all …]
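The header comments quoted above give the whole digit loop: the 64-bit fraction in d2:d3 is multiplied by 10 on every pass (as x*8, via a shift whose spilled top bits are captured, plus x*2, added back in with carry), and whatever overflows the 64-bit fraction is the next BCD digit in d1. A hedged C sketch of that idea (our names; the real code manages the d2:d3/d4:d5 carries by hand and packs 4-bit digits rather than ASCII):

    /* Sketch: treat frac as a pure fraction x/2^64 and peel off len
     * decimal digits by repeated multiplication by 10; whatever spills
     * past bit 63 is the digit. Uses the GCC/Clang unsigned __int128
     * extension for the spill. */
    #include <stdint.h>

    static void binstr_sketch(uint64_t frac, int len, char *out)
    {
        for (int i = 0; i < len; i++) {
            unsigned __int128 t = (unsigned __int128)frac * 10;  /* = frac*8 + frac*2 */

            out[i] = '0' + (int)(t >> 64);  /* overflowed bits = next digit */
            frac = (uint64_t)t;             /* keep the fractional part */
        }
        out[len] = '\0';
    }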
|
| D | sgetem.S |
    118  movel %d3,-(%a7) |save d3
    120  bfffo %d0{#0:#32},%d3 |find first 1 in ls mant to d0
    121  lsll %d3,%d0 |shift first 1 to integer bit in ms mant
    122  movel (%a7)+,%d3 |restore d3
    126  moveml %d3/%d5/%d6,-(%a7) |save registers
    127  bfffo %d0{#0:#32},%d3 |find first 1 in ls mant to d0
    128  lsll %d3,%d0 |shift ms mant until j-bit is set
    130  lsll %d3,%d1 |shift ls mant by count
    132  subl %d3,%d5 |sub 32 from shift for ls mant
    136  moveml (%a7)+,%d3/%d5/%d6 |restore registers
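The sgetem.S hits are the usual mantissa normalisation: bfffo finds the position of the first set bit, the mantissa is shifted left until the j-bit (the integer bit) is set, and the shift count is what the caller uses to adjust the exponent. A hedged C sketch of that step on a single 64-bit mantissa (our naming; the original handles the two 32-bit halves separately, hence the "sub 32 from shift" line):

    /* Sketch: normalise a non-zero mantissa with a count-leading-zeros
     * (standing in for bfffo) and report the shift for the exponent. */
    #include <stdint.h>

    static int normalize_mantissa(uint64_t *mant)
    {
        if (*mant == 0)
            return 0;                        /* nothing to normalise */

        int shift = __builtin_clzll(*mant);  /* position of the first 1 */

        *mant <<= shift;                     /* j-bit is now set */
        return shift;                        /* subtract from the exponent */
    }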
|
| D | bindec.S |
    115  | d3: scratch;lower 32-bits of mantissa for binstr
    226  | d3: x/x
    289  | d3: x/x
    361  | d3: x/scratch - offset ptr into PTENRM array
    410  moveb (%a2,%d1),%d3 |load d3 with new rmode
    411  lsll #4,%d3 |put bits in proper position
    412  fmovel %d3,%fpcr |load bits into fpu
    413  lsrl #4,%d3 |put bits in proper position
    414  tstb %d3 |decode new rmode for pten table
    419  lsrb #1,%d3 |get lsb in carry
    [all …]
|
| /kernel/linux/linux-5.10/arch/m68k/fpsp040/ |
| D | decbin.S |
    136  | (*) d3: offset pointer
    145  moveql #ESTRT,%d3 |counter to pick up digits
    154  bfextu %d4{%d3:#4},%d0 |get the digit and zero extend into d0
    156  addqb #4,%d3 |advance d3 to the next digit
    182  | (*) d3: offset pointer
    207  moveql #FSTRT,%d3 |counter to pick up digits
    211  bfextu %d4{%d3:#4},%d0 |get the digit and zero extend
    216  | then inc d1 (=2) to point to the next long word and reset d3 to 0
    220  addqb #4,%d3 |advance d3 to the next digit
    272  | (*) d3: offset pointer
    [all …]
|
| D | srem_mod.S |
    111  movew (%a0),%d3
    112  movew %d3,SignY(%a6)
    113  andil #0x00007FFF,%d3 | ...Y := |Y|
    117  movel 8(%a0),%d5 | ...(D3,D4,D5) is |Y|
    119  tstl %d3
    122  movel #0x00003FFE,%d3 | ...$3FFD + 1
    129  subil #32,%d3
    133  subl %d6,%d3 | ...(D3,D4,D5) is normalized
    140  subl %d6,%d3
    147  orl %d7,%d4 | ...(D3,D4,D5) normalized
    [all …]
|
| D | binstr.S |
    7  | Input: 64-bit binary integer in d2:d3, desired length (LEN) in
    26  | Copy the fraction in d2:d3 to d4:d5.
    28  | A3. Multiply the fraction in d2:d3 by 8 using bit-field
    35  | A5. Add using the carry the 64-bit quantities in d2:d3 and d4:d5
    36  | into d2:d3. D1 will contain the bcd digit formed.
    52  | d3: lower 32-bits of fraction for mul by 8
    81  | A2. Copy d2:d3 to d4:d5. Start loop.
    85  movel %d3,%d5 |to d4:d5
    87  | A3. Multiply d2:d3 by 8; extract msbs into d1.
    91  bfextu %d3{#0:#3},%d6 |copy 3 msbs of d3 into d6
    [all …]
|
| D | sgetem.S |
    118  movel %d3,-(%a7) |save d3
    120  bfffo %d0{#0:#32},%d3 |find first 1 in ls mant to d0
    121  lsll %d3,%d0 |shift first 1 to integer bit in ms mant
    122  movel (%a7)+,%d3 |restore d3
    126  moveml %d3/%d5/%d6,-(%a7) |save registers
    127  bfffo %d0{#0:#32},%d3 |find first 1 in ls mant to d0
    128  lsll %d3,%d0 |shift ms mant until j-bit is set
    130  lsll %d3,%d1 |shift ls mant by count
    132  subl %d3,%d5 |sub 32 from shift for ls mant
    136  moveml (%a7)+,%d3/%d5/%d6 |restore registers
|
| D | bindec.S |
    115  | d3: scratch;lower 32-bits of mantissa for binstr
    226  | d3: x/x
    289  | d3: x/x
    361  | d3: x/scratch - offset ptr into PTENRM array
    410  moveb (%a2,%d1),%d3 |load d3 with new rmode
    411  lsll #4,%d3 |put bits in proper position
    412  fmovel %d3,%fpcr |load bits into fpu
    413  lsrl #4,%d3 |put bits in proper position
    414  tstb %d3 |decode new rmode for pten table
    419  lsrb #1,%d3 |get lsb in carry
    [all …]
|
| /kernel/linux/linux-5.10/include/asm-generic/ |
| D | xor.h |
    101  register long d0, d1, d2, d3, d4, d5, d6, d7; in xor_32regs_2() local
    105  d3 = p1[3]; in xor_32regs_2()
    113  d3 ^= p2[3]; in xor_32regs_2()
    121  p1[3] = d3; in xor_32regs_2()
    138  register long d0, d1, d2, d3, d4, d5, d6, d7; in xor_32regs_3() local
    142  d3 = p1[3]; in xor_32regs_3()
    150  d3 ^= p2[3]; in xor_32regs_3()
    158  d3 ^= p3[3]; in xor_32regs_3()
    166  p1[3] = d3; in xor_32regs_3()
    184  register long d0, d1, d2, d3, d4, d5, d6, d7; in xor_32regs_4() local
    [all …]
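xor_32regs_2/3/4 all follow the pattern visible in the hits: read eight longs of the destination into local (hopefully register-allocated) variables, XOR in the corresponding longs of each source, store them back, and advance by eight. A sketch of the two-source variant condensed from those lines; the assumption that bytes is a non-zero multiple of 8 * sizeof(long) is ours:

    /* Sketch of the xor_32regs idea: block the XOR eight words at a time
     * so the compiler is free to schedule the loads, XORs and stores. */
    static void xor_32regs_2_sketch(unsigned long bytes,
                                    unsigned long *p1, const unsigned long *p2)
    {
        long lines = bytes / (sizeof(unsigned long) * 8);

        do {
            register long d0, d1, d2, d3, d4, d5, d6, d7;

            d0 = p1[0]; d1 = p1[1]; d2 = p1[2]; d3 = p1[3];
            d4 = p1[4]; d5 = p1[5]; d6 = p1[6]; d7 = p1[7];
            d0 ^= p2[0]; d1 ^= p2[1]; d2 ^= p2[2]; d3 ^= p2[3];
            d4 ^= p2[4]; d5 ^= p2[5]; d6 ^= p2[6]; d7 ^= p2[7];
            p1[0] = d0; p1[1] = d1; p1[2] = d2; p1[3] = d3;
            p1[4] = d4; p1[5] = d5; p1[6] = d6; p1[7] = d7;
            p1 += 8;
            p2 += 8;
        } while (--lines > 0);
    }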
|
| /kernel/linux/linux-4.19/include/asm-generic/ |
| D | xor.h |
    109  register long d0, d1, d2, d3, d4, d5, d6, d7; in xor_32regs_2() local
    113  d3 = p1[3]; in xor_32regs_2()
    121  d3 ^= p2[3]; in xor_32regs_2()
    129  p1[3] = d3; in xor_32regs_2()
    146  register long d0, d1, d2, d3, d4, d5, d6, d7; in xor_32regs_3() local
    150  d3 = p1[3]; in xor_32regs_3()
    158  d3 ^= p2[3]; in xor_32regs_3()
    166  d3 ^= p3[3]; in xor_32regs_3()
    174  p1[3] = d3; in xor_32regs_3()
    192  register long d0, d1, d2, d3, d4, d5, d6, d7; in xor_32regs_4() local
    [all …]
|
| /kernel/linux/linux-5.10/arch/mips/crypto/ |
| D | poly1305-mips.pl |
    735  my ($d0,$d1,$d2,$d3) =
    801  lw $d3,12($inp)
    814  srlv $at,$d3,$t1
    815  sllv $d3,$d3,$shr
    818  or $d3,$d3,$t0
    827  sllv $at,$d3,$t1
    828  srlv $d3,$d3,$shr
    831  or $d3,$d3,$t0
    838  lwl $d3,12+MSB($inp)
    842  lwr $d3,12+LSB($inp)
    [all …]
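The srlv/sllv pairs and the lwl/lwr fallback above deal with reading the 16-byte Poly1305 block from an input pointer that may not be word-aligned, on either endianness. What the code ends up with are the 32-bit little-endian words of the block; a portable, hedged sketch of that load, which sidesteps the alignment problem entirely:

    /* Sketch: assemble one little-endian 32-bit message word byte by
     * byte, so no aligned word access is required. */
    #include <stdint.h>

    static uint32_t load_le32(const uint8_t *p)
    {
        return (uint32_t)p[0] |
               ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) |
               ((uint32_t)p[3] << 24);
    }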
|
| /kernel/linux/linux-5.10/arch/x86/crypto/ |
| D | poly1305-x86_64-cryptogams.pl |
    180  my ($d1,$d2,$d3, $r0,$r1,$s1)=("%r8","%r9","%rdi","%r11","%r12","%r13");
    190  mov %rdx,$d3
    200  adc %rdx,$d3
    210  adc \$0,$d3
    215  adc $h2,$d3
    217  and $d3,%rax # last reduction step
    218  mov $d3,$h2
    219  shr \$2,$d3
    221  add $d3,%rax
    420  my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
    [all …]
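Lines 217-221 are the Poly1305 "last reduction step": everything at bit 130 and above of the accumulator is folded back into the low limbs using 2^130 = 5 (mod 2^130 - 5), with the and/shr/add sequence evaluating 5 * (d3 >> 2) as (d3 & ~3) + (d3 >> 2). A hedged C sketch of that step on the same three-limb 64-bit layout, where h2 holds the bits above 128 (the function name is ours):

    /* Sketch: partial reduction of (h0, h1, h2) modulo 2^130 - 5. */
    #include <stdint.h>

    static void poly1305_partial_reduce(uint64_t *h0, uint64_t *h1, uint64_t *h2)
    {
        uint64_t carry = *h2 >> 2;     /* bits 130 and up */
        uint64_t t = carry * 5;        /* carry * 2^130 == carry * 5 (mod p) */
        uint64_t old = *h0;

        *h2 &= 3;                      /* keep only bits 128..129 */
        *h0 += t;
        if (*h0 < old && ++(*h1) == 0) /* ripple the add's carry upward */
            ++(*h2);
    }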
|
| /kernel/linux/linux-5.10/arch/arm/crypto/ |
| D | poly1305-armv4.pl |
    337  adds $h3,r2,r1 @ d3+=d2>>32
    340  add $h4,$h4,r3 @ h4+=d3>>32
    496  my ($D0,$D1,$D2,$D3,$D4, $H0,$H1,$H2,$H3,$H4) = map("q$_",(5..14));
    551  @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
    557  vmull.u32 $D3,$R3,${R0}[1]
    563  vmlal.u32 $D3,$R2,${R1}[1]
    568  vmlal.u32 $D3,$R1,${R2}[1]
    573  vmlal.u32 $D3,$R0,${R3}[1]
    578  vmlal.u32 $D3,$R4,${S4}[1]
    633  vshr.u64 $T0,$D3,#26
    [all …]
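The comment at line 551 spells out one row of the NEON multiply in the base-2^26 representation: any partial product that would land at or above 2^130 is wrapped around and picks up a factor of 5, which is why the S (= 5*R) vectors appear in the vmlal chain. A hedged C sketch of the full five-limb product before any carrying, following that same layout (names are ours):

    /* Sketch: schoolbook multiply of two base-2^26, five-limb values
     * modulo 2^130 - 5; wrapped terms are multiplied by 5. */
    #include <stdint.h>

    static void poly1305_mul_26(uint64_t d[5],
                                const uint32_t h[5], const uint32_t r[5])
    {
        const uint64_t s1 = 5 * (uint64_t)r[1], s2 = 5 * (uint64_t)r[2],
                       s3 = 5 * (uint64_t)r[3], s4 = 5 * (uint64_t)r[4];

        d[0] = (uint64_t)h[0]*r[0] + (uint64_t)h[1]*s4 + (uint64_t)h[2]*s3 +
               (uint64_t)h[3]*s2 + (uint64_t)h[4]*s1;
        d[1] = (uint64_t)h[0]*r[1] + (uint64_t)h[1]*r[0] + (uint64_t)h[2]*s4 +
               (uint64_t)h[3]*s3 + (uint64_t)h[4]*s2;
        d[2] = (uint64_t)h[0]*r[2] + (uint64_t)h[1]*r[1] + (uint64_t)h[2]*r[0] +
               (uint64_t)h[3]*s4 + (uint64_t)h[4]*s3;
        d[3] = (uint64_t)h[0]*r[3] + (uint64_t)h[1]*r[2] + (uint64_t)h[2]*r[1] +
               (uint64_t)h[3]*r[0] + (uint64_t)h[4]*s4;
        d[4] = (uint64_t)h[0]*r[4] + (uint64_t)h[1]*r[3] + (uint64_t)h[2]*r[2] +
               (uint64_t)h[3]*r[1] + (uint64_t)h[4]*r[0];
    }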
|
| D | curve25519-core.S |
    31  vst1.8 {d2-d3}, [r6, : 128]!
    174  vtrn.32 d3, d11
    181  vmov.i64 d3, #0
    183  vst1.8 {d2-d3}, [r2, : 128]!
    194  vmov.i64 d3, #0
    196  vst1.8 {d2-d3}, [r2, : 128]!
    202  vld1.8 {d2-d3}, [r2, : 128]!
    205  vst1.8 {d2-d3}, [r6, : 128]!
    290  vld1.8 {d2-d3}, [r4, : 128]!
    318  vmlal.s32 q13, d11, d3
    [all …]
|
| D | poly1305-core.S_shipped |
    291  adds r7,r2,r1 @ d3+=d2>>32
    294  add r8,r8,r3 @ h4+=d3>>32
    472  vdup.32 d3,r4
    488  @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
    493  vmull.u32 q7,d3,d0[1]
    500  vmlal.u32 q8,d3,d1[1]
    505  vmlal.u32 q8,d1,d3[1]
    506  vmlal.u32 q7,d0,d3[1]
    507  vmlal.u32 q9,d3,d3[1]
    509  vmlal.u32 q5,d3,d6[1]
    [all …]
|
| /kernel/linux/linux-4.19/Documentation/i2c/ |
| D | i2c-topology |
    119  '--| dev D3 |
    137  of the entire operation. But accesses to D3 are possibly interleaved
    175  '--| dev D3 |
    196  This means that accesses to both D2 and D3 are locked out for the full
    214  '--| dev D4 | '--| dev D3 |
    238  '--| dev D4 | '--| dev D3 |
    243  are locked). But accesses to D3 and D4 are possibly interleaved at
    244  any point. Accesses to D3 locks out D1 and D2, but accesses to D4
    259  '--| dev D4 | '--| dev D3 |
    262  When device D1 is accessed, accesses to D2 and D3 are locked out
    [all …]
|
| /kernel/linux/linux-5.10/Documentation/admin-guide/media/ |
| D | dvb-usb-vp7045-cardlist.rst |
    16  - 13d3:3223, 13d3:3224
    18  - 13d3:3205, 13d3:3206
|
| /kernel/linux/linux-4.19/arch/x86/crypto/ |
| D | poly1305-sse2-x86_64.S |
    50  #define d3 %r11 macro
    187  # d3 = t2[0] + t2[1] + t3[1]
    195  movq t1,d3
    235  # d3 += d2 >> 26
    238  add %rax,d3
    244  # d4 += d3 >> 26
    245  mov d3,%rax
    248  # h3 = d3 & 0x3ffffff
    249  mov d3,%rax
    492  # d3 = t1[0] + t1[1]
    [all …]
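The comments at lines 235-248 ("d3 += d2 >> 26", "d4 += d3 >> 26", "h3 = d3 & 0x3ffffff") are the carry chain that brings the radix-2^26 limbs back under 26 bits after a block multiply. A hedged C sketch of that chain, including the usual wrap of the final carry back into limb 0 via a multiply by 5 (our names; the limb bounds of the real code are assumed, so none of the additions overflow):

    /* Sketch: propagate carries through five 2^26-radix limbs and fold
     * the top carry back in, since 2^130 = 5 (mod 2^130 - 5). */
    #include <stdint.h>

    static void poly1305_carry_26(uint64_t d[5], uint32_t h[5])
    {
        uint64_t c;

        c = d[0] >> 26;  h[0] = d[0] & 0x3ffffff;
        d[1] += c;  c = d[1] >> 26;  h[1] = d[1] & 0x3ffffff;
        d[2] += c;  c = d[2] >> 26;  h[2] = d[2] & 0x3ffffff;
        d[3] += c;  c = d[3] >> 26;  h[3] = d[3] & 0x3ffffff;
        d[4] += c;  c = d[4] >> 26;  h[4] = d[4] & 0x3ffffff;

        h[0] += (uint32_t)(c * 5);   /* fold the 2^130 carry back in */
        h[1] += h[0] >> 26;          /* one more light carry */
        h[0] &= 0x3ffffff;
    }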
|
| /kernel/linux/linux-5.10/Documentation/i2c/ |
| D | i2c-topology.rst |
    135  '--| dev D3 |
    153  of the entire operation. But accesses to D3 are possibly interleaved
    195  '--| dev D3 |
    216  This means that accesses to both D2 and D3 are locked out for the full
    234  '--| dev D4 | '--| dev D3 |
    258  '--| dev D4 | '--| dev D3 |
    263  are locked). But accesses to D3 and D4 are possibly interleaved at
    264  any point. Accesses to D3 locks out D1 and D2, but accesses to D4
    279  '--| dev D4 | '--| dev D3 |
    282  When device D1 is accessed, accesses to D2 and D3 are locked out
    [all …]
|