/arch/arm/mach-ks8695/time.c
    60  u32 half = DIV_ROUND_CLOSEST(rate, 2);  in ks8695_set_periodic() local
    69  writel_relaxed(half, KS8695_TMR_VA + KS8695_T1TC);  in ks8695_set_periodic()
    70  writel_relaxed(half, KS8695_TMR_VA + KS8695_T1PD);  in ks8695_set_periodic()
    82  u32 half = DIV_ROUND_CLOSEST(cycles, 2);  in ks8695_set_next_event() local
    91  writel_relaxed(half, KS8695_TMR_VA + KS8695_T1TC);  in ks8695_set_next_event()
    92  writel_relaxed(half, KS8695_TMR_VA + KS8695_T1PD);  in ks8695_set_next_event()

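The two writes above load the same rounded half count into T1TC and T1PD; a plausible reading is that the timer counts down both registers per cycle, so each phase gets half of the total period. A minimal sketch of the halving arithmetic, with a simplified unsigned stand-in for the kernel's DIV_ROUND_CLOSEST() and printf() standing in for the register writes (the tick value is made up):

#include <stdint.h>
#include <stdio.h>

/* Simplified, unsigned-only stand-in for the kernel's DIV_ROUND_CLOSEST():
 * divide and round to the nearest integer rather than truncating. */
static inline uint32_t div_round_closest_u32(uint32_t x, uint32_t d)
{
        return (x + d / 2) / d;
}

int main(void)
{
        uint32_t rate = 25000001;       /* hypothetical ticks per period */
        uint32_t half = div_round_closest_u32(rate, 2);

        /* Stand-ins for the two writel_relaxed() calls: each phase
         * register receives half of the rounded total count. */
        printf("T1TC = %u\nT1PD = %u\n", half, half);
        return 0;
}
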
/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
    98  int i, half = (irq == IRQ_IXP4XX_QM1 ? 0 : 1);  in qmgr_irq() local
    99  u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);  in qmgr_irq()
   103  __raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */  in qmgr_irq()
   108  i += half * HALF_QUEUES;  in qmgr_irq()
   118  int half = queue / 32;  in qmgr_enable_irq() local
   122  __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,  in qmgr_enable_irq()
   123               &qmgr_regs->irqen[half]);  in qmgr_enable_irq()
   130  int half = queue / 32;  in qmgr_disable_irq() local
   134  __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,  in qmgr_disable_irq()
   135               &qmgr_regs->irqen[half]);  in qmgr_disable_irq()
   [all …]

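These matches show one interrupt bit per queue spread across two 32-bit registers: half selects the register and a mask selects the bit within it. A self-contained sketch of that split, with a plain array standing in for the memory-mapped irqen registers (the real driver uses __raw_readl()/__raw_writel(); the mask computation is truncated out of the matches, so 1u << (queue % 32) is an assumption):

#include <stdint.h>

/* Illustration only: a plain array standing in for the two memory-mapped
 * 32-bit interrupt-enable registers the real driver touches with
 * __raw_readl()/__raw_writel(). */
static volatile uint32_t irqen[2];

/* 64 queues, one bit each, split across two registers: 'half' picks the
 * register, 'mask' picks the bit inside it (assumed form, see above). */
static void queue_irq_enable(unsigned int queue)
{
        int half = queue / 32;
        uint32_t mask = 1u << (queue % 32);

        irqen[half] |= mask;            /* read-modify-write: set the bit */
}

static void queue_irq_disable(unsigned int queue)
{
        int half = queue / 32;
        uint32_t mask = 1u << (queue % 32);

        irqen[half] &= ~mask;           /* read-modify-write: clear the bit */
}
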
/arch/arm/vdso/vdsomunge.c
   119  static Elf32_Half read_elf_half(Elf32_Half half, bool swap)  in read_elf_half() argument
   121  return swap ? swab16(half) : half;  in read_elf_half()

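read_elf_half() byte-swaps a 16-bit ELF field only when the file's endianness differs from the host's. A standalone version of the same two lines, with a minimal stand-in for swab16():

#include <stdbool.h>
#include <stdint.h>

/* Minimal stand-in for swab16(): exchange the two bytes of a 16-bit value. */
static inline uint16_t swab16(uint16_t x)
{
        return (uint16_t)((x << 8) | (x >> 8));
}

/* Byte-swap an ELF half-word only when the file's endianness differs
 * from the host's, mirroring the two vdsomunge.c lines above. */
static uint16_t read_elf_half(uint16_t half, bool swap)
{
        return swap ? swab16(half) : half;
}
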
/arch/s390/net/bpf_jit.S
    71  sk_load_common(half, 2, llgh)  /* r14 = *(u16 *) (skb->data+offset) */
   114  sk_negative_common(half, 2, llgh)

/arch/powerpc/kernel/vecemu.c
   243  int exp, half;  in rfin() local
   255  half = 0x400000 >> exp;  in rfin()
   257  return (x + half) & ~(0x7fffff >> exp);  in rfin()

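The rfin() lines round a single-precision value to the nearest integer directly on its IEEE-754 bit pattern: add half a unit at the integer scale, then mask off the remaining fraction bits. A sketch of that idiom, assuming the unbiased exponent is already known to lie in [0, 22] (NaN, infinity, and small magnitudes would need the separate handling the full function presumably provides):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the add-half-then-mask rounding idiom from rfin(), operating
 * on the raw IEEE-754 single-precision bits.  Assumes the unbiased
 * exponent is in [0, 22]; special cases are not handled here. */
static uint32_t round_bits_to_int(uint32_t x)
{
        int exp = ((x >> 23) & 0xff) - 127;     /* unbiased exponent */
        uint32_t half = 0x400000u >> exp;       /* 0.5 at this scale */

        /* Adding 'half' may carry into the exponent (e.g. 3.5 -> 4.0);
         * the mask then clears whatever fraction bits remain. */
        return (x + half) & ~(0x7fffffu >> exp);
}

int main(void)
{
        float in = 2.5f, out;
        uint32_t bits;

        memcpy(&bits, &in, sizeof(bits));
        bits = round_bits_to_int(bits);
        memcpy(&out, &bits, sizeof(out));
        printf("%g -> %g\n", (double)in, (double)out);  /* 2.5 -> 3 */
        return 0;
}
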
/arch/xtensa/lib/memset.S
   114  bbci.l a5, 0, .L20  # branch if dst alignment half-aligned
   123  # dst half-aligned

/arch/mips/net/bpf_jit_asm.S
    88  is_offset_negative(half)
    90  is_offset_in_header(2, half)

/arch/sparc/kernel/head_32.S
    79  .half 0x0203  /* HdrS version */
    81  .half 1
    83  .half 0
    85  .half 0

/arch/sparc/kernel/head_64.S
    68  .half 0x0301  /* HdrS version */
    71  .half 1
    73  .half 0
    75  .half 0

/arch/arm/mach-omap2/sram243x.S
   159  cmp r0, #0x1  @ going to half speed?
   164  cmp r0, #0x1  @ going to half speed (post branch link)
   165  moveq r5, r5, lsr #1  @ divide by 2 if to half

/arch/arm/mach-omap2/sram242x.S
   159  cmp r0, #0x1  @ going to half speed?
   164  cmp r0, #0x1  @ going to half speed (post branch link)
   165  moveq r5, r5, lsr #1  @ divide by 2 if to half

/arch/s390/kernel/head.S
   157  0: lmh %r0,%r15,0(%r13)  # clear high-order half of gprs
   297  0: lmh %r0,%r15,0(%r13)  # clear high-order half of gprs

/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
    93  # process top half (r0..r3) {a...d}
   104  # process bottom half (r4..r7) {e...h}

/arch/m68k/Kconfig.cpu
   486  Split the ColdFire CPU cache, and use half as an instruction cache
   487  and half as a data cache.

/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
   149  # process top half (r0..r3) {a...d}
   160  # process bottom half (r4..r7) {e...h}

/arch/m68k/fpsp040/ssin.S
   488  |--we want P+p = W+w but |p| <= half ulp of P
   500  |--|r| <= half ulp of R.

/arch/m68k/fpsp040/stan.S
   326  movel #0x7fdc0000,FP_SCR3(%a6)  |create low half of 2**16383*

/arch/c6x/lib/csum_64plus.S
    86  ;; Manage half-word

/arch/x86/boot/header.S
   595  movzwl %dx, %esp  # Clear upper half of %esp

/arch/arm/kernel/entry-armv.S
   251  @ pointing at the second half of the Thumb instruction. We

/arch/sparc/lib/M7memcpy.S
   610  andcc %o1, 2, %o5  ! check for half word alignment
   626  ! Src is half-word aligned

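The andcc tests bit 1 of the source address to pick a half-word-aligned path. A C sketch of the same peeling idea, assuming the pointers are already 2-byte aligned and share alignment, as such copy fast paths typically require (memcpy() stands in for the raw machine loads and stores):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Sketch of the alignment peeling hinted at by the M7memcpy.S lines:
 * the "andcc %o1, 2" tests bit 1 of the source address.  Assumes src is
 * already 2-byte aligned and that src and dst share alignment. */
static void copy_half_word_aligned(uint8_t *dst, const uint8_t *src, size_t len)
{
        if (((uintptr_t)src & 2) && len >= 2) {
                memcpy(dst, src, 2);    /* peel one half-word to reach 4-byte alignment */
                dst += 2;
                src += 2;
                len -= 2;
        }
        while (len >= 4) {              /* aligned word copies */
                memcpy(dst, src, 4);
                dst += 4;
                src += 4;
                len -= 4;
        }
        if (len)
                memcpy(dst, src, len);  /* trailing bytes */
}
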
/arch/cris/arch-v10/kernel/entry.S
   350  ;; ETRAX 100LX TR89 bugfix: if the second half of an unaligned

/arch/x86/math-emu/README
   311  number of randomly selected arguments in each case is about half a

/arch/powerpc/Kconfig
   665  # This is roughly half way between the top of user space and the bottom

/arch/arm/crypto/sha512-core.S_shipped
    13  @ This code is ~4.5 (four and a half) times faster than code generated