/*
 * x86 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
 */

#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/frame.h>

#define __ASM_HALF_REG(reg)	__ASM_SEL(reg, e##reg)
#define __ASM_HALF_SIZE(inst)	__ASM_SEL(inst##w, inst##l)

#ifdef CONFIG_X86_32

/*
 * The semaphore operations have a special calling sequence that
 * allows us to do a simpler in-line version of them. These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * %eax contains the semaphore pointer on entry. Save the C-clobbered
 * registers (%eax, %edx and %ecx) except %eax, which is either a return
 * value or just gets clobbered. Same is true for %edx, so make sure GCC
 * reloads it after the slow path, by making it hold a temporary,
 * for example see ____down_write().
 */

#define save_common_regs \
	pushl %ecx

#define restore_common_regs \
	popl %ecx

	/* Avoid uglifying the argument copying x86-64 needs to do. */
	.macro movq src, dst
	.endm

#else

/*
 * x86-64 rwsem wrappers
 *
 * This interfaces the inline asm code to the slow-path
 * C routines. We need to save the call-clobbered regs
 * that the asm does not mark as clobbered, and move the
 * argument from %rax to %rdi.
 *
 * NOTE! We don't need to save %rax, because the functions
 * will always return the semaphore pointer in %rax (which
 * is also the input argument to these helpers).
 *
 * The following can clobber %rdx because the asm clobbers it:
 *	call_rwsem_down_write_failed
 *	call_rwsem_wake
 * but %rdi, %rsi, %rcx and %r8-%r11 always need saving.
 */
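
/*
 * For illustration only (not assembled here): the fast paths in the
 * rwsem headers are expected to reach these stubs with the semaphore
 * pointer already in %eax/%rax, calling out only when the atomic
 * update of the count detects contention.  A sketch of what such a
 * down_read() fast path could look like -- the register use, count
 * offset and jns test are assumptions about the caller, not something
 * defined in this file:
 *
 *	lock incq (%rax)		# add the active-reader bias
 *	jns	4f			# new count >= 0: uncontended, done
 *	call	call_rwsem_down_read_failed	# slow path, %rax = sem
 * 4:
 */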

#define save_common_regs \
	pushq %rdi; \
	pushq %rsi; \
	pushq %rcx; \
	pushq %r8;  \
	pushq %r9;  \
	pushq %r10; \
	pushq %r11

#define restore_common_regs \
	popq %r11; \
	popq %r10; \
	popq %r9;  \
	popq %r8;  \
	popq %rcx; \
	popq %rsi; \
	popq %rdi

#endif

/* Fix up special calling conventions */
ENTRY(call_rwsem_down_read_failed)
	FRAME_BEGIN
	save_common_regs
	__ASM_SIZE(push,) %__ASM_REG(dx)
	movq %rax,%rdi
	call rwsem_down_read_failed
	__ASM_SIZE(pop,) %__ASM_REG(dx)
	restore_common_regs
	FRAME_END
	ret
ENDPROC(call_rwsem_down_read_failed)

ENTRY(call_rwsem_down_write_failed)
	FRAME_BEGIN
	save_common_regs
	movq %rax,%rdi
	call rwsem_down_write_failed
	restore_common_regs
	FRAME_END
	ret
ENDPROC(call_rwsem_down_write_failed)

ENTRY(call_rwsem_down_write_failed_killable)
	FRAME_BEGIN
	save_common_regs
	movq %rax,%rdi
	call rwsem_down_write_failed_killable
	restore_common_regs
	FRAME_END
	ret
ENDPROC(call_rwsem_down_write_failed_killable)

ENTRY(call_rwsem_wake)
	FRAME_BEGIN
	/* do nothing if still outstanding active readers */
	__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
	jnz 1f
	save_common_regs
	movq %rax,%rdi
	call rwsem_wake
	restore_common_regs
1:	FRAME_END
	ret
ENDPROC(call_rwsem_wake)

ENTRY(call_rwsem_downgrade_wake)
	FRAME_BEGIN
	save_common_regs
	__ASM_SIZE(push,) %__ASM_REG(dx)
	movq %rax,%rdi
	call rwsem_downgrade_wake
	__ASM_SIZE(pop,) %__ASM_REG(dx)
	restore_common_regs
	FRAME_END
	ret
ENDPROC(call_rwsem_downgrade_wake)
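
/*
 * For illustration only (not assembled here): call_rwsem_wake is
 * expected to be entered from the up_read()/up_write() fast paths with
 * the pre-release count value still in %edx/%rdx, so the dec/jnz test
 * in the stub can skip the rwsem_wake() call while active readers
 * remain.  A sketch of what such an up_read() caller could look like --
 * the bias value, count offset and register use are assumptions about
 * the header, not something defined in this file:
 *
 *	movq	$-1, %rdx		# -RWSEM_ACTIVE_READ_BIAS (assumed)
 *	lock xaddq %rdx, (%rax)		# %rdx := count before the release
 *	jns	5f			# new count >= 0: no waiters to wake
 *	call	call_rwsem_wake		# dec/jnz checks remaining actives
 * 5:
 */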