• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1/*
2 * x86 semaphore implementation.
3 *
4 * (C) Copyright 1999 Linus Torvalds
5 *
6 * Portions Copyright 1999 Red Hat, Inc.
7 *
8 *	This program is free software; you can redistribute it and/or
9 *	modify it under the terms of the GNU General Public License
10 *	as published by the Free Software Foundation; either version
11 *	2 of the License, or (at your option) any later version.
12 *
13 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
14 */
15
16#include <linux/linkage.h>
17#include <asm/alternative-asm.h>
18
19#define __ASM_HALF_REG(reg)	__ASM_SEL(reg, e##reg)
20#define __ASM_HALF_SIZE(inst)	__ASM_SEL(inst##w, inst##l)
21
#ifdef CONFIG_X86_32

/*
 * The semaphore operations have a special calling sequence that
 * allows us to do a simpler in-line version of them. These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * %eax contains the semaphore pointer on entry. Save the C-clobbered
 * registers (%eax, %edx and %ecx) except %eax which is either a return
 * value or just clobbered..
 */

/*
 * Only %ecx needs blanket saving on 32-bit: %eax is the return value /
 * clobber, and the thunks below push %edx explicitly where it must
 * survive the C call.
 */
#define save_common_regs \
	pushl %ecx

#define restore_common_regs \
	popl %ecx

	/* Avoid uglifying the argument copying x86-64 needs to do. */
	/*
	 * On 32-bit the semaphore pointer is already in the right place,
	 * so "movq" is defined as an empty macro and the shared
	 * "movq %rax,%rdi" lines below expand to nothing.
	 */
	.macro movq src, dst
	.endm

#else

/*
 * x86-64 rwsem wrappers
 *
 * This interfaces the inline asm code to the slow-path
 * C routines. We need to save the call-clobbered regs
 * that the asm does not mark as clobbered, and move the
 * argument from %rax to %rdi.
 *
 * NOTE! We don't need to save %rax, because the functions
 * will always return the semaphore pointer in %rax (which
 * is also the input argument to these helpers)
 *
 * The following can clobber %rdx because the asm clobbers it:
 *   call_rwsem_down_write_failed
 *   call_rwsem_wake
 * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
 */

#define save_common_regs \
	pushq %rdi; \
	pushq %rsi; \
	pushq %rcx; \
	pushq %r8;  \
	pushq %r9;  \
	pushq %r10; \
	pushq %r11

/* Must mirror save_common_regs exactly, popping in reverse order. */
#define restore_common_regs \
	popq %r11; \
	popq %r10; \
	popq %r9; \
	popq %r8; \
	popq %rcx; \
	popq %rsi; \
	popq %rdi

#endif
84
/* Fix up special calling conventions */

/*
 * Slow path for down_read() contention.
 * In:   %eax/%rax = struct rw_semaphore pointer
 * Out:  %eax/%rax = semaphore pointer (the C helper returns it there,
 *       per the NOTE in the x86-64 comment above)
 * %edx/%rdx is pushed/popped explicitly: this helper is not in the
 * list of functions permitted to clobber it (see comment above).
 */
ENTRY(call_rwsem_down_read_failed)
	save_common_regs
	__ASM_SIZE(push,) %__ASM_REG(dx)
	movq %rax,%rdi			/* arg0 = sem (no-op macro on 32-bit) */
	call rwsem_down_read_failed
	__ASM_SIZE(pop,) %__ASM_REG(dx)
	restore_common_regs
	ret
ENDPROC(call_rwsem_down_read_failed)
95
/*
 * Slow path for down_write() contention.
 * In:   %eax/%rax = struct rw_semaphore pointer
 * Out:  %eax/%rax = semaphore pointer
 * %rdx is NOT preserved here: the x86-64 comment above lists this
 * helper as one the inline asm already marks as clobbering %rdx.
 */
ENTRY(call_rwsem_down_write_failed)
	save_common_regs
	movq %rax,%rdi			/* arg0 = sem (no-op macro on 32-bit) */
	call rwsem_down_write_failed
	restore_common_regs
	ret
ENDPROC(call_rwsem_down_write_failed)
103
/*
 * Wake waiters after a release, but only when no active readers remain.
 * In:   %eax/%rax = struct rw_semaphore pointer
 *       %edx/%dx  = presumably the active-reader count from the caller —
 *                   TODO confirm against the inline-asm call site; only
 *                   the half-size register is tested here.
 * %rdx may be clobbered (listed as such in the comment above).
 */
ENTRY(call_rwsem_wake)
	/* do nothing if still outstanding active readers */
	__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
	jnz 1f				/* readers still active: skip the wakeup */
	save_common_regs
	movq %rax,%rdi			/* arg0 = sem (no-op macro on 32-bit) */
	call rwsem_wake
	restore_common_regs
1:	ret
ENDPROC(call_rwsem_wake)
114
/*
 * Wake readers when a write lock is downgraded to a read lock.
 * In:   %eax/%rax = struct rw_semaphore pointer
 * Out:  %eax/%rax = semaphore pointer
 * %edx/%rdx is pushed/popped explicitly: this helper is not in the
 * list of functions permitted to clobber it (see comment above).
 */
ENTRY(call_rwsem_downgrade_wake)
	save_common_regs
	__ASM_SIZE(push,) %__ASM_REG(dx)
	movq %rax,%rdi			/* arg0 = sem (no-op macro on 32-bit) */
	call rwsem_downgrade_wake
	__ASM_SIZE(pop,) %__ASM_REG(dx)
	restore_common_regs
	ret
ENDPROC(call_rwsem_downgrade_wake)
124