#include <linux/linkage.h>
#include <asm-generic/export.h>
#include <asm/asm.h>
#include <asm/csr.h>

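	/*
	 * fixup: wrap a single user-memory access. The access instruction
	 * gets a local label, and an (insn, fixup) pointer pair is emitted
	 * into the __ex_table section, so a fault at the access resumes at
	 * \lbl instead of oopsing.
	 */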
	.macro fixup op reg addr lbl
100:
	\op \reg, \addr
	.section __ex_table,"a"
	.balign RISCV_SZPTR
	RISCV_PTR 100b, \lbl
	.previous
	.endm

ENTRY(__asm_copy_to_user)
ENTRY(__asm_copy_from_user)

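	/*
	 * Both entry points share a single body: with SR_SUM set, loads
	 * from and stores to user memory are both permitted, so the same
	 * code serves either copy direction.
	 */
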
	/* Enable access to user memory */
	li t6, SR_SUM
	csrs CSR_STATUS, t6
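	/*
	 * t6 keeps SR_SUM live so the exit paths below can drop the
	 * permission again with a single csrc.
	 */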

	/*
	 * Save the terminal address, which will be used to compute the
	 * number of bytes left uncopied in case of a fixup exception.
	 */
	add	t5, a0, a2
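	/*
	 * None of the copy loops below touch t5; the fixup at 10 relies
	 * on it to compute the return value.
	 */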

	/*
	 * Register allocation for code below:
	 * a0 - start of uncopied dst
	 * a1 - start of uncopied src
	 * a2 - size
	 * t0 - end of uncopied dst
	 */
	add	t0, a0, a2

	/*
	 * Use byte copy if the size is too small.
	 * SZREG is 4 on RV32 and 8 on RV64.
	 */
	li	a3, 9*SZREG /* minimum size for taking the word_copy path */
	bltu	a2, a3, .Lbyte_copy_tail
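	/*
	 * From here on a2 >= 9*SZREG: even after up to SZREG-1 bytes go
	 * into aligning dst, at least 8*SZREG bytes remain, enough for
	 * the one unconditional iteration of the unrolled word_copy loop.
	 */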

	/*
	 * Copy leading bytes until dst is aligned on a word boundary.
	 * a0 - start of dst
	 * t1 - start of aligned dst
	 */
	addi	t1, a0, SZREG-1
	andi	t1, t1, ~(SZREG-1)
	/* dst is already aligned, skip */
	beq	a0, t1, .Lskip_align_dst
1:
	/* a5 - one byte for copying data */
	fixup lb      a5, 0(a1), 10f
	addi	a1, a1, 1	/* src */
	fixup sb      a5, 0(a0), 10f
	addi	a0, a0, 1	/* dst */
	bltu	a0, t1, 1b	/* t1 - start of aligned dst */

.Lskip_align_dst:
	/*
	 * Now dst is aligned.
	 * Use shift-copy if src is misaligned; use word-copy when both
	 * src and dst are aligned, since it needs no shifting.
	 */
	/* a1 - start of src */
	andi	a3, a1, SZREG-1
	bnez	a3, .Lshift_copy

.Lword_copy:
	/*
	 * Both src and dst are aligned, unrolled word copy
	 *
	 * a0 - start of aligned dst
	 * a1 - start of aligned src
	 * t0 - end of aligned dst
	 */
	addi	t0, t0, -(8*SZREG) /* don't overrun the end */
2:
	fixup REG_L   a4,        0(a1), 10f
	fixup REG_L   a5,    SZREG(a1), 10f
	fixup REG_L   a6,  2*SZREG(a1), 10f
	fixup REG_L   a7,  3*SZREG(a1), 10f
	fixup REG_L   t1,  4*SZREG(a1), 10f
	fixup REG_L   t2,  5*SZREG(a1), 10f
	fixup REG_L   t3,  6*SZREG(a1), 10f
	fixup REG_L   t4,  7*SZREG(a1), 10f
	fixup REG_S   a4,        0(a0), 10f
	fixup REG_S   a5,    SZREG(a0), 10f
	fixup REG_S   a6,  2*SZREG(a0), 10f
	fixup REG_S   a7,  3*SZREG(a0), 10f
	fixup REG_S   t1,  4*SZREG(a0), 10f
	fixup REG_S   t2,  5*SZREG(a0), 10f
	fixup REG_S   t3,  6*SZREG(a0), 10f
	fixup REG_S   t4,  7*SZREG(a0), 10f
	addi	a0, a0, 8*SZREG
	addi	a1, a1, 8*SZREG
	bltu	a0, t0, 2b

	addi	t0, t0, 8*SZREG /* revert to original value */
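	/* at most 8*SZREG-1 bytes remain; finish them in the byte tail */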
	j	.Lbyte_copy_tail

.Lshift_copy:

	/*
	 * Word copy with shifting.
	 * For a misaligned src we still perform aligned word loads, but
	 * each stored word combines the value fetched in the previous
	 * iteration with the current one using shifts.
	 * This is safe because every aligned load stays within a word
	 * that overlaps the source region.
	 *
	 * a0 - start of aligned dst
	 * a1 - start of src
	 * a3 - a1 & mask:(SZREG-1)
	 * t0 - end of uncopied dst
	 * t1 - end of aligned dst
	 */
	/* calculate the aligned word boundary for dst */
	andi	t1, t0, ~(SZREG-1)
	/* convert unaligned src to aligned src */
	andi	a1, a1, ~(SZREG-1)

	/*
	 * Calculate shifts
	 * t3 - prev shift
	 * t4 - current shift
	 */
	slli	t3, a3, 3 /* converting bytes in a3 to bits */
	li	a5, SZREG*8
	sub	t4, a5, t3
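	/*
	 * Each store below assembles one dst word from two adjacent
	 * aligned src words (little-endian):
	 *
	 *	dst = (prev >> t3) | (next << t4)
	 *
	 * a3 is nonzero here, so both shift amounts lie in
	 * [8, SZREG*8 - 8] and the srl/sll are well defined.
	 */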

	/* Load the first word to combine with the second word */
	fixup REG_L   a5, 0(a1), 10f

3:
	/*
	 * Main shifting copy
	 *
	 * a0 - start of aligned dst
	 * a1 - start of aligned src
	 * t1 - end of aligned dst
	 */

	/* At least one iteration will be executed */
	srl	a4, a5, t3
	fixup REG_L   a5, SZREG(a1), 10f
	addi	a1, a1, SZREG
	sll	a2, a5, t4
	or	a2, a2, a4
	fixup REG_S   a2, 0(a0), 10f
	addi	a0, a0, SZREG
	bltu	a0, t1, 3b
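	/* a0 has reached t1: fewer than SZREG bytes of dst remain */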

	/* Revert src to original unaligned value */
	add	a1, a1, a3

.Lbyte_copy_tail:
	/*
	 * Byte copy anything left.
	 *
	 * a0 - start of remaining dst
	 * a1 - start of remaining src
	 * t0 - end of remaining dst
	 */
	bgeu	a0, t0, .Lout_copy_user  /* check if end of copy */
4:
	fixup lb      a5, 0(a1), 10f
	addi	a1, a1, 1	/* src */
	fixup sb      a5, 0(a0), 10f
	addi	a0, a0, 1	/* dst */
	bltu	a0, t0, 4b	/* t0 - end of dst */

.Lout_copy_user:
	/* Disable access to user memory */
	csrc CSR_STATUS, t6
	li	a0, 0
	ret

	/* Exception fixup code */
10:
	/* Disable access to user memory */
	csrc CSR_STATUS, t6
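	/*
	 * t5 holds the terminal dst address: return the number of bytes
	 * that were not copied.
	 */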
	sub a0, t5, a0
	ret
ENDPROC(__asm_copy_to_user)
ENDPROC(__asm_copy_from_user)
EXPORT_SYMBOL(__asm_copy_to_user)
EXPORT_SYMBOL(__asm_copy_from_user)


ENTRY(__clear_user)

	/* Enable access to user memory */
	li t6, SR_SUM
	csrs CSR_STATUS, t6

	add a3, a0, a1
	addi t0, a0, SZREG-1
	andi t1, a3, ~(SZREG-1)
	andi t0, t0, ~(SZREG-1)
	/*
	 * a3: terminal address of the target region
	 * t0: lowest SZREG-aligned address in the target region
	 * t1: highest SZREG-aligned address in the target region
	 *     (SZREG-aligned: word on RV32, doubleword on RV64)
	 */
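	/*
	 * If there is no aligned middle (t0 >= t1), byte-clear the whole
	 * region; if the start is misaligned, clear the head bytes first.
	 */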
	bgeu t0, t1, 2f
	bltu a0, t0, 4f
1:
	fixup REG_S, zero, (a0), 11f
	addi a0, a0, SZREG
	bltu a0, t1, 1b
2:
	bltu a0, a3, 5f

3:
	/* Disable access to user memory */
	csrc CSR_STATUS, t6
	li a0, 0
	ret
4: /* Edge case: misaligned head */
	fixup sb, zero, (a0), 11f
	addi a0, a0, 1
	bltu a0, t0, 4b
	j 1b
5: /* Edge case: byte-clear the remainder */
	fixup sb, zero, (a0), 11f
	addi a0, a0, 1
	bltu a0, a3, 5b
	j 3b

	/* Exception fixup code */
11:
	/* Disable access to user memory */
	csrc CSR_STATUS, t6
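	/*
	 * a3 holds the terminal address: return the number of bytes
	 * that were not cleared.
	 */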
	sub a0, a3, a0
	ret
ENDPROC(__clear_user)
EXPORT_SYMBOL(__clear_user)