/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2008  Thiemo Seufer
 * Copyright (C) 2012  MIPS Technologies, Inc.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/proc_fs.h>

#include <asm/bugs.h>
#include <asm/cacheops.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prefetch.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cpu.h>
#include <asm/war.h>

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_dma.h>
#endif

#include <asm/uasm.h>

/* Registers used in the assembled routines. */
#define ZERO 0
#define AT 2
#define A0 4
#define A1 5
#define A2 6
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#define T9 25
#define RA 31
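
/*
 * For reference: the numbers above are o32-style register numbers,
 * i.e. 0 = $zero, 4-6 = $a0-$a2, 8-11 = $t0-$t3, 25 = $t9, 31 = $ra.
 * Note that AT is defined as register 2 ($v0), not the assembler
 * temporary $1.
 */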

/* Handle labels (which must be positive integers). */
enum label_id {
	label_clear_nopref = 1,
	label_clear_pref,
	label_copy_nopref,
	label_copy_pref_both,
	label_copy_pref_store,
};

UASM_L_LA(_clear_nopref)
UASM_L_LA(_clear_pref)
UASM_L_LA(_copy_nopref)
UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store)

/* We need one branch and therefore one relocation per target label. */
static struct uasm_label __cpuinitdata labels[5];
static struct uasm_reloc __cpuinitdata relocs[5];

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

static int pref_bias_clear_store __cpuinitdata;
static int pref_bias_copy_load __cpuinitdata;
static int pref_bias_copy_store __cpuinitdata;

static u32 pref_src_mode __cpuinitdata;
static u32 pref_dst_mode __cpuinitdata;

static int clear_word_size __cpuinitdata;
static int copy_word_size __cpuinitdata;

static int half_clear_loop_size __cpuinitdata;
static int half_copy_loop_size __cpuinitdata;

static int cache_line_size __cpuinitdata;
#define cache_line_mask() (cache_line_size - 1)

static inline void __cpuinit
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
	if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
		} else
			uasm_i_addiu(buf, T9, ZERO, off);
		uasm_i_daddu(buf, reg1, reg2, T9);
	} else {
#ifdef CONFIG_CPU_MIPSR6
		if (off > 0xff) {
#else
		if (off > 0x7fff) {
#endif
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
			UASM_i_ADDU(buf, reg1, reg2, T9);
		} else
			UASM_i_ADDIU(buf, reg1, reg2, off);
	}
}
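
/*
 * For illustration (hypothetical off, not emitted verbatim): a large
 * offset on the non-workaround path above expands to
 *
 *	lui	t9, %hi(off)
 *	addiu	t9, t9, %lo(off)
 *	addu	reg1, reg2, t9		# daddu with 64-bit GPRs
 *
 * while a small offset collapses to a single addiu/daddiu.  The
 * DADDI_WAR path always materializes the offset in T9 and adds with
 * daddu, so no daddiu is ever emitted on CPUs where r4k_daddiu_bug()
 * reports the erratum.
 */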

static void __cpuinit set_prefetch_parameters(void)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
		clear_word_size = 8;
	else
		clear_word_size = 4;

	if (cpu_has_64bit_gp_regs)
		copy_word_size = 8;
	else
		copy_word_size = 4;

	/*
	 * The prefs used here use "streaming" hints, which cause the
	 * copied data to be kicked out of the cache sooner.  A page copy often
	 * ends up copying a lot more data than is commonly used, so this seems
	 * to make sense in terms of reducing cache pollution, but I've no real
	 * performance data to back this up.
	 */
	if (cpu_has_prefetch) {
		/*
		 * XXX: Most prefetch bias values in here are based on
		 * guesswork.
		 */
		cache_line_size = cpu_dcache_line_size();
		switch (current_cpu_type()) {
		case CPU_R5500:
		case CPU_TX49XX:
			/* These processors only support the Pref_Load hint. */
			pref_bias_copy_load = 256;
			break;

		case CPU_R10000:
		case CPU_R12000:
		case CPU_R14000:
			/*
			 * Those values have been experimentally tuned for an
			 * Origin 200.
			 */
			pref_bias_clear_store = 512;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 256;
			pref_src_mode = Pref_LoadStreamed;
			pref_dst_mode = Pref_StoreStreamed;
			break;

		case CPU_SB1:
		case CPU_SB1A:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 128;
			pref_bias_copy_store = 128;
			/*
			 * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
			 * hints are broken.
			 */
			if (current_cpu_type() == CPU_SB1 &&
			    (current_cpu_data.processor_id & 0xff) < 0x02) {
				pref_src_mode = Pref_Load;
				pref_dst_mode = Pref_Store;
			} else {
				pref_src_mode = Pref_LoadStreamed;
				pref_dst_mode = Pref_StoreStreamed;
			}
			break;

		default:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 128;
			pref_src_mode = Pref_LoadStreamed;
			pref_dst_mode = Pref_PrepareForStore;
			break;
		}
	} else {
		if (cpu_has_cache_cdex_s)
			cache_line_size = cpu_scache_line_size();
		else if (cpu_has_cache_cdex_p)
			cache_line_size = cpu_dcache_line_size();
	}
	/*
	 * Too much unrolling will overflow the space reserved between
	 * __clear_page_start/__clear_page_end and
	 * __copy_page_start/__copy_page_end.
	 */
	half_clear_loop_size = min(16 * clear_word_size,
				   max(cache_line_size >> 1,
				       4 * clear_word_size));
	half_copy_loop_size = min(16 * copy_word_size,
				  max(cache_line_size >> 1,
				      4 * copy_word_size));
}
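
/*
 * Worked example: with 64-bit GPRs (8-byte words) and a 32-byte
 * D-cache line, half_clear_loop_size = min(16 * 8, max(32 / 2, 4 * 8))
 * = 32, so one trip around the synthesized clear loop below handles
 * 2 * 32 = 64 bytes.  With 128-byte lines (e.g. SB1) the value caps
 * at min(128, max(64, 32)) = 64.
 */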

static void __cpuinit build_clear_store(u32 **buf, int off)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
		uasm_i_sd(buf, ZERO, off, A0);
	} else {
		uasm_i_sw(buf, ZERO, off, A0);
	}
}

static inline void __cpuinit build_clear_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_clear_store) {
		uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
			    A0);
	} else if (cache_line_size == (half_clear_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
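			/*
			 * Erratum handling, cf. R4600_V1_HIT_CACHEOP_WAR
			 * and R4600_V2_HIT_CACHEOP_WAR in <asm/war.h>:
			 * V1.x wants such cacheops preceded by four
			 * non-load/store instructions, V2.x wants an
			 * uncached load first (AT holds a CKSEG1 address
			 * set up by build_clear_page()).
			 */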
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}

extern u32 __clear_page_start;
extern u32 __clear_page_end;
extern u32 __copy_page_start;
extern u32 __copy_page_end;

void __cpuinit build_clear_page(void)
{
	int off;
	u32 *buf = &__clear_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;
	static atomic_t run_once = ATOMIC_INIT(0);

	if (atomic_xchg(&run_once, 1)) {
		return;
	}

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - The prefetch bias is a multiple of 2 words.
	 *   - The prefetch bias is less than one page.
	 */
	BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_clear_store);

	off = PAGE_SIZE - pref_bias_clear_store;
	if (off > 0xffff || !pref_bias_clear_store)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, 0xa000);

	off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
				* cache_line_size : 0;
	while (off) {
		build_clear_pref(&buf, -off);
		off -= cache_line_size;
	}
	uasm_l_clear_pref(&l, buf);
	do {
		build_clear_pref(&buf, off);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < half_clear_loop_size);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
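	/*
	 * Second half of the chunk: A0 has already been advanced past
	 * it, so the stores walk negative offsets back up to zero.  The
	 * bne to label_clear_pref is emitted one instruction early so
	 * that the final store lands in the branch delay slot.
	 */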
	do {
		build_clear_pref(&buf, off);
		if (off == -clear_word_size)
			uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < 0);

	if (pref_bias_clear_store) {
		pg_addiu(&buf, A2, A0, pref_bias_clear_store);
		uasm_l_clear_nopref(&l, buf);
		off = 0;
		do {
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < half_clear_loop_size);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			if (off == -clear_word_size)
				uasm_il_bne(&buf, &r, A0, A2,
					    label_clear_nopref);
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__clear_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized clear page handler (%u instructions).\n",
		 (u32)(buf - &__clear_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__clear_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
	pr_debug("\t.set pop\n");
}

static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_ld(buf, reg, off, A1);
	} else {
		uasm_i_lw(buf, reg, off, A1);
	}
}

static void __cpuinit build_copy_store(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_sd(buf, reg, off, A0);
	} else {
		uasm_i_sw(buf, reg, off, A0);
	}
}

static inline void build_copy_load_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_load)
		uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
}

static inline void build_copy_store_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_store) {
		uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
			    A0);
	} else if (cache_line_size == (half_copy_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
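			/*
			 * Same R4600 V1.x/V2.x cacheop erratum handling
			 * as in build_clear_pref() above.
			 */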
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}

void __cpuinit build_copy_page(void)
{
	int off;
	u32 *buf = &__copy_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;
	static atomic_t run_once = ATOMIC_INIT(0);

	if (atomic_xchg(&run_once, 1)) {
		return;
	}

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - All prefetch biases are multiples of 8 words.
	 *   - The prefetch biases are less than one page.
	 *   - The store prefetch bias isn't greater than the load
	 *     prefetch bias.
	 */
	BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
	BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_copy_load);
	BUG_ON(pref_bias_copy_store > pref_bias_copy_load);
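
	/*
	 * The loop synthesized below mirrors build_clear_page(): warm-up
	 * prefetches, then a body unrolled four words deep with load and
	 * store prefetches interleaved, copying 2 * half_copy_loop_size
	 * bytes per trip around label_copy_pref_both.
	 */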

	off = PAGE_SIZE - pref_bias_copy_load;
	if (off > 0xffff || !pref_bias_copy_load)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, 0xa000);

	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_load_pref(&buf, -off);
		off -= cache_line_size;
	}
	off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_store_pref(&buf, -off);
		off -= cache_line_size;
	}
	uasm_l_copy_pref_both(&l, buf);
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < half_copy_loop_size);
	pg_addiu(&buf, A1, A1, 2 * off);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
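	/*
	 * As in build_clear_page(), the bne back to label_copy_pref_both
	 * is emitted before the last store so that store fills the
	 * branch delay slot.
	 */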
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		if (off == -(4 * copy_word_size))
			uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < 0);

	if (pref_bias_copy_load - pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0,
			 pref_bias_copy_load - pref_bias_copy_store);
		uasm_l_copy_pref_store(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_pref_store);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	if (pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0, pref_bias_copy_store);
		uasm_l_copy_nopref(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_nopref);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__copy_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized copy page handler (%u instructions).\n",
		 (u32)(buf - &__copy_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__copy_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
	pr_debug("\t.set pop\n");
}

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
extern void clear_page_cpu(void *page);
extern void copy_page_cpu(void *to, void *from);

/*
 * Pad descriptors to cacheline, since each is exclusively owned by a
 * particular CPU.
 */
struct dmadscr {
	u64 dscr_a;
	u64 dscr_b;
	u64 pad_a;
	u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];
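
/*
 * dscr_a/dscr_b form one data mover descriptor; with the padding each
 * per-CPU entry fills its own cache line.  sb1_dma_init() below
 * apparently programs each channel as a one-entry ring
 * (V_DM_DSCR_BASE_RINGSZ(1)).
 */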

void sb1_dma_init(void)
{
	int i;

	for (i = 0; i < DM_NUM_CHANNELS; i++) {
		const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
				     V_DM_DSCR_BASE_RINGSZ(1);
		void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));

		__raw_writeq(base_val, base_reg);
		__raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
		__raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
	}
}

void clear_page(void *page)
{
	u64 to_phys = CPHYSADDR((unsigned long)page);
	unsigned int cpu = smp_processor_id();

	/* if the page is not in KSEG0, use old way */
	if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
		return clear_page_cpu(page);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
				 M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

void copy_page(void *to, void *from)
{
	u64 from_phys = CPHYSADDR((unsigned long)from);
	u64 to_phys = CPHYSADDR((unsigned long)to);
	unsigned int cpu = smp_processor_id();

	/* if any page is not in KSEG0, use old way */
	if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
	    || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
		return copy_page_cpu(to, from);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
				 M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */