1 /* r128_state.c -- State support for r128 -*- linux-c -*-
2 * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
3 */
4 /*
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Gareth Hughes <gareth@valinux.com>
29 */
30
31 #include <linux/pci.h>
32 #include <linux/slab.h>
33 #include <linux/uaccess.h>
34
35 #include <drm/drm_device.h>
36 #include <drm/drm_file.h>
37 #include <drm/drm_print.h>
38 #include <drm/r128_drm.h>
39
40 #include "r128_drv.h"
41
42 /* ================================================================
43 * CCE hardware state programming functions
44 */
45
46 static void r128_emit_clip_rects(drm_r128_private_t *dev_priv,
47 struct drm_clip_rect *boxes, int count)
48 {
49 u32 aux_sc_cntl = 0x00000000;
50 RING_LOCALS;
51 DRM_DEBUG("\n");
52
53 BEGIN_RING((count < 3 ? count : 3) * 5 + 2);
54
55 if (count >= 1) {
56 OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3));
57 OUT_RING(boxes[0].x1);
58 OUT_RING(boxes[0].x2 - 1);
59 OUT_RING(boxes[0].y1);
60 OUT_RING(boxes[0].y2 - 1);
61
62 aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
63 }
64 if (count >= 2) {
65 OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3));
66 OUT_RING(boxes[1].x1);
67 OUT_RING(boxes[1].x2 - 1);
68 OUT_RING(boxes[1].y1);
69 OUT_RING(boxes[1].y2 - 1);
70
71 aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
72 }
73 if (count >= 3) {
74 OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3));
75 OUT_RING(boxes[2].x1);
76 OUT_RING(boxes[2].x2 - 1);
77 OUT_RING(boxes[2].y1);
78 OUT_RING(boxes[2].y2 - 1);
79
80 aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
81 }
82
83 OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0));
84 OUT_RING(aux_sc_cntl);
85
86 ADVANCE_RING();
87 }
88
89 static __inline__ void r128_emit_core(drm_r128_private_t *dev_priv)
90 {
91 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
92 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
93 RING_LOCALS;
94 DRM_DEBUG("\n");
95
96 BEGIN_RING(2);
97
98 OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0));
99 OUT_RING(ctx->scale_3d_cntl);
100
101 ADVANCE_RING();
102 }
103
104 static __inline__ void r128_emit_context(drm_r128_private_t *dev_priv)
105 {
106 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
107 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
108 RING_LOCALS;
109 DRM_DEBUG("\n");
110
111 BEGIN_RING(13);
112
113 OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11));
114 OUT_RING(ctx->dst_pitch_offset_c);
115 OUT_RING(ctx->dp_gui_master_cntl_c);
116 OUT_RING(ctx->sc_top_left_c);
117 OUT_RING(ctx->sc_bottom_right_c);
118 OUT_RING(ctx->z_offset_c);
119 OUT_RING(ctx->z_pitch_c);
120 OUT_RING(ctx->z_sten_cntl_c);
121 OUT_RING(ctx->tex_cntl_c);
122 OUT_RING(ctx->misc_3d_state_cntl_reg);
123 OUT_RING(ctx->texture_clr_cmp_clr_c);
124 OUT_RING(ctx->texture_clr_cmp_msk_c);
125 OUT_RING(ctx->fog_color_c);
126
127 ADVANCE_RING();
128 }
129
130 static __inline__ void r128_emit_setup(drm_r128_private_t *dev_priv)
131 {
132 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
133 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
134 RING_LOCALS;
135 DRM_DEBUG("\n");
136
137 BEGIN_RING(3);
138
139 OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP));
140 OUT_RING(ctx->setup_cntl);
141 OUT_RING(ctx->pm4_vc_fpu_setup);
142
143 ADVANCE_RING();
144 }
145
146 static __inline__ void r128_emit_masks(drm_r128_private_t *dev_priv)
147 {
148 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
149 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
150 RING_LOCALS;
151 DRM_DEBUG("\n");
152
153 BEGIN_RING(5);
154
155 OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
156 OUT_RING(ctx->dp_write_mask);
157
158 OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1));
159 OUT_RING(ctx->sten_ref_mask_c);
160 OUT_RING(ctx->plane_3d_mask_c);
161
162 ADVANCE_RING();
163 }
164
165 static __inline__ void r128_emit_window(drm_r128_private_t *dev_priv)
166 {
167 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
168 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
169 RING_LOCALS;
170 DRM_DEBUG("\n");
171
172 BEGIN_RING(2);
173
174 OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0));
175 OUT_RING(ctx->window_xy_offset);
176
177 ADVANCE_RING();
178 }
179
180 static __inline__ void r128_emit_tex0(drm_r128_private_t *dev_priv)
181 {
182 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
183 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
184 drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
185 int i;
186 RING_LOCALS;
187 DRM_DEBUG("\n");
188
189 BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);
190
191 OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C,
192 2 + R128_MAX_TEXTURE_LEVELS));
193 OUT_RING(tex->tex_cntl);
194 OUT_RING(tex->tex_combine_cntl);
195 OUT_RING(ctx->tex_size_pitch_c);
196 for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
197 OUT_RING(tex->tex_offset[i]);
198
199 OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
200 OUT_RING(ctx->constant_color_c);
201 OUT_RING(tex->tex_border_color);
202
203 ADVANCE_RING();
204 }
205
206 static __inline__ void r128_emit_tex1(drm_r128_private_t *dev_priv)
207 {
208 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
209 drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
210 int i;
211 RING_LOCALS;
212 DRM_DEBUG("\n");
213
214 BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);
215
216 OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
217 OUT_RING(tex->tex_cntl);
218 OUT_RING(tex->tex_combine_cntl);
219 for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
220 OUT_RING(tex->tex_offset[i]);
221
222 OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
223 OUT_RING(tex->tex_border_color);
224
225 ADVANCE_RING();
226 }
227
228 static void r128_emit_state(drm_r128_private_t *dev_priv)
229 {
230 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
231 unsigned int dirty = sarea_priv->dirty;
232
233 DRM_DEBUG("dirty=0x%08x\n", dirty);
234
235 if (dirty & R128_UPLOAD_CORE) {
236 r128_emit_core(dev_priv);
237 sarea_priv->dirty &= ~R128_UPLOAD_CORE;
238 }
239
240 if (dirty & R128_UPLOAD_CONTEXT) {
241 r128_emit_context(dev_priv);
242 sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
243 }
244
245 if (dirty & R128_UPLOAD_SETUP) {
246 r128_emit_setup(dev_priv);
247 sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
248 }
249
250 if (dirty & R128_UPLOAD_MASKS) {
251 r128_emit_masks(dev_priv);
252 sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
253 }
254
255 if (dirty & R128_UPLOAD_WINDOW) {
256 r128_emit_window(dev_priv);
257 sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
258 }
259
260 if (dirty & R128_UPLOAD_TEX0) {
261 r128_emit_tex0(dev_priv);
262 sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
263 }
264
265 if (dirty & R128_UPLOAD_TEX1) {
266 r128_emit_tex1(dev_priv);
267 sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
268 }
269
270 /* Turn off the texture cache flushing */
271 sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;
272
273 sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
274 }
275
276 #if R128_PERFORMANCE_BOXES
277 /* ================================================================
278 * Performance monitoring functions
279 */
280
281 static void r128_clear_box(drm_r128_private_t *dev_priv,
282 int x, int y, int w, int h, int r, int g, int b)
283 {
284 u32 pitch, offset;
285 u32 fb_bpp, color;
286 RING_LOCALS;
287
288 switch (dev_priv->fb_bpp) {
289 case 16:
290 fb_bpp = R128_GMC_DST_16BPP;
291 color = (((r & 0xf8) << 8) |
292 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
293 break;
294 case 24:
295 fb_bpp = R128_GMC_DST_24BPP;
296 color = ((r << 16) | (g << 8) | b);
297 break;
298 case 32:
299 fb_bpp = R128_GMC_DST_32BPP;
300 color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
301 break;
302 default:
303 return;
304 }
305
306 offset = dev_priv->back_offset;
307 pitch = dev_priv->back_pitch >> 3;
308
309 BEGIN_RING(6);
310
311 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
312 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
313 R128_GMC_BRUSH_SOLID_COLOR |
314 fb_bpp |
315 R128_GMC_SRC_DATATYPE_COLOR |
316 R128_ROP3_P |
317 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS);
318
319 OUT_RING((pitch << 21) | (offset >> 5));
320 OUT_RING(color);
321
322 OUT_RING((x << 16) | y);
323 OUT_RING((w << 16) | h);
324
325 ADVANCE_RING();
326 }
327
328 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
329 {
330 if (atomic_read(&dev_priv->idle_count) == 0)
331 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
332 else
333 atomic_set(&dev_priv->idle_count, 0);
334 }
335
336 #endif
337
338 /* ================================================================
339 * CCE command dispatch functions
340 */
341
342 static void r128_print_dirty(const char *msg, unsigned int flags)
343 {
344 DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
345 msg,
346 flags,
347 (flags & R128_UPLOAD_CORE) ? "core, " : "",
348 (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
349 (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
350 (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
351 (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
352 (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
353 (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
354 (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
355 (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
356 }
357
358 static void r128_cce_dispatch_clear(struct drm_device *dev,
359 drm_r128_clear_t *clear)
360 {
361 drm_r128_private_t *dev_priv = dev->dev_private;
362 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
363 int nbox = sarea_priv->nbox;
364 struct drm_clip_rect *pbox = sarea_priv->boxes;
365 unsigned int flags = clear->flags;
366 int i;
367 RING_LOCALS;
368 DRM_DEBUG("\n");
369
370 if (dev_priv->page_flipping && dev_priv->current_page == 1) {
371 unsigned int tmp = flags;
372
373 flags &= ~(R128_FRONT | R128_BACK);
374 if (tmp & R128_FRONT)
375 flags |= R128_BACK;
376 if (tmp & R128_BACK)
377 flags |= R128_FRONT;
378 }
379
380 for (i = 0; i < nbox; i++) {
381 int x = pbox[i].x1;
382 int y = pbox[i].y1;
383 int w = pbox[i].x2 - x;
384 int h = pbox[i].y2 - y;
385
386 DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
387 pbox[i].x1, pbox[i].y1, pbox[i].x2,
388 pbox[i].y2, flags);
389
390 if (flags & (R128_FRONT | R128_BACK)) {
391 BEGIN_RING(2);
392
393 OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
394 OUT_RING(clear->color_mask);
395
396 ADVANCE_RING();
397 }
398
399 if (flags & R128_FRONT) {
400 BEGIN_RING(6);
401
402 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
403 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
404 R128_GMC_BRUSH_SOLID_COLOR |
405 (dev_priv->color_fmt << 8) |
406 R128_GMC_SRC_DATATYPE_COLOR |
407 R128_ROP3_P |
408 R128_GMC_CLR_CMP_CNTL_DIS |
409 R128_GMC_AUX_CLIP_DIS);
410
411 OUT_RING(dev_priv->front_pitch_offset_c);
412 OUT_RING(clear->clear_color);
413
414 OUT_RING((x << 16) | y);
415 OUT_RING((w << 16) | h);
416
417 ADVANCE_RING();
418 }
419
420 if (flags & R128_BACK) {
421 BEGIN_RING(6);
422
423 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
424 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
425 R128_GMC_BRUSH_SOLID_COLOR |
426 (dev_priv->color_fmt << 8) |
427 R128_GMC_SRC_DATATYPE_COLOR |
428 R128_ROP3_P |
429 R128_GMC_CLR_CMP_CNTL_DIS |
430 R128_GMC_AUX_CLIP_DIS);
431
432 OUT_RING(dev_priv->back_pitch_offset_c);
433 OUT_RING(clear->clear_color);
434
435 OUT_RING((x << 16) | y);
436 OUT_RING((w << 16) | h);
437
438 ADVANCE_RING();
439 }
440
441 if (flags & R128_DEPTH) {
442 BEGIN_RING(6);
443
444 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
445 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
446 R128_GMC_BRUSH_SOLID_COLOR |
447 (dev_priv->depth_fmt << 8) |
448 R128_GMC_SRC_DATATYPE_COLOR |
449 R128_ROP3_P |
450 R128_GMC_CLR_CMP_CNTL_DIS |
451 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
452
453 OUT_RING(dev_priv->depth_pitch_offset_c);
454 OUT_RING(clear->clear_depth);
455
456 OUT_RING((x << 16) | y);
457 OUT_RING((w << 16) | h);
458
459 ADVANCE_RING();
460 }
461 }
462 }
463
464 static void r128_cce_dispatch_swap(struct drm_device *dev)
465 {
466 drm_r128_private_t *dev_priv = dev->dev_private;
467 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
468 int nbox = sarea_priv->nbox;
469 struct drm_clip_rect *pbox = sarea_priv->boxes;
470 int i;
471 RING_LOCALS;
472 DRM_DEBUG("\n");
473
474 #if R128_PERFORMANCE_BOXES
475 /* Do some trivial performance monitoring...
476 */
477 r128_cce_performance_boxes(dev_priv);
478 #endif
479
480 for (i = 0; i < nbox; i++) {
481 int x = pbox[i].x1;
482 int y = pbox[i].y1;
483 int w = pbox[i].x2 - x;
484 int h = pbox[i].y2 - y;
485
486 BEGIN_RING(7);
487
488 OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
489 OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
490 R128_GMC_DST_PITCH_OFFSET_CNTL |
491 R128_GMC_BRUSH_NONE |
492 (dev_priv->color_fmt << 8) |
493 R128_GMC_SRC_DATATYPE_COLOR |
494 R128_ROP3_S |
495 R128_DP_SRC_SOURCE_MEMORY |
496 R128_GMC_CLR_CMP_CNTL_DIS |
497 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
498
499 /* Make this work even if front & back are flipped:
500 */
501 if (dev_priv->current_page == 0) {
502 OUT_RING(dev_priv->back_pitch_offset_c);
503 OUT_RING(dev_priv->front_pitch_offset_c);
504 } else {
505 OUT_RING(dev_priv->front_pitch_offset_c);
506 OUT_RING(dev_priv->back_pitch_offset_c);
507 }
508
509 OUT_RING((x << 16) | y);
510 OUT_RING((x << 16) | y);
511 OUT_RING((w << 16) | h);
512
513 ADVANCE_RING();
514 }
515
516 /* Increment the frame counter. The client-side 3D driver must
517 * throttle the framerate by waiting for this value before
518 * performing the swapbuffer ioctl.
519 */
520 dev_priv->sarea_priv->last_frame++;
521
522 BEGIN_RING(2);
523
524 OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
525 OUT_RING(dev_priv->sarea_priv->last_frame);
526
527 ADVANCE_RING();
528 }
529
530 static void r128_cce_dispatch_flip(struct drm_device *dev)
531 {
532 drm_r128_private_t *dev_priv = dev->dev_private;
533 RING_LOCALS;
534 DRM_DEBUG("page=%d pfCurrentPage=%d\n",
535 dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
536
537 #if R128_PERFORMANCE_BOXES
538 /* Do some trivial performance monitoring...
539 */
540 r128_cce_performance_boxes(dev_priv);
541 #endif
542
543 BEGIN_RING(4);
544
545 R128_WAIT_UNTIL_PAGE_FLIPPED();
546 OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));
547
548 if (dev_priv->current_page == 0)
549 OUT_RING(dev_priv->back_offset);
550 else
551 OUT_RING(dev_priv->front_offset);
552
553 ADVANCE_RING();
554
555 /* Increment the frame counter. The client-side 3D driver must
556 * throttle the framerate by waiting for this value before
557 * performing the swapbuffer ioctl.
558 */
559 dev_priv->sarea_priv->last_frame++;
560 dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
561 1 - dev_priv->current_page;
562
563 BEGIN_RING(2);
564
565 OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
566 OUT_RING(dev_priv->sarea_priv->last_frame);
567
568 ADVANCE_RING();
569 }
570
571 static void r128_cce_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
572 {
573 drm_r128_private_t *dev_priv = dev->dev_private;
574 drm_r128_buf_priv_t *buf_priv = buf->dev_private;
575 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
576 int format = sarea_priv->vc_format;
577 int offset = buf->bus_address;
578 int size = buf->used;
579 int prim = buf_priv->prim;
580 int i = 0;
581 RING_LOCALS;
582 DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox);
583
584 if (0)
585 r128_print_dirty("dispatch_vertex", sarea_priv->dirty);
586
587 if (buf->used) {
588 buf_priv->dispatched = 1;
589
590 if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
591 r128_emit_state(dev_priv);
592
593 do {
594 /* Emit the next set of up to three cliprects */
595 if (i < sarea_priv->nbox) {
596 r128_emit_clip_rects(dev_priv,
597 &sarea_priv->boxes[i],
598 sarea_priv->nbox - i);
599 }
600
601 /* Emit the vertex buffer rendering commands */
602 BEGIN_RING(5);
603
604 OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3));
605 OUT_RING(offset);
606 OUT_RING(size);
607 OUT_RING(format);
608 OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
609 (size << R128_CCE_VC_CNTL_NUM_SHIFT));
610
611 ADVANCE_RING();
612
613 i += 3;
614 } while (i < sarea_priv->nbox);
615 }
616
617 if (buf_priv->discard) {
618 buf_priv->age = dev_priv->sarea_priv->last_dispatch;
619
620 /* Emit the vertex buffer age */
621 BEGIN_RING(2);
622
623 OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
624 OUT_RING(buf_priv->age);
625
626 ADVANCE_RING();
627
628 buf->pending = 1;
629 buf->used = 0;
630 /* FIXME: Check dispatched field */
631 buf_priv->dispatched = 0;
632 }
633
634 dev_priv->sarea_priv->last_dispatch++;
635
636 sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
637 sarea_priv->nbox = 0;
638 }
639
640 static void r128_cce_dispatch_indirect(struct drm_device *dev,
641 struct drm_buf *buf, int start, int end)
642 {
643 drm_r128_private_t *dev_priv = dev->dev_private;
644 drm_r128_buf_priv_t *buf_priv = buf->dev_private;
645 RING_LOCALS;
646 DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
647
648 if (start != end) {
649 int offset = buf->bus_address + start;
650 int dwords = (end - start + 3) / sizeof(u32);
651
652 /* Indirect buffer data must be an even number of
653 * dwords, so if we've been given an odd number we must
654 * pad the data with a Type-2 CCE packet.
655 */
656 if (dwords & 1) {
657 u32 *data = (u32 *)
658 ((char *)dev->agp_buffer_map->handle
659 + buf->offset + start);
660 data[dwords++] = cpu_to_le32(R128_CCE_PACKET2);
661 }
662
663 buf_priv->dispatched = 1;
664
665 /* Fire off the indirect buffer */
666 BEGIN_RING(3);
667
668 OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1));
669 OUT_RING(offset);
670 OUT_RING(dwords);
671
672 ADVANCE_RING();
673 }
674
675 if (buf_priv->discard) {
676 buf_priv->age = dev_priv->sarea_priv->last_dispatch;
677
678 /* Emit the indirect buffer age */
679 BEGIN_RING(2);
680
681 OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
682 OUT_RING(buf_priv->age);
683
684 ADVANCE_RING();
685
686 buf->pending = 1;
687 buf->used = 0;
688 /* FIXME: Check dispatched field */
689 buf_priv->dispatched = 0;
690 }
691
692 dev_priv->sarea_priv->last_dispatch++;
693 }
694
695 static void r128_cce_dispatch_indices(struct drm_device *dev,
696 struct drm_buf *buf,
697 int start, int end, int count)
698 {
699 drm_r128_private_t *dev_priv = dev->dev_private;
700 drm_r128_buf_priv_t *buf_priv = buf->dev_private;
701 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
702 int format = sarea_priv->vc_format;
703 int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
704 int prim = buf_priv->prim;
705 u32 *data;
706 int dwords;
707 int i = 0;
708 RING_LOCALS;
709 DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count);
710
711 if (0)
712 r128_print_dirty("dispatch_indices", sarea_priv->dirty);
713
714 if (start != end) {
715 buf_priv->dispatched = 1;
716
717 if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
718 r128_emit_state(dev_priv);
719
720 dwords = (end - start + 3) / sizeof(u32);
721
722 data = (u32 *) ((char *)dev->agp_buffer_map->handle
723 + buf->offset + start);
724
725 data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM,
726 dwords - 2));
727
728 data[1] = cpu_to_le32(offset);
729 data[2] = cpu_to_le32(R128_MAX_VB_VERTS);
730 data[3] = cpu_to_le32(format);
731 data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
732 (count << 16)));
733
734 if (count & 0x1) {
735 #ifdef __LITTLE_ENDIAN
736 data[dwords - 1] &= 0x0000ffff;
737 #else
738 data[dwords - 1] &= 0xffff0000;
739 #endif
740 }
741
742 do {
743 /* Emit the next set of up to three cliprects */
744 if (i < sarea_priv->nbox) {
745 r128_emit_clip_rects(dev_priv,
746 &sarea_priv->boxes[i],
747 sarea_priv->nbox - i);
748 }
749
750 r128_cce_dispatch_indirect(dev, buf, start, end);
751
752 i += 3;
753 } while (i < sarea_priv->nbox);
754 }
755
756 if (buf_priv->discard) {
757 buf_priv->age = dev_priv->sarea_priv->last_dispatch;
758
759 /* Emit the vertex buffer age */
760 BEGIN_RING(2);
761
762 OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
763 OUT_RING(buf_priv->age);
764
765 ADVANCE_RING();
766
767 buf->pending = 1;
768 /* FIXME: Check dispatched field */
769 buf_priv->dispatched = 0;
770 }
771
772 dev_priv->sarea_priv->last_dispatch++;
773
774 sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
775 sarea_priv->nbox = 0;
776 }
777
778 static int r128_cce_dispatch_blit(struct drm_device *dev,
779 struct drm_file *file_priv,
780 drm_r128_blit_t *blit)
781 {
782 drm_r128_private_t *dev_priv = dev->dev_private;
783 struct drm_device_dma *dma = dev->dma;
784 struct drm_buf *buf;
785 drm_r128_buf_priv_t *buf_priv;
786 u32 *data;
787 int dword_shift, dwords;
788 RING_LOCALS;
789 DRM_DEBUG("\n");
790
791 /* The compiler won't optimize away a division by a variable,
792 * even if the only legal values are powers of two. Thus, we'll
793 * use a shift instead.
794 */
795 switch (blit->format) {
796 case R128_DATATYPE_ARGB8888:
797 dword_shift = 0;
798 break;
799 case R128_DATATYPE_ARGB1555:
800 case R128_DATATYPE_RGB565:
801 case R128_DATATYPE_ARGB4444:
802 case R128_DATATYPE_YVYU422:
803 case R128_DATATYPE_VYUY422:
804 dword_shift = 1;
805 break;
806 case R128_DATATYPE_CI8:
807 case R128_DATATYPE_RGB8:
808 dword_shift = 2;
809 break;
810 default:
811 DRM_ERROR("invalid blit format %d\n", blit->format);
812 return -EINVAL;
813 }
814
815 /* Flush the pixel cache, and mark the contents as Read Invalid.
816 * This ensures no pixel data gets mixed up with the texture
817 * data from the host data blit, otherwise part of the texture
818 * image may be corrupted.
819 */
820 BEGIN_RING(2);
821
822 OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
823 OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI);
824
825 ADVANCE_RING();
826
827 /* Dispatch the indirect buffer.
828 */
829 buf = dma->buflist[blit->idx];
830 buf_priv = buf->dev_private;
831
832 if (buf->file_priv != file_priv) {
833 DRM_ERROR("process %d using buffer owned by %p\n",
834 task_pid_nr(current), buf->file_priv);
835 return -EINVAL;
836 }
837 if (buf->pending) {
838 DRM_ERROR("sending pending buffer %d\n", blit->idx);
839 return -EINVAL;
840 }
841
842 buf_priv->discard = 1;
843
844 dwords = (blit->width * blit->height) >> dword_shift;
845
846 data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
847
848 data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6));
849 data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL |
850 R128_GMC_BRUSH_NONE |
851 (blit->format << 8) |
852 R128_GMC_SRC_DATATYPE_COLOR |
853 R128_ROP3_S |
854 R128_DP_SRC_SOURCE_HOST_DATA |
855 R128_GMC_CLR_CMP_CNTL_DIS |
856 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS));
857
858 data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5));
859 data[3] = cpu_to_le32(0xffffffff);
860 data[4] = cpu_to_le32(0xffffffff);
861 data[5] = cpu_to_le32((blit->y << 16) | blit->x);
862 data[6] = cpu_to_le32((blit->height << 16) | blit->width);
863 data[7] = cpu_to_le32(dwords);
864
865 buf->used = (dwords + 8) * sizeof(u32);
866
867 r128_cce_dispatch_indirect(dev, buf, 0, buf->used);
868
869 /* Flush the pixel cache after the blit completes. This ensures
870 * the texture data is written out to memory before rendering
871 * continues.
872 */
873 BEGIN_RING(2);
874
875 OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
876 OUT_RING(R128_PC_FLUSH_GUI);
877
878 ADVANCE_RING();
879
880 return 0;
881 }
882
883 /* ================================================================
884 * Tiled depth buffer management
885 *
886 * FIXME: These should all set the destination write mask for when we
887 * have hardware stencil support.
888 */
889
890 static int r128_cce_dispatch_write_span(struct drm_device *dev,
891 drm_r128_depth_t *depth)
892 {
893 drm_r128_private_t *dev_priv = dev->dev_private;
894 int count, x, y;
895 u32 *buffer;
896 u8 *mask;
897 int i, buffer_size, mask_size;
898 RING_LOCALS;
899 DRM_DEBUG("\n");
900
901 count = depth->n;
902 if (count > 4096 || count <= 0)
903 return -EMSGSIZE;
904
905 if (copy_from_user(&x, depth->x, sizeof(x)))
906 return -EFAULT;
907 if (copy_from_user(&y, depth->y, sizeof(y)))
908 return -EFAULT;
909
910 buffer_size = depth->n * sizeof(u32);
911 buffer = memdup_user(depth->buffer, buffer_size);
912 if (IS_ERR(buffer))
913 return PTR_ERR(buffer);
914
915 mask_size = depth->n;
916 if (depth->mask) {
917 mask = memdup_user(depth->mask, mask_size);
918 if (IS_ERR(mask)) {
919 kfree(buffer);
920 return PTR_ERR(mask);
921 }
922
923 for (i = 0; i < count; i++, x++) {
924 if (mask[i]) {
925 BEGIN_RING(6);
926
927 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
928 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
929 R128_GMC_BRUSH_SOLID_COLOR |
930 (dev_priv->depth_fmt << 8) |
931 R128_GMC_SRC_DATATYPE_COLOR |
932 R128_ROP3_P |
933 R128_GMC_CLR_CMP_CNTL_DIS |
934 R128_GMC_WR_MSK_DIS);
935
936 OUT_RING(dev_priv->depth_pitch_offset_c);
937 OUT_RING(buffer[i]);
938
939 OUT_RING((x << 16) | y);
940 OUT_RING((1 << 16) | 1);
941
942 ADVANCE_RING();
943 }
944 }
945
946 kfree(mask);
947 } else {
948 for (i = 0; i < count; i++, x++) {
949 BEGIN_RING(6);
950
951 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
952 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
953 R128_GMC_BRUSH_SOLID_COLOR |
954 (dev_priv->depth_fmt << 8) |
955 R128_GMC_SRC_DATATYPE_COLOR |
956 R128_ROP3_P |
957 R128_GMC_CLR_CMP_CNTL_DIS |
958 R128_GMC_WR_MSK_DIS);
959
960 OUT_RING(dev_priv->depth_pitch_offset_c);
961 OUT_RING(buffer[i]);
962
963 OUT_RING((x << 16) | y);
964 OUT_RING((1 << 16) | 1);
965
966 ADVANCE_RING();
967 }
968 }
969
970 kfree(buffer);
971
972 return 0;
973 }
974
975 static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
976 drm_r128_depth_t *depth)
977 {
978 drm_r128_private_t *dev_priv = dev->dev_private;
979 int count, *x, *y;
980 u32 *buffer;
981 u8 *mask;
982 int i, xbuf_size, ybuf_size, buffer_size, mask_size;
983 RING_LOCALS;
984 DRM_DEBUG("\n");
985
986 count = depth->n;
987 if (count > 4096 || count <= 0)
988 return -EMSGSIZE;
989
990 xbuf_size = count * sizeof(*x);
991 ybuf_size = count * sizeof(*y);
992 x = memdup_user(depth->x, xbuf_size);
993 if (IS_ERR(x))
994 return PTR_ERR(x);
995 y = memdup_user(depth->y, ybuf_size);
996 if (IS_ERR(y)) {
997 kfree(x);
998 return PTR_ERR(y);
999 }
1000 buffer_size = depth->n * sizeof(u32);
1001 buffer = memdup_user(depth->buffer, buffer_size);
1002 if (IS_ERR(buffer)) {
1003 kfree(x);
1004 kfree(y);
1005 return PTR_ERR(buffer);
1006 }
1007
1008 if (depth->mask) {
1009 mask_size = depth->n;
1010 mask = memdup_user(depth->mask, mask_size);
1011 if (IS_ERR(mask)) {
1012 kfree(x);
1013 kfree(y);
1014 kfree(buffer);
1015 return PTR_ERR(mask);
1016 }
1017
1018 for (i = 0; i < count; i++) {
1019 if (mask[i]) {
1020 BEGIN_RING(6);
1021
1022 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
1023 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
1024 R128_GMC_BRUSH_SOLID_COLOR |
1025 (dev_priv->depth_fmt << 8) |
1026 R128_GMC_SRC_DATATYPE_COLOR |
1027 R128_ROP3_P |
1028 R128_GMC_CLR_CMP_CNTL_DIS |
1029 R128_GMC_WR_MSK_DIS);
1030
1031 OUT_RING(dev_priv->depth_pitch_offset_c);
1032 OUT_RING(buffer[i]);
1033
1034 OUT_RING((x[i] << 16) | y[i]);
1035 OUT_RING((1 << 16) | 1);
1036
1037 ADVANCE_RING();
1038 }
1039 }
1040
1041 kfree(mask);
1042 } else {
1043 for (i = 0; i < count; i++) {
1044 BEGIN_RING(6);
1045
1046 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
1047 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
1048 R128_GMC_BRUSH_SOLID_COLOR |
1049 (dev_priv->depth_fmt << 8) |
1050 R128_GMC_SRC_DATATYPE_COLOR |
1051 R128_ROP3_P |
1052 R128_GMC_CLR_CMP_CNTL_DIS |
1053 R128_GMC_WR_MSK_DIS);
1054
1055 OUT_RING(dev_priv->depth_pitch_offset_c);
1056 OUT_RING(buffer[i]);
1057
1058 OUT_RING((x[i] << 16) | y[i]);
1059 OUT_RING((1 << 16) | 1);
1060
1061 ADVANCE_RING();
1062 }
1063 }
1064
1065 kfree(x);
1066 kfree(y);
1067 kfree(buffer);
1068
1069 return 0;
1070 }
1071
1072 static int r128_cce_dispatch_read_span(struct drm_device *dev,
1073 drm_r128_depth_t *depth)
1074 {
1075 drm_r128_private_t *dev_priv = dev->dev_private;
1076 int count, x, y;
1077 RING_LOCALS;
1078 DRM_DEBUG("\n");
1079
1080 count = depth->n;
1081 if (count > 4096 || count <= 0)
1082 return -EMSGSIZE;
1083
1084 if (copy_from_user(&x, depth->x, sizeof(x)))
1085 return -EFAULT;
1086 if (copy_from_user(&y, depth->y, sizeof(y)))
1087 return -EFAULT;
1088
1089 BEGIN_RING(7);
1090
1091 OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
1092 OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
1093 R128_GMC_DST_PITCH_OFFSET_CNTL |
1094 R128_GMC_BRUSH_NONE |
1095 (dev_priv->depth_fmt << 8) |
1096 R128_GMC_SRC_DATATYPE_COLOR |
1097 R128_ROP3_S |
1098 R128_DP_SRC_SOURCE_MEMORY |
1099 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
1100
1101 OUT_RING(dev_priv->depth_pitch_offset_c);
1102 OUT_RING(dev_priv->span_pitch_offset_c);
1103
1104 OUT_RING((x << 16) | y);
1105 OUT_RING((0 << 16) | 0);
1106 OUT_RING((count << 16) | 1);
1107
1108 ADVANCE_RING();
1109
1110 return 0;
1111 }
1112
1113 static int r128_cce_dispatch_read_pixels(struct drm_device *dev,
1114 drm_r128_depth_t *depth)
1115 {
1116 drm_r128_private_t *dev_priv = dev->dev_private;
1117 int count, *x, *y;
1118 int i, xbuf_size, ybuf_size;
1119 RING_LOCALS;
1120 DRM_DEBUG("\n");
1121
1122 count = depth->n;
1123 if (count > 4096 || count <= 0)
1124 return -EMSGSIZE;
1125
1126 if (count > dev_priv->depth_pitch)
1127 count = dev_priv->depth_pitch;
1128
1129 xbuf_size = count * sizeof(*x);
1130 ybuf_size = count * sizeof(*y);
1131 x = kmalloc(xbuf_size, GFP_KERNEL);
1132 if (x == NULL)
1133 return -ENOMEM;
1134 y = kmalloc(ybuf_size, GFP_KERNEL);
1135 if (y == NULL) {
1136 kfree(x);
1137 return -ENOMEM;
1138 }
1139 if (copy_from_user(x, depth->x, xbuf_size)) {
1140 kfree(x);
1141 kfree(y);
1142 return -EFAULT;
1143 }
1144 if (copy_from_user(y, depth->y, ybuf_size)) {
1145 kfree(x);
1146 kfree(y);
1147 return -EFAULT;
1148 }
1149
1150 for (i = 0; i < count; i++) {
1151 BEGIN_RING(7);
1152
1153 OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
1154 OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
1155 R128_GMC_DST_PITCH_OFFSET_CNTL |
1156 R128_GMC_BRUSH_NONE |
1157 (dev_priv->depth_fmt << 8) |
1158 R128_GMC_SRC_DATATYPE_COLOR |
1159 R128_ROP3_S |
1160 R128_DP_SRC_SOURCE_MEMORY |
1161 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
1162
1163 OUT_RING(dev_priv->depth_pitch_offset_c);
1164 OUT_RING(dev_priv->span_pitch_offset_c);
1165
1166 OUT_RING((x[i] << 16) | y[i]);
1167 OUT_RING((i << 16) | 0);
1168 OUT_RING((1 << 16) | 1);
1169
1170 ADVANCE_RING();
1171 }
1172
1173 kfree(x);
1174 kfree(y);
1175
1176 return 0;
1177 }
1178
1179 /* ================================================================
1180 * Polygon stipple
1181 */
1182
1183 static void r128_cce_dispatch_stipple(struct drm_device *dev, u32 *stipple)
1184 {
1185 drm_r128_private_t *dev_priv = dev->dev_private;
1186 int i;
1187 RING_LOCALS;
1188 DRM_DEBUG("\n");
1189
1190 BEGIN_RING(33);
1191
1192 OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
1193 for (i = 0; i < 32; i++)
1194 OUT_RING(stipple[i]);
1195
1196 ADVANCE_RING();
1197 }
1198
1199 /* ================================================================
1200 * IOCTL functions
1201 */
1202
1203 static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
1204 {
1205 drm_r128_private_t *dev_priv = dev->dev_private;
1206 drm_r128_sarea_t *sarea_priv;
1207 drm_r128_clear_t *clear = data;
1208 DRM_DEBUG("\n");
1209
1210 LOCK_TEST_WITH_RETURN(dev, file_priv);
1211
1212 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1213
1214 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1215
1216 sarea_priv = dev_priv->sarea_priv;
1217
1218 if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
1219 sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
1220
1221 r128_cce_dispatch_clear(dev, clear);
1222 COMMIT_RING();
1223
1224 /* Make sure we restore the 3D state next time.
1225 */
1226 dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;
1227
1228 return 0;
1229 }
1230
1231 static int r128_do_init_pageflip(struct drm_device *dev)
1232 {
1233 drm_r128_private_t *dev_priv = dev->dev_private;
1234 DRM_DEBUG("\n");
1235
1236 dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET);
1237 dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL);
1238
1239 R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset);
1240 R128_WRITE(R128_CRTC_OFFSET_CNTL,
1241 dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL);
1242
1243 dev_priv->page_flipping = 1;
1244 dev_priv->current_page = 0;
1245 dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
1246
1247 return 0;
1248 }
1249
1250 static int r128_do_cleanup_pageflip(struct drm_device *dev)
1251 {
1252 drm_r128_private_t *dev_priv = dev->dev_private;
1253 DRM_DEBUG("\n");
1254
1255 R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset);
1256 R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl);
1257
1258 if (dev_priv->current_page != 0) {
1259 r128_cce_dispatch_flip(dev);
1260 COMMIT_RING();
1261 }
1262
1263 dev_priv->page_flipping = 0;
1264 return 0;
1265 }
1266
1267 /* Swapping and flipping are different operations, need different ioctls.
1268 * They can & should be intermixed to support multiple 3d windows.
1269 */
1270
1271 static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
1272 {
1273 drm_r128_private_t *dev_priv = dev->dev_private;
1274 DRM_DEBUG("\n");
1275
1276 LOCK_TEST_WITH_RETURN(dev, file_priv);
1277
1278 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1279
1280 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1281
1282 if (!dev_priv->page_flipping)
1283 r128_do_init_pageflip(dev);
1284
1285 r128_cce_dispatch_flip(dev);
1286
1287 COMMIT_RING();
1288 return 0;
1289 }
1290
1291 static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
1292 {
1293 drm_r128_private_t *dev_priv = dev->dev_private;
1294 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
1295 DRM_DEBUG("\n");
1296
1297 LOCK_TEST_WITH_RETURN(dev, file_priv);
1298
1299 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1300
1301 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1302
1303 if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
1304 sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
1305
1306 r128_cce_dispatch_swap(dev);
1307 dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
1308 R128_UPLOAD_MASKS);
1309
1310 COMMIT_RING();
1311 return 0;
1312 }
1313
1314 static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
1315 {
1316 drm_r128_private_t *dev_priv = dev->dev_private;
1317 struct drm_device_dma *dma = dev->dma;
1318 struct drm_buf *buf;
1319 drm_r128_buf_priv_t *buf_priv;
1320 drm_r128_vertex_t *vertex = data;
1321
1322 LOCK_TEST_WITH_RETURN(dev, file_priv);
1323
1324 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1325
1326 DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
1327 task_pid_nr(current), vertex->idx, vertex->count, vertex->discard);
1328
1329 if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
1330 DRM_ERROR("buffer index %d (of %d max)\n",
1331 vertex->idx, dma->buf_count - 1);
1332 return -EINVAL;
1333 }
1334 if (vertex->prim < 0 ||
1335 vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
1336 DRM_ERROR("buffer prim %d\n", vertex->prim);
1337 return -EINVAL;
1338 }
1339
1340 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1341 VB_AGE_TEST_WITH_RETURN(dev_priv);
1342
1343 buf = dma->buflist[vertex->idx];
1344 buf_priv = buf->dev_private;
1345
1346 if (buf->file_priv != file_priv) {
1347 DRM_ERROR("process %d using buffer owned by %p\n",
1348 task_pid_nr(current), buf->file_priv);
1349 return -EINVAL;
1350 }
1351 if (buf->pending) {
1352 DRM_ERROR("sending pending buffer %d\n", vertex->idx);
1353 return -EINVAL;
1354 }
1355
1356 buf->used = vertex->count;
1357 buf_priv->prim = vertex->prim;
1358 buf_priv->discard = vertex->discard;
1359
1360 r128_cce_dispatch_vertex(dev, buf);
1361
1362 COMMIT_RING();
1363 return 0;
1364 }
1365
1366 static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
1367 {
1368 drm_r128_private_t *dev_priv = dev->dev_private;
1369 struct drm_device_dma *dma = dev->dma;
1370 struct drm_buf *buf;
1371 drm_r128_buf_priv_t *buf_priv;
1372 drm_r128_indices_t *elts = data;
1373 int count;
1374
1375 LOCK_TEST_WITH_RETURN(dev, file_priv);
1376
1377 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1378
1379 DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", task_pid_nr(current),
1380 elts->idx, elts->start, elts->end, elts->discard);
1381
1382 if (elts->idx < 0 || elts->idx >= dma->buf_count) {
1383 DRM_ERROR("buffer index %d (of %d max)\n",
1384 elts->idx, dma->buf_count - 1);
1385 return -EINVAL;
1386 }
1387 if (elts->prim < 0 ||
1388 elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
1389 DRM_ERROR("buffer prim %d\n", elts->prim);
1390 return -EINVAL;
1391 }
1392
1393 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1394 VB_AGE_TEST_WITH_RETURN(dev_priv);
1395
1396 buf = dma->buflist[elts->idx];
1397 buf_priv = buf->dev_private;
1398
1399 if (buf->file_priv != file_priv) {
1400 DRM_ERROR("process %d using buffer owned by %p\n",
1401 task_pid_nr(current), buf->file_priv);
1402 return -EINVAL;
1403 }
1404 if (buf->pending) {
1405 DRM_ERROR("sending pending buffer %d\n", elts->idx);
1406 return -EINVAL;
1407 }
1408
1409 count = (elts->end - elts->start) / sizeof(u16);
1410 elts->start -= R128_INDEX_PRIM_OFFSET;
1411
1412 if (elts->start & 0x7) {
1413 DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
1414 return -EINVAL;
1415 }
1416 if (elts->start < buf->used) {
1417 DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
1418 return -EINVAL;
1419 }
1420
1421 buf->used = elts->end;
1422 buf_priv->prim = elts->prim;
1423 buf_priv->discard = elts->discard;
1424
1425 r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count);
1426
1427 COMMIT_RING();
1428 return 0;
1429 }
1430
1431 static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
1432 {
1433 struct drm_device_dma *dma = dev->dma;
1434 drm_r128_private_t *dev_priv = dev->dev_private;
1435 drm_r128_blit_t *blit = data;
1436 int ret;
1437
1438 LOCK_TEST_WITH_RETURN(dev, file_priv);
1439
1440 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1441
1442 DRM_DEBUG("pid=%d index=%d\n", task_pid_nr(current), blit->idx);
1443
1444 if (blit->idx < 0 || blit->idx >= dma->buf_count) {
1445 DRM_ERROR("buffer index %d (of %d max)\n",
1446 blit->idx, dma->buf_count - 1);
1447 return -EINVAL;
1448 }
1449
1450 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1451 VB_AGE_TEST_WITH_RETURN(dev_priv);
1452
1453 ret = r128_cce_dispatch_blit(dev, file_priv, blit);
1454
1455 COMMIT_RING();
1456 return ret;
1457 }
1458
1459 int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
1460 {
1461 drm_r128_private_t *dev_priv = dev->dev_private;
1462 drm_r128_depth_t *depth = data;
1463 int ret;
1464
1465 LOCK_TEST_WITH_RETURN(dev, file_priv);
1466
1467 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1468
1469 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1470
1471 ret = -EINVAL;
1472 switch (depth->func) {
1473 case R128_WRITE_SPAN:
1474 ret = r128_cce_dispatch_write_span(dev, depth);
1475 break;
1476 case R128_WRITE_PIXELS:
1477 ret = r128_cce_dispatch_write_pixels(dev, depth);
1478 break;
1479 case R128_READ_SPAN:
1480 ret = r128_cce_dispatch_read_span(dev, depth);
1481 break;
1482 case R128_READ_PIXELS:
1483 ret = r128_cce_dispatch_read_pixels(dev, depth);
1484 break;
1485 }
1486
1487 COMMIT_RING();
1488 return ret;
1489 }
1490
1491 int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
1492 {
1493 drm_r128_private_t *dev_priv = dev->dev_private;
1494 drm_r128_stipple_t *stipple = data;
1495 u32 mask[32];
1496
1497 LOCK_TEST_WITH_RETURN(dev, file_priv);
1498
1499 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1500
1501 if (copy_from_user(&mask, stipple->mask, 32 * sizeof(u32)))
1502 return -EFAULT;
1503
1504 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1505
1506 r128_cce_dispatch_stipple(dev, mask);
1507
1508 COMMIT_RING();
1509 return 0;
1510 }
1511
1512 static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
1513 {
1514 drm_r128_private_t *dev_priv = dev->dev_private;
1515 struct drm_device_dma *dma = dev->dma;
1516 struct drm_buf *buf;
1517 drm_r128_buf_priv_t *buf_priv;
1518 drm_r128_indirect_t *indirect = data;
1519 #if 0
1520 RING_LOCALS;
1521 #endif
1522
1523 LOCK_TEST_WITH_RETURN(dev, file_priv);
1524
1525 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1526
1527 DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
1528 indirect->idx, indirect->start, indirect->end,
1529 indirect->discard);
1530
1531 if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
1532 DRM_ERROR("buffer index %d (of %d max)\n",
1533 indirect->idx, dma->buf_count - 1);
1534 return -EINVAL;
1535 }
1536
1537 buf = dma->buflist[indirect->idx];
1538 buf_priv = buf->dev_private;
1539
1540 if (buf->file_priv != file_priv) {
1541 DRM_ERROR("process %d using buffer owned by %p\n",
1542 task_pid_nr(current), buf->file_priv);
1543 return -EINVAL;
1544 }
1545 if (buf->pending) {
1546 DRM_ERROR("sending pending buffer %d\n", indirect->idx);
1547 return -EINVAL;
1548 }
1549
1550 if (indirect->start < buf->used) {
1551 DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
1552 indirect->start, buf->used);
1553 return -EINVAL;
1554 }
1555
1556 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1557 VB_AGE_TEST_WITH_RETURN(dev_priv);
1558
1559 buf->used = indirect->end;
1560 buf_priv->discard = indirect->discard;
1561
1562 #if 0
1563 /* Wait for the 3D stream to idle before the indirect buffer
1564 * containing 2D acceleration commands is processed.
1565 */
1566 BEGIN_RING(2);
1567 RADEON_WAIT_UNTIL_3D_IDLE();
1568 ADVANCE_RING();
1569 #endif
1570
1571 /* Dispatch the indirect buffer full of commands from the
1572 * X server. This is insecure and is thus only available to
1573 * privileged clients.
1574 */
1575 r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end);
1576
1577 COMMIT_RING();
1578 return 0;
1579 }
1580
1581 int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
1582 {
1583 drm_r128_private_t *dev_priv = dev->dev_private;
1584 drm_r128_getparam_t *param = data;
1585 int value;
1586
1587 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1588
1589 DRM_DEBUG("pid=%d\n", task_pid_nr(current));
1590
1591 switch (param->param) {
1592 case R128_PARAM_IRQ_NR:
1593 value = dev->pdev->irq;
1594 break;
1595 default:
1596 return -EINVAL;
1597 }
1598
1599 if (copy_to_user(param->value, &value, sizeof(int))) {
1600 DRM_ERROR("copy_to_user\n");
1601 return -EFAULT;
1602 }
1603
1604 return 0;
1605 }
1606
1607 void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
1608 {
1609 if (dev->dev_private) {
1610 drm_r128_private_t *dev_priv = dev->dev_private;
1611 if (dev_priv->page_flipping)
1612 r128_do_cleanup_pageflip(dev);
1613 }
1614 }
1615 void r128_driver_lastclose(struct drm_device *dev)
1616 {
1617 r128_do_cleanup_cce(dev);
1618 }
1619
1620 const struct drm_ioctl_desc r128_ioctls[] = {
1621 DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1622 DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1623 DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1624 DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1625 DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
1626 DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH),
1627 DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
1628 DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH),
1629 DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH),
1630 DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH),
1631 DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH),
1632 DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH),
1633 DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH),
1634 DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH),
1635 DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
1636 DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1637 DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
1638 };
1639
1640 int r128_max_ioctl = ARRAY_SIZE(r128_ioctls);
1641