1 /**************************************************************************
2 *
3 * Copyright 2010 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include "os/os_thread.h"
30 #include "util/format/u_format.h"
31 #include "util/u_string.h"
32 #include "util/u_inlines.h"
33 #include "util/u_memory.h"
34 #include "util/u_network.h"
35 #include "util/os_time.h"
36
37 #include "tgsi/tgsi_parse.h"
38
39 #include "rbug_context.h"
40 #include "rbug_objects.h"
41
42 #include "rbug/rbug.h"
43
44 #include <errno.h>
45
46 #define U642VOID(x) ((void *)(uintptr_t)(x))
47 #define VOID2U64(x) ((uint64_t)(uintptr_t)(x))
48
/* Per-screen remote-debugging state: one listener thread serving one
 * debugger connection at a time. */
struct rbug_rbug
{
   struct rbug_screen *rb_screen;  /* screen being debugged; owns the context/resource lists */
   struct rbug_connection *con;    /* active debugger connection, NULL while disconnected */
   thrd_t thread;                  /* listener thread running rbug_thread() */
   bool running;                   /* cleared by rbug_stop() to ask the thread to exit */
};

/* Listener thread entry point (signature matches thrd_start_t). */
int
rbug_thread(void *void_rbug);
59
60
61 /**********************************************************
62 * Helper functions
63 */
64
65
66 static struct rbug_context *
rbug_get_context_locked(struct rbug_screen * rb_screen,rbug_context_t ctx)67 rbug_get_context_locked(struct rbug_screen *rb_screen, rbug_context_t ctx)
68 {
69 struct rbug_context *rb_context;
70
71 LIST_FOR_EACH_ENTRY(rb_context, &rb_screen->contexts, list) {
72 if (ctx == VOID2U64(rb_context))
73 break;
74 rb_context = NULL;
75 }
76
77 return rb_context;
78 }
79
80 static struct rbug_shader *
rbug_get_shader_locked(struct rbug_context * rb_context,rbug_shader_t shdr)81 rbug_get_shader_locked(struct rbug_context *rb_context, rbug_shader_t shdr)
82 {
83 struct rbug_shader *tr_shdr;
84
85 LIST_FOR_EACH_ENTRY(tr_shdr, &rb_context->shaders, list) {
86 if (shdr == VOID2U64(tr_shdr))
87 break;
88 tr_shdr = NULL;
89 }
90
91 return tr_shdr;
92 }
93
94 static void *
rbug_shader_create_locked(struct pipe_context * pipe,struct rbug_shader * rb_shader,struct tgsi_token * tokens)95 rbug_shader_create_locked(struct pipe_context *pipe,
96 struct rbug_shader *rb_shader,
97 struct tgsi_token *tokens)
98 {
99 void *state = NULL;
100 struct pipe_shader_state pss;
101 memset(&pss, 0, sizeof(pss));
102 pss.tokens = tokens;
103
104 switch(rb_shader->type) {
105 case RBUG_SHADER_FRAGMENT:
106 state = pipe->create_fs_state(pipe, &pss);
107 break;
108 case RBUG_SHADER_VERTEX:
109 state = pipe->create_vs_state(pipe, &pss);
110 break;
111 case RBUG_SHADER_GEOM:
112 state = pipe->create_gs_state(pipe, &pss);
113 break;
114 default:
115 assert(0);
116 break;
117 }
118
119 return state;
120 }
121
122 static void
rbug_shader_bind_locked(struct pipe_context * pipe,struct rbug_shader * rb_shader,void * state)123 rbug_shader_bind_locked(struct pipe_context *pipe,
124 struct rbug_shader *rb_shader,
125 void *state)
126 {
127 switch(rb_shader->type) {
128 case RBUG_SHADER_FRAGMENT:
129 pipe->bind_fs_state(pipe, state);
130 break;
131 case RBUG_SHADER_VERTEX:
132 pipe->bind_vs_state(pipe, state);
133 break;
134 case RBUG_SHADER_GEOM:
135 pipe->bind_gs_state(pipe, state);
136 break;
137 default:
138 assert(0);
139 break;
140 }
141 }
142
143 static void
rbug_shader_delete_locked(struct pipe_context * pipe,struct rbug_shader * rb_shader,void * state)144 rbug_shader_delete_locked(struct pipe_context *pipe,
145 struct rbug_shader *rb_shader,
146 void *state)
147 {
148 switch(rb_shader->type) {
149 case RBUG_SHADER_FRAGMENT:
150 pipe->delete_fs_state(pipe, state);
151 break;
152 case RBUG_SHADER_VERTEX:
153 pipe->delete_vs_state(pipe, state);
154 break;
155 case RBUG_SHADER_GEOM:
156 pipe->delete_gs_state(pipe, state);
157 break;
158 default:
159 assert(0);
160 break;
161 }
162 }
163
164 /************************************************
165 * Request handler functions
166 */
167
168
169 static int
rbug_texture_list(struct rbug_rbug * tr_rbug,struct rbug_header * header,uint32_t serial)170 rbug_texture_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
171 {
172 struct rbug_screen *rb_screen = tr_rbug->rb_screen;
173 struct rbug_resource *tr_tex;
174 rbug_texture_t *texs;
175 int i = 0;
176
177 mtx_lock(&rb_screen->list_mutex);
178 texs = MALLOC(rb_screen->num_resources * sizeof(rbug_texture_t));
179 LIST_FOR_EACH_ENTRY(tr_tex, &rb_screen->resources, list) {
180 texs[i++] = VOID2U64(tr_tex);
181 }
182 mtx_unlock(&rb_screen->list_mutex);
183
184 rbug_send_texture_list_reply(tr_rbug->con, serial, texs, i, NULL);
185 FREE(texs);
186
187 return 0;
188 }
189
/*
 * Handle TEXTURE_INFO: look the texture up by its wire handle and reply
 * with its dimensions, format and block-layout parameters.
 * Returns -ESRCH if the handle does not name a live resource.
 */
static int
rbug_texture_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
{
   struct rbug_screen *rb_screen = tr_rbug->rb_screen;
   struct rbug_resource *tr_tex;
   struct rbug_proto_texture_info *gpti = (struct rbug_proto_texture_info *)header;
   struct pipe_resource *t;
   uint16_t num_layers;

   /* Handles are object addresses (VOID2U64); scan the screen's resource
    * list for a matching entry, leaving tr_tex NULL when none matches. */
   mtx_lock(&rb_screen->list_mutex);
   LIST_FOR_EACH_ENTRY(tr_tex, &rb_screen->resources, list) {
      if (gpti->texture == VOID2U64(tr_tex))
         break;
      tr_tex = NULL;
   }

   if (!tr_tex) {
      mtx_unlock(&rb_screen->list_mutex);
      return -ESRCH;
   }

   t = tr_tex->resource;
   num_layers = util_num_layers(t, 0);  /* layer count at the base mip level */

   /* width0/height0/num_layers are sent as one-element arrays, as the
    * rbug wire protocol expects per-mip arrays with a count. */
   rbug_send_texture_info_reply(tr_rbug->con, serial,
                                t->target, t->format,
                                &t->width0, 1,
                                &t->height0, 1,
                                &num_layers, 1,
                                util_format_get_blockwidth(t->format),
                                util_format_get_blockheight(t->format),
                                util_format_get_blocksize(t->format),
                                t->last_level,
                                t->nr_samples,
                                t->bind,
                                NULL);

   mtx_unlock(&rb_screen->list_mutex);

   return 0;
}
231
232 static int
rbug_texture_read(struct rbug_rbug * tr_rbug,struct rbug_header * header,uint32_t serial)233 rbug_texture_read(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
234 {
235 struct rbug_proto_texture_read *gptr = (struct rbug_proto_texture_read *)header;
236
237 struct rbug_screen *rb_screen = tr_rbug->rb_screen;
238 struct rbug_resource *tr_tex;
239
240 struct pipe_context *context = rb_screen->private_context;
241 struct pipe_resource *tex;
242 struct pipe_transfer *t;
243
244 void *map;
245
246 mtx_lock(&rb_screen->list_mutex);
247 LIST_FOR_EACH_ENTRY(tr_tex, &rb_screen->resources, list) {
248 if (gptr->texture == VOID2U64(tr_tex))
249 break;
250 tr_tex = NULL;
251 }
252
253 if (!tr_tex) {
254 mtx_unlock(&rb_screen->list_mutex);
255 return -ESRCH;
256 }
257
258 tex = tr_tex->resource;
259 map = pipe_texture_map(context, tex,
260 gptr->level, gptr->face + gptr->zslice,
261 PIPE_MAP_READ,
262 gptr->x, gptr->y, gptr->w, gptr->h, &t);
263
264 rbug_send_texture_read_reply(tr_rbug->con, serial,
265 t->resource->format,
266 util_format_get_blockwidth(t->resource->format),
267 util_format_get_blockheight(t->resource->format),
268 util_format_get_blocksize(t->resource->format),
269 (uint8_t*)map,
270 t->stride * util_format_get_nblocksy(t->resource->format,
271 t->box.height),
272 t->stride,
273 NULL);
274
275 context->texture_unmap(context, t);
276
277 mtx_unlock(&rb_screen->list_mutex);
278
279 return 0;
280 }
281
282 static int
rbug_context_list(struct rbug_rbug * tr_rbug,struct rbug_header * header,uint32_t serial)283 rbug_context_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
284 {
285 struct rbug_screen *rb_screen = tr_rbug->rb_screen;
286 struct rbug_context *rb_context, *next;
287 rbug_context_t *ctxs;
288 int i = 0;
289
290 mtx_lock(&rb_screen->list_mutex);
291 ctxs = MALLOC(rb_screen->num_contexts * sizeof(rbug_context_t));
292 LIST_FOR_EACH_ENTRY_SAFE(rb_context, next, &rb_screen->contexts, list) {
293 ctxs[i++] = VOID2U64(rb_context);
294 }
295 mtx_unlock(&rb_screen->list_mutex);
296
297 rbug_send_context_list_reply(tr_rbug->con, serial, ctxs, i, NULL);
298 FREE(ctxs);
299
300 return 0;
301 }
302
/*
 * Handle CONTEXT_INFO: reply with the context's currently bound shaders,
 * fragment sampler views, framebuffer attachments and draw-block state.
 * Lock order is list_mutex -> draw_mutex -> call_mutex; the latter two
 * keep the pipe context's bound state stable while it is snapshotted.
 * Returns -ESRCH if the handle does not name a live context.
 */
static int
rbug_context_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
{
   struct rbug_proto_context_info *info = (struct rbug_proto_context_info *)header;

   struct rbug_screen *rb_screen = tr_rbug->rb_screen;
   struct rbug_context *rb_context = NULL;
   rbug_texture_t cbufs[PIPE_MAX_COLOR_BUFS];
   rbug_texture_t texs[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   unsigned i;

   mtx_lock(&rb_screen->list_mutex);
   rb_context = rbug_get_context_locked(rb_screen, info->context);

   if (!rb_context) {
      mtx_unlock(&rb_screen->list_mutex);
      return -ESRCH;
   }

   /* protect the pipe context */
   mtx_lock(&rb_context->draw_mutex);
   mtx_lock(&rb_context->call_mutex);

   /* NOTE(review): assumes curr.nr_cbufs <= PIPE_MAX_COLOR_BUFS and
    * num_views <= PIPE_MAX_SHADER_SAMPLER_VIEWS — presumably guaranteed
    * where curr is populated; confirm against rbug_context.h. */
   for (i = 0; i < rb_context->curr.nr_cbufs; i++)
      cbufs[i] = VOID2U64(rb_context->curr.cbufs[i]);

   /* XXX what about vertex/geometry shader texture views? */
   for (i = 0; i < rb_context->curr.num_views[PIPE_SHADER_FRAGMENT]; i++)
      texs[i] = VOID2U64(rb_context->curr.texs[PIPE_SHADER_FRAGMENT][i]);

   rbug_send_context_info_reply(tr_rbug->con, serial,
                                VOID2U64(rb_context->curr.shader[PIPE_SHADER_VERTEX]), VOID2U64(rb_context->curr.shader[PIPE_SHADER_FRAGMENT]),
                                texs, rb_context->curr.num_views[PIPE_SHADER_FRAGMENT],
                                cbufs, rb_context->curr.nr_cbufs,
                                VOID2U64(rb_context->curr.zsbuf),
                                rb_context->draw_blocker, rb_context->draw_blocked, NULL);

   mtx_unlock(&rb_context->call_mutex);
   mtx_unlock(&rb_context->draw_mutex);
   mtx_unlock(&rb_screen->list_mutex);

   return 0;
}
346
347 static int
rbug_context_draw_block(struct rbug_rbug * tr_rbug,struct rbug_header * header,uint32_t serial)348 rbug_context_draw_block(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
349 {
350 struct rbug_proto_context_draw_block *block = (struct rbug_proto_context_draw_block *)header;
351
352 struct rbug_screen *rb_screen = tr_rbug->rb_screen;
353 struct rbug_context *rb_context = NULL;
354
355 mtx_lock(&rb_screen->list_mutex);
356 rb_context = rbug_get_context_locked(rb_screen, block->context);
357
358 if (!rb_context) {
359 mtx_unlock(&rb_screen->list_mutex);
360 return -ESRCH;
361 }
362
363 mtx_lock(&rb_context->draw_mutex);
364 rb_context->draw_blocker |= block->block;
365 mtx_unlock(&rb_context->draw_mutex);
366
367 mtx_unlock(&rb_screen->list_mutex);
368
369 return 0;
370 }
371
372 static int
rbug_context_draw_step(struct rbug_rbug * tr_rbug,struct rbug_header * header,uint32_t serial)373 rbug_context_draw_step(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
374 {
375 struct rbug_proto_context_draw_step *step = (struct rbug_proto_context_draw_step *)header;
376
377 struct rbug_screen *rb_screen = tr_rbug->rb_screen;
378 struct rbug_context *rb_context = NULL;
379
380 mtx_lock(&rb_screen->list_mutex);
381 rb_context = rbug_get_context_locked(rb_screen, step->context);
382
383 if (!rb_context) {
384 mtx_unlock(&rb_screen->list_mutex);
385 return -ESRCH;
386 }
387
388 mtx_lock(&rb_context->draw_mutex);
389 if (rb_context->draw_blocked & RBUG_BLOCK_RULE) {
390 if (step->step & RBUG_BLOCK_RULE)
391 rb_context->draw_blocked &= ~RBUG_BLOCK_MASK;
392 } else {
393 rb_context->draw_blocked &= ~step->step;
394 }
395 mtx_unlock(&rb_context->draw_mutex);
396
397 cnd_broadcast(&rb_context->draw_cond);
398
399 mtx_unlock(&rb_screen->list_mutex);
400
401 return 0;
402 }
403
404 static int
rbug_context_draw_unblock(struct rbug_rbug * tr_rbug,struct rbug_header * header,uint32_t serial)405 rbug_context_draw_unblock(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
406 {
407 struct rbug_proto_context_draw_unblock *unblock = (struct rbug_proto_context_draw_unblock *)header;
408
409 struct rbug_screen *rb_screen = tr_rbug->rb_screen;
410 struct rbug_context *rb_context = NULL;
411
412 mtx_lock(&rb_screen->list_mutex);
413 rb_context = rbug_get_context_locked(rb_screen, unblock->context);
414
415 if (!rb_context) {
416 mtx_unlock(&rb_screen->list_mutex);
417 return -ESRCH;
418 }
419
420 mtx_lock(&rb_context->draw_mutex);
421 if (rb_context->draw_blocked & RBUG_BLOCK_RULE) {
422 if (unblock->unblock & RBUG_BLOCK_RULE)
423 rb_context->draw_blocked &= ~RBUG_BLOCK_MASK;
424 } else {
425 rb_context->draw_blocked &= ~unblock->unblock;
426 }
427 rb_context->draw_blocker &= ~unblock->unblock;
428 mtx_unlock(&rb_context->draw_mutex);
429
430 cnd_broadcast(&rb_context->draw_cond);
431
432 mtx_unlock(&rb_screen->list_mutex);
433
434 return 0;
435 }
436
437 static int
rbug_context_draw_rule(struct rbug_rbug * tr_rbug,struct rbug_header * header,uint32_t serial)438 rbug_context_draw_rule(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
439 {
440 struct rbug_proto_context_draw_rule *rule = (struct rbug_proto_context_draw_rule *)header;
441
442 struct rbug_screen *rb_screen = tr_rbug->rb_screen;
443 struct rbug_context *rb_context = NULL;
444
445 mtx_lock(&rb_screen->list_mutex);
446 rb_context = rbug_get_context_locked(rb_screen, rule->context);
447
448 if (!rb_context) {
449 mtx_unlock(&rb_screen->list_mutex);
450 return -ESRCH;
451 }
452
453 mtx_lock(&rb_context->draw_mutex);
454 rb_context->draw_rule.shader[PIPE_SHADER_VERTEX] = U642VOID(rule->vertex);
455 rb_context->draw_rule.shader[PIPE_SHADER_FRAGMENT] = U642VOID(rule->fragment);
456 rb_context->draw_rule.texture = U642VOID(rule->texture);
457 rb_context->draw_rule.surf = U642VOID(rule->surface);
458 rb_context->draw_rule.blocker = rule->block;
459 rb_context->draw_blocker |= RBUG_BLOCK_RULE;
460 mtx_unlock(&rb_context->draw_mutex);
461
462 cnd_broadcast(&rb_context->draw_cond);
463
464 mtx_unlock(&rb_screen->list_mutex);
465
466 return 0;
467 }
468
469 static int
rbug_context_flush(struct rbug_rbug * tr_rbug,struct rbug_header * header,uint32_t serial)470 rbug_context_flush(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
471 {
472 struct rbug_proto_context_flush *flush = (struct rbug_proto_context_flush *)header;
473
474 struct rbug_screen *rb_screen = tr_rbug->rb_screen;
475 struct rbug_context *rb_context = NULL;
476
477 mtx_lock(&rb_screen->list_mutex);
478 rb_context = rbug_get_context_locked(rb_screen, flush->context);
479
480 if (!rb_context) {
481 mtx_unlock(&rb_screen->list_mutex);
482 return -ESRCH;
483 }
484
485 /* protect the pipe context */
486 mtx_lock(&rb_context->call_mutex);
487
488 rb_context->pipe->flush(rb_context->pipe, NULL, 0);
489
490 mtx_unlock(&rb_context->call_mutex);
491 mtx_unlock(&rb_screen->list_mutex);
492
493 return 0;
494 }
495
496 static int
rbug_shader_list(struct rbug_rbug * tr_rbug,struct rbug_header * header,uint32_t serial)497 rbug_shader_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
498 {
499 struct rbug_proto_shader_list *list = (struct rbug_proto_shader_list *)header;
500
501 struct rbug_screen *rb_screen = tr_rbug->rb_screen;
502 struct rbug_context *rb_context = NULL;
503 struct rbug_shader *tr_shdr, *next;
504 rbug_shader_t *shdrs;
505 int i = 0;
506
507 mtx_lock(&rb_screen->list_mutex);
508 rb_context = rbug_get_context_locked(rb_screen, list->context);
509
510 if (!rb_context) {
511 mtx_unlock(&rb_screen->list_mutex);
512 return -ESRCH;
513 }
514
515 mtx_lock(&rb_context->list_mutex);
516 shdrs = MALLOC(rb_context->num_shaders * sizeof(rbug_shader_t));
517 LIST_FOR_EACH_ENTRY_SAFE(tr_shdr, next, &rb_context->shaders, list) {
518 shdrs[i++] = VOID2U64(tr_shdr);
519 }
520
521 mtx_unlock(&rb_context->list_mutex);
522 mtx_unlock(&rb_screen->list_mutex);
523
524 rbug_send_shader_list_reply(tr_rbug->con, serial, shdrs, i, NULL);
525 FREE(shdrs);
526
527 return 0;
528 }
529
/*
 * Handle SHADER_INFO: reply with a shader's original TGSI tokens, its
 * replacement tokens (if any), and its disabled flag.
 * Returns -ESRCH when the context or shader handle is unknown.
 * NOTE(review): when the shader has no tokens, no reply is sent at all
 * and 0 is returned — presumably the client tolerates this; confirm.
 */
static int
rbug_shader_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
{
   struct rbug_proto_shader_info *info = (struct rbug_proto_shader_info *)header;

   struct rbug_screen *rb_screen = tr_rbug->rb_screen;
   struct rbug_context *rb_context = NULL;
   struct rbug_shader *tr_shdr = NULL;
   unsigned original_len;
   unsigned replaced_len;

   mtx_lock(&rb_screen->list_mutex);
   rb_context = rbug_get_context_locked(rb_screen, info->context);

   if (!rb_context) {
      mtx_unlock(&rb_screen->list_mutex);
      return -ESRCH;
   }

   mtx_lock(&rb_context->list_mutex);

   tr_shdr = rbug_get_shader_locked(rb_context, info->shader);

   if (!tr_shdr) {
      mtx_unlock(&rb_context->list_mutex);
      mtx_unlock(&rb_screen->list_mutex);
      return -ESRCH;
   }

   /* just in case — the wire format sends tokens as uint32 words, so a
    * tgsi_token must be exactly 4 bytes for the casts below */
   assert(sizeof(struct tgsi_token) == 4);

   if (tr_shdr->tokens) {
      /* token counts double as word counts given the size assert above */
      original_len = tgsi_num_tokens(tr_shdr->tokens);
      if (tr_shdr->replaced_tokens)
         replaced_len = tgsi_num_tokens(tr_shdr->replaced_tokens);
      else
         replaced_len = 0;

      rbug_send_shader_info_reply(tr_rbug->con, serial,
                                  (uint32_t*)tr_shdr->tokens, original_len,
                                  (uint32_t*)tr_shdr->replaced_tokens, replaced_len,
                                  tr_shdr->disabled,
                                  NULL);
   }

   mtx_unlock(&rb_context->list_mutex);
   mtx_unlock(&rb_screen->list_mutex);

   return 0;
}
581
582 static int
rbug_shader_disable(struct rbug_rbug * tr_rbug,struct rbug_header * header)583 rbug_shader_disable(struct rbug_rbug *tr_rbug, struct rbug_header *header)
584 {
585 struct rbug_proto_shader_disable *dis = (struct rbug_proto_shader_disable *)header;
586
587 struct rbug_screen *rb_screen = tr_rbug->rb_screen;
588 struct rbug_context *rb_context = NULL;
589 struct rbug_shader *tr_shdr = NULL;
590
591 mtx_lock(&rb_screen->list_mutex);
592 rb_context = rbug_get_context_locked(rb_screen, dis->context);
593
594 if (!rb_context) {
595 mtx_unlock(&rb_screen->list_mutex);
596 return -ESRCH;
597 }
598
599 mtx_lock(&rb_context->list_mutex);
600
601 tr_shdr = rbug_get_shader_locked(rb_context, dis->shader);
602
603 if (!tr_shdr) {
604 mtx_unlock(&rb_context->list_mutex);
605 mtx_unlock(&rb_screen->list_mutex);
606 return -ESRCH;
607 }
608
609 tr_shdr->disabled = dis->disable;
610
611 mtx_unlock(&rb_context->list_mutex);
612 mtx_unlock(&rb_screen->list_mutex);
613
614 return 0;
615 }
616
/*
 * Handle SHADER_REPLACE: swap a shader's driver CSO for one built from
 * debugger-supplied TGSI tokens, rebinding on the fly if the shader is
 * currently bound.  An empty token list restores the original shader.
 * Lock order: screen list_mutex -> context list_mutex -> call_mutex.
 * Returns -ESRCH for unknown handles, -EINVAL if token duplication or
 * CSO creation fails.
 */
static int
rbug_shader_replace(struct rbug_rbug *tr_rbug, struct rbug_header *header)
{
   struct rbug_proto_shader_replace *rep = (struct rbug_proto_shader_replace *)header;

   struct rbug_screen *rb_screen = tr_rbug->rb_screen;
   struct rbug_context *rb_context = NULL;
   struct rbug_shader *tr_shdr = NULL;
   struct pipe_context *pipe = NULL;
   void *state;

   mtx_lock(&rb_screen->list_mutex);
   rb_context = rbug_get_context_locked(rb_screen, rep->context);

   if (!rb_context) {
      mtx_unlock(&rb_screen->list_mutex);
      return -ESRCH;
   }

   mtx_lock(&rb_context->list_mutex);

   tr_shdr = rbug_get_shader_locked(rb_context, rep->shader);

   if (!tr_shdr) {
      mtx_unlock(&rb_context->list_mutex);
      mtx_unlock(&rb_screen->list_mutex);
      return -ESRCH;
   }

   /* protect the pipe context */
   mtx_lock(&rb_context->call_mutex);

   pipe = rb_context->pipe;

   /* remove old replaced shader */
   if (tr_shdr->replaced_shader) {
      /* if this shader is bound rebind the original shader BEFORE the
       * replacement CSO is deleted, so the driver never holds a freed one */
      if (rb_context->curr.shader[PIPE_SHADER_FRAGMENT] == tr_shdr || rb_context->curr.shader[PIPE_SHADER_VERTEX] == tr_shdr)
         rbug_shader_bind_locked(pipe, tr_shdr, tr_shdr->shader);

      FREE(tr_shdr->replaced_tokens);
      rbug_shader_delete_locked(pipe, tr_shdr, tr_shdr->replaced_shader);
      tr_shdr->replaced_shader = NULL;
      tr_shdr->replaced_tokens = NULL;
   }

   /* empty inputs means restore old which we did above */
   if (rep->tokens_len == 0)
      goto out;

   /* keep a private copy of the tokens; the header is freed by the caller */
   tr_shdr->replaced_tokens = tgsi_dup_tokens((struct tgsi_token *)rep->tokens);
   if (!tr_shdr->replaced_tokens)
      goto err;

   state = rbug_shader_create_locked(pipe, tr_shdr, tr_shdr->replaced_tokens);
   if (!state)
      goto err;

   /* bind new shader if the shader is currently a bound */
   if (rb_context->curr.shader[PIPE_SHADER_FRAGMENT] == tr_shdr || rb_context->curr.shader[PIPE_SHADER_VERTEX] == tr_shdr)
      rbug_shader_bind_locked(pipe, tr_shdr, state);

   /* save state */
   tr_shdr->replaced_shader = state;

out:
   mtx_unlock(&rb_context->call_mutex);
   mtx_unlock(&rb_context->list_mutex);
   mtx_unlock(&rb_screen->list_mutex);

   return 0;

err:
   /* failed duplication/creation: drop any partial replacement state */
   FREE(tr_shdr->replaced_tokens);
   tr_shdr->replaced_shader = NULL;
   tr_shdr->replaced_tokens = NULL;

   mtx_unlock(&rb_context->call_mutex);
   mtx_unlock(&rb_context->list_mutex);
   mtx_unlock(&rb_screen->list_mutex);
   return -EINVAL;
}
699
/*
 * Dispatch one incoming rbug message to its handler, free the message,
 * and report any handler failure back to the debugger as an error reply.
 * Always returns true (the connection loop in rbug_con only stops on
 * a failed receive).
 */
static bool
rbug_header(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
{
   int ret = 0;

   switch(header->opcode) {
   case RBUG_OP_PING:
      rbug_send_ping_reply(tr_rbug->con, serial, NULL);
      break;
   case RBUG_OP_TEXTURE_LIST:
      ret = rbug_texture_list(tr_rbug, header, serial);
      break;
   case RBUG_OP_TEXTURE_INFO:
      ret = rbug_texture_info(tr_rbug, header, serial);
      break;
   case RBUG_OP_TEXTURE_READ:
      ret = rbug_texture_read(tr_rbug, header, serial);
      break;
   case RBUG_OP_CONTEXT_LIST:
      ret = rbug_context_list(tr_rbug, header, serial);
      break;
   case RBUG_OP_CONTEXT_INFO:
      ret = rbug_context_info(tr_rbug, header, serial);
      break;
   case RBUG_OP_CONTEXT_DRAW_BLOCK:
      ret = rbug_context_draw_block(tr_rbug, header, serial);
      break;
   case RBUG_OP_CONTEXT_DRAW_STEP:
      ret = rbug_context_draw_step(tr_rbug, header, serial);
      break;
   case RBUG_OP_CONTEXT_DRAW_UNBLOCK:
      ret = rbug_context_draw_unblock(tr_rbug, header, serial);
      break;
   case RBUG_OP_CONTEXT_DRAW_RULE:
      ret = rbug_context_draw_rule(tr_rbug, header, serial);
      break;
   case RBUG_OP_CONTEXT_FLUSH:
      ret = rbug_context_flush(tr_rbug, header, serial);
      break;
   case RBUG_OP_SHADER_LIST:
      ret = rbug_shader_list(tr_rbug, header, serial);
      break;
   case RBUG_OP_SHADER_INFO:
      ret = rbug_shader_info(tr_rbug, header, serial);
      break;
   case RBUG_OP_SHADER_DISABLE:
      ret = rbug_shader_disable(tr_rbug, header);
      break;
   case RBUG_OP_SHADER_REPLACE:
      ret = rbug_shader_replace(tr_rbug, header);
      break;
   default:
      debug_printf("%s - unsupported opcode %u\n", __FUNCTION__, header->opcode);
      ret = -ENOSYS;
      break;
   }
   /* handlers read from header above; it is consumed here */
   rbug_free_header(header);

   if (ret)
      rbug_send_error_reply(tr_rbug->con, serial, ret, NULL);

   return true;
}
763
764 static void
rbug_con(struct rbug_rbug * tr_rbug)765 rbug_con(struct rbug_rbug *tr_rbug)
766 {
767 struct rbug_header *header;
768 uint32_t serial;
769
770 debug_printf("%s - connection received\n", __FUNCTION__);
771
772 while(tr_rbug->running) {
773 header = rbug_get_message(tr_rbug->con, &serial);
774 if (!header)
775 break;
776
777 if (!rbug_header(tr_rbug, header, serial))
778 break;
779 }
780
781 debug_printf("%s - connection closed\n", __FUNCTION__);
782
783 rbug_disconnect(tr_rbug->con);
784 tr_rbug->con = NULL;
785 }
786
/*
 * Listener thread entry point (started by rbug_start).  Binds the first
 * free TCP port in 13370..13379 and serves one debugger connection at a
 * time until tr_rbug->running is cleared.  Always returns 0.
 */
int
rbug_thread(void *void_tr_rbug)
{
   struct rbug_rbug *tr_rbug = void_tr_rbug;
   uint16_t port = 13370;
   int s = -1;
   int c;

   u_socket_init();

   /* scan the port range until one binds */
   for (;port <= 13379 && s < 0; port++)
      s = u_socket_listen_on_port(port);

   if (s < 0) {
      debug_printf("rbug_rbug - failed to listen\n");
      return 0;
   }

   /* non-blocking accept so the loop can notice 'running' going false */
   u_socket_block(s, false);

   /* the loop above incremented port one past the bound port; --port
    * prints the actual bound port (and leaves the local adjusted) */
   debug_printf("rbug_rbug - remote debugging listening on port %u\n", --port);

   while(tr_rbug->running) {
      /* NOTE(review): argument is presumably microseconds per os_time
       * conventions, which makes this effectively a busy poll — confirm */
      os_time_sleep(1);

      c = u_socket_accept(s);
      if (c < 0)
         continue;

      /* switch the client socket back to blocking I/O for rbug_con */
      u_socket_block(c, true);
      tr_rbug->con = rbug_from_socket(c);

      /* blocks until the client disconnects or running is cleared */
      rbug_con(tr_rbug);

      u_socket_close(c);
   }

   u_socket_close(s);

   u_socket_stop();

   return 0;
}
830
831 /**********************************************************
832 *
833 */
834
835 struct rbug_rbug *
rbug_start(struct rbug_screen * rb_screen)836 rbug_start(struct rbug_screen *rb_screen)
837 {
838 struct rbug_rbug *tr_rbug = CALLOC_STRUCT(rbug_rbug);
839 if (!tr_rbug)
840 return NULL;
841
842 tr_rbug->rb_screen = rb_screen;
843 tr_rbug->running = true;
844 if (thrd_success != u_thread_create(&tr_rbug->thread, rbug_thread, tr_rbug)) {
845 FREE(tr_rbug);
846 return NULL;
847 }
848
849 return tr_rbug;
850 }
851
852 void
rbug_stop(struct rbug_rbug * tr_rbug)853 rbug_stop(struct rbug_rbug *tr_rbug)
854 {
855 if (!tr_rbug)
856 return;
857
858 tr_rbug->running = false;
859 thrd_join(tr_rbug->thread, NULL);
860
861 FREE(tr_rbug);
862
863 return;
864 }
865
866 void
rbug_notify_draw_blocked(struct rbug_context * rb_context)867 rbug_notify_draw_blocked(struct rbug_context *rb_context)
868 {
869 struct rbug_screen *rb_screen = rbug_screen(rb_context->base.screen);
870 struct rbug_rbug *tr_rbug = rb_screen->rbug;
871
872 if (tr_rbug && tr_rbug->con)
873 rbug_send_context_draw_blocked(tr_rbug->con,
874 VOID2U64(rb_context), rb_context->draw_blocked, NULL);
875 }
876