/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */

#include <pthread.h>
#include "main/imports.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_to_nir.h"
#include "program/program.h"
#include "program/programopt.h"
#include "tnl/tnl.h"
#include "util/ralloc.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/glsl_to_nir.h"

#include "brw_program.h"
#include "brw_context.h"
#include "brw_shader.h"
#include "brw_nir.h"
#include "intel_batchbuffer.h"

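/* Assign driver locations to uniform variables and lower uniform access to
 * offset-based intrinsics, using a scalar or vec4 layout to match the
 * backend that will consume this shader.
 */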
static void
brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_scalar_bytes);
      nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes, 0);
   } else {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_vec4_bytes);
      nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes, 0);
   }
}

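/* Build a NIR shader for the given stage: translate from GLSL IR (linked
 * GLSL programs) or Mesa IR (ARB programs and fixed function), then run the
 * common i965 lowering and optimization passes.
 */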
nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   bool progress;
   nir_shader *nir;

   /* First, lower the GLSL IR or Mesa IR to NIR */
   if (shader_prog) {
      nir = glsl_to_nir(shader_prog, stage, options);
      nir_remove_dead_variables(nir, nir_var_shader_in | nir_var_shader_out);
      nir_lower_returns(nir);
      nir_validate_shader(nir);
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir), true, false);
   } else {
      nir = prog_to_nir(prog, options);
      NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   (void)progress;

   nir = brw_preprocess_nir(brw->screen->compiler, nir);

   if (stage == MESA_SHADER_FRAGMENT) {
      static const struct nir_lower_wpos_ytransform_options wpos_options = {
         .state_tokens = {STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM, 0, 0, 0},
         .fs_coord_pixel_center_integer = 1,
         .fs_coord_origin_upper_left = 1,
      };
      _mesa_add_state_reference(prog->Parameters,
                                (gl_state_index *) wpos_options.state_tokens);

      NIR_PASS(progress, nir, nir_lower_wpos_ytransform, &wpos_options);
   }

   NIR_PASS(progress, nir, nir_lower_system_values);
   NIR_PASS_V(nir, brw_nir_lower_uniforms, is_scalar);

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* nir_shader may have been cloned so make sure shader_info is in sync */
   if (nir->info != &prog->info) {
      const char *name = prog->info.name;
      const char *label = prog->info.label;
      prog->info = *nir->info;
      prog->info.name = name;
      prog->info.label = label;
   }

   if (shader_prog) {
      NIR_PASS_V(nir, nir_lower_samplers, shader_prog);
      NIR_PASS_V(nir, nir_lower_atomics, shader_prog);
   }

   return nir;
}

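/* Hand out a fresh program ID.  The counter lives in the screen, which can
 * be shared by multiple contexts, so a mutex guards the increment.
 */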
static unsigned
get_new_program_id(struct intel_screen *screen)
{
   static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
   pthread_mutex_lock(&m);
   unsigned id = screen->program_id++;
   pthread_mutex_unlock(&m);
   return id;
}

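/* dd_function_table::NewProgram: allocate the driver's brw_program wrapper
 * around gl_program and assign it a unique ID.
 */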
static struct gl_program *brwNewProgram(struct gl_context *ctx, GLenum target,
                                        GLuint id, bool is_arb_asm)
{
   struct brw_context *brw = brw_context(ctx);

   switch (target) {
   case GL_VERTEX_PROGRAM_ARB:
   case GL_TESS_CONTROL_PROGRAM_NV:
   case GL_TESS_EVALUATION_PROGRAM_NV:
   case GL_GEOMETRY_PROGRAM_NV:
   case GL_COMPUTE_PROGRAM_NV: {
      struct brw_program *prog = rzalloc(NULL, struct brw_program);
      if (prog) {
         prog->id = get_new_program_id(brw->screen);

         return _mesa_init_gl_program(&prog->program, target, id, is_arb_asm);
      } else {
         return NULL;
      }
   }

   case GL_FRAGMENT_PROGRAM_ARB: {
      struct brw_program *prog = rzalloc(NULL, struct brw_program);

      if (prog) {
         prog->id = get_new_program_id(brw->screen);

         return _mesa_init_gl_program(&prog->program, target, id, is_arb_asm);
      } else {
         return NULL;
      }
   }

   default:
      unreachable("Unsupported target in brwNewProgram()");
   }
}

static void brwDeleteProgram(struct gl_context *ctx,
                             struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);

   /* Beware!  prog's refcount has reached zero, and it's about to be freed.
    *
    * In brw_upload_pipeline_state(), we compare brw->foo_program to
    * ctx->FooProgram._Current, and flag BRW_NEW_FOO_PROGRAM if the
    * pointer has changed.
    *
    * We cannot leave brw->foo_program as a dangling pointer to the dead
    * program.  malloc() may allocate the same memory for a new gl_program,
    * causing us to see matching pointers...but totally different programs.
    *
    * We cannot set brw->foo_program to NULL, either.  If we've deleted the
    * active program, Mesa may set ctx->FooProgram._Current to NULL.  That
    * would cause us to see matching pointers (NULL == NULL), and fail to
    * detect that a program has changed since our last draw.
    *
    * So, set it to a bogus gl_program pointer that will never match,
    * causing us to properly reevaluate the state on our next draw.
    *
    * Getting this wrong causes heisenbugs which are very hard to catch,
    * as you need a very specific allocation pattern to hit the problem.
    */
   static const struct gl_program deleted_program;

   if (brw->vertex_program == prog)
      brw->vertex_program = &deleted_program;

   if (brw->tess_ctrl_program == prog)
      brw->tess_ctrl_program = &deleted_program;

   if (brw->tess_eval_program == prog)
      brw->tess_eval_program = &deleted_program;

   if (brw->geometry_program == prog)
      brw->geometry_program = &deleted_program;

   if (brw->fragment_program == prog)
      brw->fragment_program = &deleted_program;

   if (brw->compute_program == prog)
      brw->compute_program = &deleted_program;

   _mesa_delete_program(ctx, prog);
}


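/* dd_function_table::ProgramStringNotify: an ARB vertex/fragment program (or
 * fixed-function vertex program) has been given new source.  Flag the driver
 * state if it is current, assign a new program ID, rebuild the NIR, and kick
 * off a precompile.
 */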
static GLboolean
brwProgramStringNotify(struct gl_context *ctx,
                       GLenum target,
                       struct gl_program *prog)
{
   assert(target == GL_VERTEX_PROGRAM_ARB || !prog->arb.IsPositionInvariant);

   struct brw_context *brw = brw_context(ctx);
   const struct brw_compiler *compiler = brw->screen->compiler;

   switch (target) {
   case GL_FRAGMENT_PROGRAM_ARB: {
      struct brw_program *newFP = brw_program(prog);
      const struct brw_program *curFP =
         brw_program_const(brw->fragment_program);

      if (newFP == curFP)
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      newFP->id = get_new_program_id(brw->screen);

      brw_add_texrect_params(prog);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_FRAGMENT, true);

      brw_fs_precompile(ctx, prog);
      break;
   }
   case GL_VERTEX_PROGRAM_ARB: {
      struct brw_program *newVP = brw_program(prog);
      const struct brw_program *curVP =
         brw_program_const(brw->vertex_program);

      if (newVP == curVP)
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      if (newVP->program.arb.IsPositionInvariant) {
         _mesa_insert_mvp_code(ctx, &newVP->program);
      }
      newVP->id = get_new_program_id(brw->screen);

      /* Also tell tnl about it:
       */
      _tnl_program_string(ctx, target, prog);

      brw_add_texrect_params(prog);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_VERTEX,
                                 compiler->scalar_stage[MESA_SHADER_VERTEX]);

      brw_vs_precompile(ctx, prog);
      break;
   }
   default:
      /*
       * driver->ProgramStringNotify is only called for ARB programs, fixed
       * function vertex programs, and ir_to_mesa (which isn't used by the
       * i965 back-end).  Therefore, even after geometry shaders are added,
       * this function should only ever be called with a target of
       * GL_VERTEX_PROGRAM_ARB or GL_FRAGMENT_PROGRAM_ARB.
       */
      unreachable("Unexpected target in brwProgramStringNotify");
   }

   return true;
}

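/* Implement glMemoryBarrier() by translating the GL barrier bits into the
 * PIPE_CONTROL cache flush/invalidate bits the hardware needs.
 */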
static void
brw_memory_barrier(struct gl_context *ctx, GLbitfield barriers)
{
   struct brw_context *brw = brw_context(ctx);
   unsigned bits = (PIPE_CONTROL_DATA_CACHE_FLUSH |
                    PIPE_CONTROL_NO_WRITE |
                    PIPE_CONTROL_CS_STALL);
   assert(brw->gen >= 7 && brw->gen <= 9);

   if (barriers & (GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT |
                   GL_ELEMENT_ARRAY_BARRIER_BIT |
                   GL_COMMAND_BARRIER_BIT))
      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;

   if (barriers & GL_UNIFORM_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_CONST_CACHE_INVALIDATE);

   if (barriers & GL_TEXTURE_FETCH_BARRIER_BIT)
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;

   if (barriers & GL_TEXTURE_UPDATE_BARRIER_BIT)
      bits |= PIPE_CONTROL_RENDER_TARGET_FLUSH;

   if (barriers & GL_FRAMEBUFFER_BARRIER_BIT)
      bits |= (PIPE_CONTROL_DEPTH_CACHE_FLUSH |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* Typed surface messages are handled by the render cache on IVB, so we
    * need to flush it too.
    */
   if (brw->gen == 7 && !brw->is_haswell)
      bits |= PIPE_CONTROL_RENDER_TARGET_FLUSH;

   brw_emit_pipe_control_flush(brw, bits);
}

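/* BlendBarrier hook for framebuffer fetch: when coherent framebuffer fetch
 * isn't exposed, flush the render target (and on Gen6+ also invalidate the
 * texture cache) so prior rendering is visible to subsequent reads.
 */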
static void
brw_blend_barrier(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   if (!ctx->Extensions.MESA_shader_framebuffer_fetch) {
      if (brw->gen >= 6) {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                     PIPE_CONTROL_CS_STALL);
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
      } else {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH);
      }
   }
}

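/* Add a STATE_TEXRECT_SCALE parameter reference for every texture unit that
 * samples a rectangle texture, so the texture size is available to the
 * compiler when it rescales RECT coordinates.
 */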
void
brw_add_texrect_params(struct gl_program *prog)
{
   for (int texunit = 0; texunit < BRW_MAX_TEX_UNIT; texunit++) {
      if (!(prog->TexturesUsed[texunit] & (1 << TEXTURE_RECT_INDEX)))
         continue;

      int tokens[STATE_LENGTH] = {
         STATE_INTERNAL,
         STATE_TEXRECT_SCALE,
         texunit,
         0,
         0
      };

      _mesa_add_state_reference(prog->Parameters, (gl_state_index *)tokens);
   }
}

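/* Make sure *scratch_bo points at a scratch buffer of at least the requested
 * size, reallocating it if the existing one is too small.
 */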
void
brw_get_scratch_bo(struct brw_context *brw,
                   drm_intel_bo **scratch_bo, int size)
{
   drm_intel_bo *old_bo = *scratch_bo;

   if (old_bo && old_bo->size < size) {
      drm_intel_bo_unreference(old_bo);
      old_bo = NULL;
   }

   if (!old_bo) {
      *scratch_bo = drm_intel_bo_alloc(brw->bufmgr, "scratch bo", size, 4096);
   }
}

/**
 * Reserve enough scratch space for the given stage to hold \p per_thread_size
 * bytes times the given \p thread_count.
 */
void
brw_alloc_stage_scratch(struct brw_context *brw,
                        struct brw_stage_state *stage_state,
                        unsigned per_thread_size,
                        unsigned thread_count)
{
   if (stage_state->per_thread_scratch < per_thread_size) {
      stage_state->per_thread_scratch = per_thread_size;

      if (stage_state->scratch_bo)
         drm_intel_bo_unreference(stage_state->scratch_bo);

      stage_state->scratch_bo =
         drm_intel_bo_alloc(brw->bufmgr, "shader scratch space",
                            per_thread_size * thread_count, 4096);
   }
}

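/* Plug the i965 program hooks into the device driver function table. */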
void brwInitFragProgFuncs(struct dd_function_table *functions)
{
   assert(functions->ProgramStringNotify == _tnl_program_string);

   functions->NewProgram = brwNewProgram;
   functions->DeleteProgram = brwDeleteProgram;
   functions->ProgramStringNotify = brwProgramStringNotify;

   functions->LinkShader = brw_link_shader;

   functions->MemoryBarrier = brw_memory_barrier;
   functions->BlendBarrier = brw_blend_barrier;
}

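/* Shader-time profiling bookkeeping: for each tracked shader, the GPU writes
 * a (time, written, reset) triple into the shader_time BO, which we
 * accumulate here on the CPU.
 */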
struct shader_times {
   uint64_t time;
   uint64_t written;
   uint64_t reset;
};

void
brw_init_shader_time(struct brw_context *brw)
{
   const int max_entries = 2048;
   brw->shader_time.bo =
      drm_intel_bo_alloc(brw->bufmgr, "shader time",
                         max_entries * SHADER_TIME_STRIDE * 3, 4096);
   brw->shader_time.names = rzalloc_array(brw, const char *, max_entries);
   brw->shader_time.ids = rzalloc_array(brw, int, max_entries);
   brw->shader_time.types = rzalloc_array(brw, enum shader_time_shader_type,
                                          max_entries);
   brw->shader_time.cumulative = rzalloc_array(brw, struct shader_times,
                                               max_entries);
   brw->shader_time.max_entries = max_entries;
}

static int
compare_time(const void *a, const void *b)
{
   uint64_t * const *a_val = a;
   uint64_t * const *b_val = b;

   /* We don't just subtract because we're turning the value to an int. */
   if (**a_val < **b_val)
      return -1;
   else if (**a_val == **b_val)
      return 0;
   else
      return 1;
}

static void
print_shader_time_line(const char *stage, const char *name,
                       int shader_num, uint64_t time, uint64_t total)
{
   fprintf(stderr, "%-6s%-18s", stage, name);

   if (shader_num != 0)
      fprintf(stderr, "%4d: ", shader_num);
   else
      fprintf(stderr, "    : ");

   fprintf(stderr, "%16lld (%7.2f Gcycles)      %4.1f%%\n",
           (long long)time,
           (double)time / 1000000000.0,
           (double)time / total * 100.0);
}

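/* Print a report of the accumulated shader times: scale each entry to
 * account for reports that were reset before being read, sort by time,
 * print a per-shader line, and finish with per-stage totals.
 */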
static void
brw_report_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo || !brw->shader_time.num_entries)
      return;

   uint64_t scaled[brw->shader_time.num_entries];
   uint64_t *sorted[brw->shader_time.num_entries];
   uint64_t total_by_type[ST_CS + 1];
   memset(total_by_type, 0, sizeof(total_by_type));
   double total = 0;
   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint64_t written = 0, reset = 0;
      enum shader_time_shader_type type = brw->shader_time.types[i];

      sorted[i] = &scaled[i];

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         written = brw->shader_time.cumulative[i].written;
         reset = brw->shader_time.cumulative[i].reset;
         break;

      default:
         /* I sometimes want to print things that aren't the 3 shader times.
          * Just print the sum in that case.
          */
         written = 1;
         reset = 0;
         break;
      }

      uint64_t time = brw->shader_time.cumulative[i].time;
      if (written) {
         scaled[i] = time / written * (written + reset);
      } else {
         scaled[i] = time;
      }

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         total_by_type[type] += scaled[i];
         break;
      default:
         break;
      }

      total += scaled[i];
   }

   if (total == 0) {
      fprintf(stderr, "No shader time collected yet\n");
      return;
   }

   qsort(sorted, brw->shader_time.num_entries, sizeof(sorted[0]), compare_time);

   fprintf(stderr, "\n");
   fprintf(stderr, "type          ID                  cycles spent                   %% of total\n");
   for (int s = 0; s < brw->shader_time.num_entries; s++) {
      const char *stage;
      /* Work back from the sorted pointer to an index into scaled[]. */
      int i = sorted[s] - scaled;

      if (scaled[i] == 0)
         continue;

      int shader_num = brw->shader_time.ids[i];
      const char *shader_name = brw->shader_time.names[i];

      switch (brw->shader_time.types[i]) {
      case ST_VS:
         stage = "vs";
         break;
      case ST_TCS:
         stage = "tcs";
         break;
      case ST_TES:
         stage = "tes";
         break;
      case ST_GS:
         stage = "gs";
         break;
      case ST_FS8:
         stage = "fs8";
         break;
      case ST_FS16:
         stage = "fs16";
         break;
      case ST_CS:
         stage = "cs";
         break;
      default:
         stage = "other";
         break;
      }

      print_shader_time_line(stage, shader_name, shader_num,
                             scaled[i], total);
   }

   fprintf(stderr, "\n");
   print_shader_time_line("total", "vs", 0, total_by_type[ST_VS], total);
   print_shader_time_line("total", "tcs", 0, total_by_type[ST_TCS], total);
   print_shader_time_line("total", "tes", 0, total_by_type[ST_TES], total);
   print_shader_time_line("total", "gs", 0, total_by_type[ST_GS], total);
   print_shader_time_line("total", "fs8", 0, total_by_type[ST_FS8], total);
   print_shader_time_line("total", "fs16", 0, total_by_type[ST_FS16], total);
   print_shader_time_line("total", "cs", 0, total_by_type[ST_CS], total);
}

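/* Read back the shader_time BO, fold the new values into the cumulative
 * totals, and clear the BO for the next collection interval.
 */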
static void
brw_collect_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo)
      return;

   /* This probably stalls on the last rendering.  We could fix that by
    * delaying reading the reports, but it doesn't look like it's a big
    * overhead compared to the cost of tracking the time in the first place.
    */
   drm_intel_bo_map(brw->shader_time.bo, true);
   void *bo_map = brw->shader_time.bo->virtual;

   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint32_t *times = bo_map + i * 3 * SHADER_TIME_STRIDE;

      brw->shader_time.cumulative[i].time += times[SHADER_TIME_STRIDE * 0 / 4];
      brw->shader_time.cumulative[i].written += times[SHADER_TIME_STRIDE * 1 / 4];
      brw->shader_time.cumulative[i].reset += times[SHADER_TIME_STRIDE * 2 / 4];
   }

   /* Zero the BO out to clear it out for our next collection.
    */
   memset(bo_map, 0, brw->shader_time.bo->size);
   drm_intel_bo_unmap(brw->shader_time.bo);
}

void
brw_collect_and_report_shader_time(struct brw_context *brw)
{
   brw_collect_shader_time(brw);

   if (brw->shader_time.report_time == 0 ||
       get_time() - brw->shader_time.report_time >= 1.0) {
      brw_report_shader_time(brw);
      brw->shader_time.report_time = get_time();
   }
}

/**
 * Chooses an index in the shader_time buffer and sets up tracking information
 * for our printouts.
 *
 * Note that this holds on to references to the underlying programs, which may
 * change their lifetimes compared to normal operation.
 */
int
brw_get_shader_time_index(struct brw_context *brw, struct gl_program *prog,
                          enum shader_time_shader_type type, bool is_glsl_sh)
{
   int shader_time_index = brw->shader_time.num_entries++;
   assert(shader_time_index < brw->shader_time.max_entries);
   brw->shader_time.types[shader_time_index] = type;

   const char *name;
   if (prog->Id == 0) {
      name = "ff";
   } else if (is_glsl_sh) {
      name = prog->info.label ?
         ralloc_strdup(brw->shader_time.names, prog->info.label) : "glsl";
   } else {
      name = "prog";
   }

   brw->shader_time.names[shader_time_index] = name;
   brw->shader_time.ids[shader_time_index] = prog->Id;

   return shader_time_index;
}

void
brw_destroy_shader_time(struct brw_context *brw)
{
   drm_intel_bo_unreference(brw->shader_time.bo);
   brw->shader_time.bo = NULL;
}

void
brw_stage_prog_data_free(const void *p)
{
   struct brw_stage_prog_data *prog_data = (struct brw_stage_prog_data *)p;

   ralloc_free(prog_data->param);
   ralloc_free(prog_data->pull_param);
   ralloc_free(prog_data->image_param);
}

void
brw_dump_arb_asm(const char *stage, struct gl_program *prog)
{
   fprintf(stderr, "ARB_%s_program %d ir for native %s shader\n",
           stage, prog->Id, stage);
   _mesa_print_program(prog);
}

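/* Fill in reasonable default texture swizzles for a precompile: on hardware
 * without shader channel select, shadow samplers get the legacy
 * DEPTH_TEXTURE_MODE (X, X, X, 1) swizzle; everything else is identity.
 */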
void
brw_setup_tex_for_precompile(struct brw_context *brw,
                             struct brw_sampler_prog_key_data *tex,
                             struct gl_program *prog)
{
   const bool has_shader_channel_select = brw->is_haswell || brw->gen >= 8;
   unsigned sampler_count = util_last_bit(prog->SamplersUsed);
   for (unsigned i = 0; i < sampler_count; i++) {
      if (!has_shader_channel_select && (prog->ShadowSamplers & (1 << i))) {
         /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
         tex->swizzles[i] =
            MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
      } else {
         /* Color sampler: assume no swizzling. */
         tex->swizzles[i] = SWIZZLE_XYZW;
      }
   }
}