/*
 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
 * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
 * Copyright © 2010 Intel Corporation
 * Copyright © 2011 Bryan Cain
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
26
27 #include "compiler/glsl/glsl_parser_extras.h"
28 #include "compiler/glsl/ir_optimization.h"
29 #include "compiler/glsl/program.h"
30
31 #include "st_nir.h"
32 #include "st_shader_cache.h"
33 #include "st_program.h"
34
35 #include "tgsi/tgsi_from_mesa.h"
36
37 static GLboolean
link_shader(struct gl_context * ctx,struct gl_shader_program * prog)38 link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
39 {
40 GLboolean ret;
41 struct st_context *sctx = st_context(ctx);
42 struct pipe_screen *pscreen = sctx->screen;
43
44 /* Return early if we are loading the shader from on-disk cache */
45 if (st_load_nir_from_disk_cache(ctx, prog)) {
46 return GL_TRUE;
47 }
48
49 assert(prog->data->LinkStatus);
50
51 /* Skip the GLSL steps when using SPIR-V. */
52 if (prog->data->spirv) {
53 return st_link_nir(ctx, prog);
54 }
55
56 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
57 if (prog->_LinkedShaders[i] == NULL)
58 continue;
59
60 struct gl_linked_shader *shader = prog->_LinkedShaders[i];
61 exec_list *ir = shader->ir;
62 gl_shader_stage stage = shader->Stage;
63 const struct gl_shader_compiler_options *options =
64 &ctx->Const.ShaderCompilerOptions[stage];
65
66 enum pipe_shader_type ptarget = pipe_shader_type_from_mesa(stage);
67 bool have_dround = pscreen->get_shader_param(pscreen, ptarget,
68 PIPE_SHADER_CAP_DROUND_SUPPORTED);
69 bool have_dfrexp = pscreen->get_shader_param(pscreen, ptarget,
70 PIPE_SHADER_CAP_DFRACEXP_DLDEXP_SUPPORTED);
71 bool have_ldexp = pscreen->get_shader_param(pscreen, ptarget,
72 PIPE_SHADER_CAP_LDEXP_SUPPORTED);
73
74 if (!pscreen->get_param(pscreen, PIPE_CAP_INT64_DIVMOD))
75 lower_64bit_integer_instructions(ir, DIV64 | MOD64);
76
77 if (ctx->Extensions.ARB_shading_language_packing) {
78 unsigned lower_inst = LOWER_PACK_SNORM_2x16 |
79 LOWER_UNPACK_SNORM_2x16 |
80 LOWER_PACK_UNORM_2x16 |
81 LOWER_UNPACK_UNORM_2x16 |
82 LOWER_PACK_SNORM_4x8 |
83 LOWER_UNPACK_SNORM_4x8 |
84 LOWER_UNPACK_UNORM_4x8 |
85 LOWER_PACK_UNORM_4x8;
86
87 if (ctx->Extensions.ARB_gpu_shader5)
88 lower_inst |= LOWER_PACK_USE_BFI |
89 LOWER_PACK_USE_BFE;
90 if (!ctx->st->has_half_float_packing)
91 lower_inst |= LOWER_PACK_HALF_2x16 |
92 LOWER_UNPACK_HALF_2x16;
93
94 lower_packing_builtins(ir, lower_inst);
95 }
96
97 do_mat_op_to_vec(ir);
98
99 if (stage == MESA_SHADER_FRAGMENT && pscreen->get_param(pscreen, PIPE_CAP_FBFETCH))
100 lower_blend_equation_advanced(
101 shader, ctx->Extensions.KHR_blend_equation_advanced_coherent);
102
103 lower_instructions(ir,
104 (have_ldexp ? 0 : LDEXP_TO_ARITH) |
105 (have_dfrexp ? 0 : DFREXP_DLDEXP_TO_ARITH) |
106 CARRY_TO_ARITH |
107 BORROW_TO_ARITH |
108 (have_dround ? 0 : DOPS_TO_DFRAC) |
109 (ctx->Const.ForceGLSLAbsSqrt ? SQRT_TO_ABS_SQRT : 0) |
110 /* Assume that if ARB_gpu_shader5 is not supported
111 * then all of the extended integer functions need
112 * lowering. It may be necessary to add some caps
113 * for individual instructions.
114 */
115 (!ctx->Extensions.ARB_gpu_shader5
116 ? BIT_COUNT_TO_MATH |
117 EXTRACT_TO_SHIFTS |
118 INSERT_TO_SHIFTS |
119 REVERSE_TO_SHIFTS |
120 FIND_LSB_TO_FLOAT_CAST |
121 FIND_MSB_TO_FLOAT_CAST |
122 IMUL_HIGH_TO_MUL
123 : 0));
124
125 do_vec_index_to_cond_assign(ir);
126 lower_vector_insert(ir, true);
127 if (options->MaxIfDepth == 0) {
128 lower_discard(ir);
129 }
130
131 validate_ir_tree(ir);
132 }
133
134 ret = st_link_nir(ctx, prog);
135
136 return ret;
137 }
138
139 extern "C" {
140
141 /**
142 * Link a shader.
143 * Called via ctx->Driver.LinkShader()
144 */
145 GLboolean
st_link_shader(struct gl_context * ctx,struct gl_shader_program * prog)146 st_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
147 {
148 struct pipe_context *pctx = st_context(ctx)->pipe;
149
150 GLboolean ret = link_shader(ctx, prog);
151
152 if (pctx->link_shader) {
153 void *driver_handles[PIPE_SHADER_TYPES];
154 memset(driver_handles, 0, sizeof(driver_handles));
155
156 for (uint32_t i = 0; i < MESA_SHADER_STAGES; ++i) {
157 struct gl_linked_shader *shader = prog->_LinkedShaders[i];
158 if (shader) {
159 struct gl_program *p = shader->Program;
160 if (p && p->variants) {
161 enum pipe_shader_type type = pipe_shader_type_from_mesa(shader->Stage);
162 driver_handles[type] = p->variants->driver_shader;
163 }
164 }
165 }
166
167 pctx->link_shader(pctx, driver_handles);
168 }
169
170 return ret;
171 }
172
173 } /* extern "C" */
174