/****************************************************************************
 * Copyright (C) 2015 Intel Corporation.   All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 ***************************************************************************/

#include "swr_context.h"
#include "swr_memory.h"
#include "swr_screen.h"
#include "swr_resource.h"
#include "swr_scratch.h"
#include "swr_query.h"
#include "swr_fence.h"

#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "util/u_atomic.h"
#include "util/u_upload_mgr.h"
#include "util/u_transfer.h"
#include "util/u_surface.h"

#include "api.h"
#include "backend.h"
#include "knobs.h"

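/* pipe_context::create_surface hook: wrap a resource in a pipe_surface
 * view.  Texture surfaces get width/height minified to the requested mip
 * level; buffer surfaces encode the element range in the surface width. */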
static struct pipe_surface *
swr_create_surface(struct pipe_context *pipe,
                   struct pipe_resource *pt,
                   const struct pipe_surface *surf_tmpl)
{
   struct pipe_surface *ps;

   ps = CALLOC_STRUCT(pipe_surface);
   if (ps) {
      pipe_reference_init(&ps->reference, 1);
      pipe_resource_reference(&ps->texture, pt);
      ps->context = pipe;
      ps->format = surf_tmpl->format;
      if (pt->target != PIPE_BUFFER) {
         assert(surf_tmpl->u.tex.level <= pt->last_level);
         ps->width = u_minify(pt->width0, surf_tmpl->u.tex.level);
         ps->height = u_minify(pt->height0, surf_tmpl->u.tex.level);
         ps->u.tex.level = surf_tmpl->u.tex.level;
         ps->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
         ps->u.tex.last_layer = surf_tmpl->u.tex.last_layer;
      } else {
         /* Setting width to the number of elements yields the correct
          * renderbuffer width. */
         ps->width = surf_tmpl->u.buf.last_element
            - surf_tmpl->u.buf.first_element + 1;
         ps->height = pt->height0;
         ps->u.buf.first_element = surf_tmpl->u.buf.first_element;
         ps->u.buf.last_element = surf_tmpl->u.buf.last_element;
         assert(ps->u.buf.first_element <= ps->u.buf.last_element);
         assert(ps->u.buf.last_element < ps->width);
      }
   }
   return ps;
}

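/* pipe_context::surface_destroy hook: flush any pending rendering to the
 * backing resource (store dirty tiles), then release the surface's
 * resource reference. */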
static void
swr_surface_destroy(struct pipe_context *pipe, struct pipe_surface *surf)
{
   assert(surf->texture);
   struct pipe_resource *resource = surf->texture;

   /* If the resource has been drawn to, store tiles. */
   swr_store_dirty_resource(pipe, resource, SWR_TILE_RESOLVED);

   pipe_resource_reference(&resource, NULL);
   FREE(surf);
}


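/* pipe_context::transfer_map hook.  SWR keeps stencil in a separate
 * "secondary" allocation, so for interleaved depth/stencil formats a READ
 * mapping first scatters the stencil bytes into their interleaved slots;
 * the reverse copy on write-back is done by swr_transfer_flush_region()
 * and swr_transfer_unmap() below. */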
static void *
swr_transfer_map(struct pipe_context *pipe,
                 struct pipe_resource *resource,
                 unsigned level,
                 unsigned usage,
                 const struct pipe_box *box,
                 struct pipe_transfer **transfer)
{
   /* Validate inputs before the resource is dereferenced below. */
   assert(resource);
   assert(level <= resource->last_level);

   struct swr_screen *screen = swr_screen(pipe->screen);
   struct swr_resource *spr = swr_resource(resource);
   struct pipe_transfer *pt;
   enum pipe_format format = resource->format;

   /* If mapping an attached rendertarget, store tiles to the surface and set
    * postStoreTileState to SWR_TILE_INVALID so tiles get reloaded on next use
    * and nothing needs to be done at unmap. */
   swr_store_dirty_resource(pipe, resource, SWR_TILE_INVALID);

   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* If the resource is in use, finish the fence before mapping.
       * Unless asked not to block, in which case return a NULL map if the
       * fence is still pending. */
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         if (swr_is_fence_pending(screen->flush_fence))
            return NULL;
      } else {
         if (spr->status) {
            /* But, if there's no fence pending, submit one.
             * XXX: Remove once draw timestamps are finished. */
            if (!swr_is_fence_pending(screen->flush_fence))
               swr_fence_submit(swr_context(pipe), screen->flush_fence);

            swr_fence_finish(pipe->screen, NULL, screen->flush_fence, 0);
            swr_resource_unused(resource);
         }
      }
   }

   pt = CALLOC_STRUCT(pipe_transfer);
   if (!pt)
      return NULL;
   pipe_resource_reference(&pt->resource, resource);
   pt->usage = (pipe_transfer_usage)usage;
   pt->level = level;
   pt->box = *box;
   pt->stride = spr->swr.pitch;
   pt->layer_stride = spr->swr.qpitch * spr->swr.pitch;

   /* If we're mapping the depth/stencil, copy in stencil for the section
    * being read in. */
   if (usage & PIPE_TRANSFER_READ && spr->has_depth && spr->has_stencil) {
      size_t zbase, sbase;
      for (int z = box->z; z < box->z + box->depth; z++) {
         zbase = (z * spr->swr.qpitch + box->y) * spr->swr.pitch +
            spr->mip_offsets[level];
         sbase = (z * spr->secondary.qpitch + box->y) * spr->secondary.pitch +
            spr->secondary_mip_offsets[level];
         for (int y = box->y; y < box->y + box->height; y++) {
            if (spr->base.format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
               for (int x = box->x; x < box->x + box->width; x++)
                  ((uint8_t*)(spr->swr.xpBaseAddress))[zbase + 4 * x + 3] =
                     ((uint8_t*)(spr->secondary.xpBaseAddress))[sbase + x];
            } else if (spr->base.format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
               for (int x = box->x; x < box->x + box->width; x++)
                  ((uint8_t*)(spr->swr.xpBaseAddress))[zbase + 8 * x + 4] =
                     ((uint8_t*)(spr->secondary.xpBaseAddress))[sbase + x];
            }
            zbase += spr->swr.pitch;
            sbase += spr->secondary.pitch;
         }
      }
   }

   unsigned offset = box->z * pt->layer_stride +
      util_format_get_nblocksy(format, box->y) * pt->stride +
      util_format_get_stride(format, box->x);

   *transfer = pt;

   return (void*)(spr->swr.xpBaseAddress + offset + spr->mip_offsets[level]);
}

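/* Copy stencil values written through the interleaved depth/stencil map
 * back out to the secondary stencil-only allocation.  flush_box is
 * relative to the original transfer box. */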
static void
swr_transfer_flush_region(struct pipe_context *pipe,
                          struct pipe_transfer *transfer,
                          const struct pipe_box *flush_box)
{
   assert(transfer->resource);
   assert(transfer->usage & PIPE_TRANSFER_WRITE);

   struct swr_resource *spr = swr_resource(transfer->resource);
   if (!spr->has_depth || !spr->has_stencil)
      return;

   size_t zbase, sbase;
   struct pipe_box box = *flush_box;
   box.x += transfer->box.x;
   box.y += transfer->box.y;
   box.z += transfer->box.z;
   for (int z = box.z; z < box.z + box.depth; z++) {
      zbase = (z * spr->swr.qpitch + box.y) * spr->swr.pitch +
         spr->mip_offsets[transfer->level];
      sbase = (z * spr->secondary.qpitch + box.y) * spr->secondary.pitch +
         spr->secondary_mip_offsets[transfer->level];
      for (int y = box.y; y < box.y + box.height; y++) {
         if (spr->base.format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
            for (int x = box.x; x < box.x + box.width; x++)
               ((uint8_t*)(spr->secondary.xpBaseAddress))[sbase + x] =
                  ((uint8_t*)(spr->swr.xpBaseAddress))[zbase + 4 * x + 3];
         } else if (spr->base.format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
            for (int x = box.x; x < box.x + box.width; x++)
               ((uint8_t*)(spr->secondary.xpBaseAddress))[sbase + x] =
                  ((uint8_t*)(spr->swr.xpBaseAddress))[zbase + 8 * x + 4];
         }
         zbase += spr->swr.pitch;
         sbase += spr->secondary.pitch;
      }
   }
}

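/* pipe_context::transfer_unmap hook: unless the caller flushed regions
 * explicitly, write back the whole mapped box, then drop the transfer's
 * resource reference and free it. */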
static void
swr_transfer_unmap(struct pipe_context *pipe, struct pipe_transfer *transfer)
{
   assert(transfer->resource);

   struct swr_resource *spr = swr_resource(transfer->resource);
   /* If we're unmapping the depth/stencil, copy the stencil values out to
    * the secondary resource for the section that was written. */
   if (transfer->usage & PIPE_TRANSFER_WRITE &&
       !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT) &&
       spr->has_depth && spr->has_stencil) {
      struct pipe_box box;
      u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height,
               transfer->box.depth, &box);
      swr_transfer_flush_region(pipe, transfer, &box);
   }

   pipe_resource_reference(&transfer->resource, NULL);
   FREE(transfer);
}


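/* pipe_context::resource_copy_region hook: store dirty tiles and wait for
 * the core to go idle, then copy on the CPU with
 * util_resource_copy_region().  Copies between a buffer and a texture are
 * not handled. */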
static void
swr_resource_copy(struct pipe_context *pipe,
                  struct pipe_resource *dst,
                  unsigned dst_level,
                  unsigned dstx,
                  unsigned dsty,
                  unsigned dstz,
                  struct pipe_resource *src,
                  unsigned src_level,
                  const struct pipe_box *src_box)
{
   struct swr_screen *screen = swr_screen(pipe->screen);

   /* If either the src or dst is a renderTarget, store tiles before copy */
   swr_store_dirty_resource(pipe, src, SWR_TILE_RESOLVED);
   swr_store_dirty_resource(pipe, dst, SWR_TILE_RESOLVED);

   swr_fence_finish(pipe->screen, NULL, screen->flush_fence, 0);
   swr_resource_unused(src);
   swr_resource_unused(dst);

   if ((dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER)
       || (dst->target != PIPE_BUFFER && src->target != PIPE_BUFFER)) {
      util_resource_copy_region(
         pipe, dst, dst_level, dstx, dsty, dstz, src, src_level, src_box);
      return;
   }

   debug_printf("unhandled swr_resource_copy\n");
}


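/* pipe_context::blit hook.  MSAA color sources are first resolved via the
 * surface-store path, then the blit is attempted as a plain copy_region
 * and finally handed to util_blitter with full state save/restore.  FE/BE
 * stats are disabled around the internal blit draws, presumably so they
 * don't get counted against active queries. */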
static void
swr_blit(struct pipe_context *pipe, const struct pipe_blit_info *blit_info)
{
   struct swr_context *ctx = swr_context(pipe);
   /* Make a copy of the const blit_info, so we can modify it */
   struct pipe_blit_info info = *blit_info;

   if (info.render_condition_enable && !swr_check_render_cond(pipe))
      return;

   if (info.src.resource->nr_samples > 1 && info.dst.resource->nr_samples <= 1
       && !util_format_is_depth_or_stencil(info.src.resource->format)
       && !util_format_is_pure_integer(info.src.resource->format)) {
      debug_printf("swr_blit: color resolve : %d -> %d\n",
            info.src.resource->nr_samples, info.dst.resource->nr_samples);

      /* Resolve is done as part of the surface store. */
      swr_store_dirty_resource(pipe, info.src.resource, SWR_TILE_RESOLVED);

      struct pipe_resource *src_resource = info.src.resource;
      struct pipe_resource *resolve_target =
         swr_resource(src_resource)->resolve_target;

      /* The resolve target becomes the new source for the blit. */
      info.src.resource = resolve_target;
   }

   if (util_try_blit_via_copy_region(pipe, &info)) {
      return; /* done */
   }

   if (info.mask & PIPE_MASK_S) {
      debug_printf("swr: cannot blit stencil, skipping\n");
      info.mask &= ~PIPE_MASK_S;
   }

   if (!util_blitter_is_blit_supported(ctx->blitter, &info)) {
      debug_printf("swr: blit unsupported %s -> %s\n",
                   util_format_short_name(info.src.resource->format),
                   util_format_short_name(info.dst.resource->format));
      return;
   }

   if (ctx->active_queries) {
      ctx->api.pfnSwrEnableStatsFE(ctx->swrContext, FALSE);
      ctx->api.pfnSwrEnableStatsBE(ctx->swrContext, FALSE);
   }

   util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->vertex_buffer);
   util_blitter_save_vertex_elements(ctx->blitter, (void *)ctx->velems);
   util_blitter_save_vertex_shader(ctx->blitter, (void *)ctx->vs);
   util_blitter_save_geometry_shader(ctx->blitter, (void *)ctx->gs);
   util_blitter_save_so_targets(
      ctx->blitter,
      ctx->num_so_targets,
      (struct pipe_stream_output_target **)ctx->so_targets);
   util_blitter_save_rasterizer(ctx->blitter, (void *)ctx->rasterizer);
   util_blitter_save_viewport(ctx->blitter, &ctx->viewport);
   util_blitter_save_scissor(ctx->blitter, &ctx->scissor);
   util_blitter_save_fragment_shader(ctx->blitter, ctx->fs);
   util_blitter_save_blend(ctx->blitter, (void *)ctx->blend);
   util_blitter_save_depth_stencil_alpha(ctx->blitter,
                                         (void *)ctx->depth_stencil);
   util_blitter_save_stencil_ref(ctx->blitter, &ctx->stencil_ref);
   util_blitter_save_sample_mask(ctx->blitter, ctx->sample_mask);
   util_blitter_save_framebuffer(ctx->blitter, &ctx->framebuffer);
   util_blitter_save_fragment_sampler_states(
      ctx->blitter,
      ctx->num_samplers[PIPE_SHADER_FRAGMENT],
      (void **)ctx->samplers[PIPE_SHADER_FRAGMENT]);
   util_blitter_save_fragment_sampler_views(
      ctx->blitter,
      ctx->num_sampler_views[PIPE_SHADER_FRAGMENT],
      ctx->sampler_views[PIPE_SHADER_FRAGMENT]);
   util_blitter_save_render_condition(ctx->blitter,
                                      ctx->render_cond_query,
                                      ctx->render_cond_cond,
                                      ctx->render_cond_mode);

   util_blitter_blit(ctx->blitter, &info);

   if (ctx->active_queries) {
      ctx->api.pfnSwrEnableStatsFE(ctx->swrContext, TRUE);
      ctx->api.pfnSwrEnableStatsBE(ctx->swrContext, TRUE);
   }
}


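/* pipe_context::destroy hook: drop framebuffer and sampler-view
 * references, idle the SWR core (destroying resources may have queued
 * StoreTiles work), then destroy the core context and free the driver
 * state. */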
static void
swr_destroy(struct pipe_context *pipe)
{
   struct swr_context *ctx = swr_context(pipe);
   struct swr_screen *screen = swr_screen(pipe->screen);

   if (ctx->blitter)
      util_blitter_destroy(ctx->blitter);

   for (unsigned i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
      if (ctx->framebuffer.cbufs[i]) {
         struct swr_resource *res = swr_resource(ctx->framebuffer.cbufs[i]->texture);
         /* NULL curr_pipe, so we don't have a reference to a deleted pipe */
         res->curr_pipe = NULL;
         pipe_surface_reference(&ctx->framebuffer.cbufs[i], NULL);
      }
   }

   if (ctx->framebuffer.zsbuf) {
      struct swr_resource *res = swr_resource(ctx->framebuffer.zsbuf->texture);
      /* NULL curr_pipe, so we don't have a reference to a deleted pipe */
      res->curr_pipe = NULL;
      pipe_surface_reference(&ctx->framebuffer.zsbuf, NULL);
   }

   for (unsigned i = 0; i < ARRAY_SIZE(ctx->sampler_views[0]); i++) {
      pipe_sampler_view_reference(&ctx->sampler_views[PIPE_SHADER_FRAGMENT][i], NULL);
   }

   for (unsigned i = 0; i < ARRAY_SIZE(ctx->sampler_views[0]); i++) {
      pipe_sampler_view_reference(&ctx->sampler_views[PIPE_SHADER_VERTEX][i], NULL);
   }

   if (ctx->pipe.stream_uploader)
      u_upload_destroy(ctx->pipe.stream_uploader);

   /* Idle core after destroying buffer resources, but before deleting
    * context.  Destroying resources has potentially called StoreTiles. */
   ctx->api.pfnSwrWaitForIdle(ctx->swrContext);

   if (ctx->swrContext)
      ctx->api.pfnSwrDestroyContext(ctx->swrContext);

   delete ctx->blendJIT;

   swr_destroy_scratch_buffers(ctx);

   /* Only update screen->pipe if current context is being destroyed */
   assert(screen);
   if (screen->pipe == pipe)
      screen->pipe = NULL;

   AlignedFree(ctx);
}


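/* pipe_context::render_condition hook: just latch the query, mode and
 * condition; swr_check_render_cond() evaluates them at draw/blit time. */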
static void
swr_render_condition(struct pipe_context *pipe,
                     struct pipe_query *query,
                     boolean condition,
                     enum pipe_render_cond_flag mode)
{
   struct swr_context *ctx = swr_context(pipe);

   ctx->render_cond_query = query;
   ctx->render_cond_mode = mode;
   ctx->render_cond_cond = condition;
}

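/* Backend stats callback handed to the SWR core via
 * SWR_CREATECONTEXT_INFO: accumulate per-draw backend counters into the
 * query result attached to this draw context. */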
static void
swr_UpdateStats(HANDLE hPrivateContext, const SWR_STATS *pStats)
{
   swr_draw_context *pDC = (swr_draw_context*)hPrivateContext;

   if (!pDC)
      return;

   struct swr_query_result *pqr = pDC->pStats;

   SWR_STATS *pSwrStats = &pqr->core;

   pSwrStats->DepthPassCount += pStats->DepthPassCount;
   pSwrStats->PsInvocations += pStats->PsInvocations;
   pSwrStats->CsInvocations += pStats->CsInvocations;
}

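/* Frontend stats callback.  Presumably multiple frontend threads can
 * update the same draw context concurrently, hence the atomic adds here
 * where the backend callback above uses plain additions. */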
static void
swr_UpdateStatsFE(HANDLE hPrivateContext, const SWR_STATS_FE *pStats)
{
   swr_draw_context *pDC = (swr_draw_context*)hPrivateContext;

   if (!pDC)
      return;

   struct swr_query_result *pqr = pDC->pStats;

   SWR_STATS_FE *pSwrStats = &pqr->coreFE;
   p_atomic_add(&pSwrStats->IaVertices, pStats->IaVertices);
   p_atomic_add(&pSwrStats->IaPrimitives, pStats->IaPrimitives);
   p_atomic_add(&pSwrStats->VsInvocations, pStats->VsInvocations);
   p_atomic_add(&pSwrStats->HsInvocations, pStats->HsInvocations);
   p_atomic_add(&pSwrStats->DsInvocations, pStats->DsInvocations);
   p_atomic_add(&pSwrStats->GsInvocations, pStats->GsInvocations);
   p_atomic_add(&pSwrStats->CInvocations, pStats->CInvocations);
   p_atomic_add(&pSwrStats->CPrimitives, pStats->CPrimitives);
   p_atomic_add(&pSwrStats->GsPrimitives, pStats->GsPrimitives);

   for (unsigned i = 0; i < 4; i++) {
      p_atomic_add(&pSwrStats->SoPrimStorageNeeded[i],
            pStats->SoPrimStorageNeeded[i]);
      p_atomic_add(&pSwrStats->SoNumPrimsWritten[i],
            pStats->SoNumPrimsWritten[i]);
   }
}

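/* Create the swr pipe_context: allocate SIMD-aligned context storage,
 * bind the core SWR API, create the core context with the tile and stats
 * callbacks above, then plug in the Gallium entry points and helper
 * modules. */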
struct pipe_context *
swr_create_context(struct pipe_screen *p_screen, void *priv, unsigned flags)
{
   struct swr_context *ctx = (struct swr_context *)
      AlignedMalloc(sizeof(struct swr_context), KNOB_SIMD_BYTES);
   /* AlignedMalloc can fail; bail out before touching the allocation. */
   if (!ctx)
      return NULL;
   memset(ctx, 0, sizeof(struct swr_context));

   swr_screen(p_screen)->pfnSwrGetInterface(ctx->api);
   ctx->swrDC.pAPI = &ctx->api;

   ctx->blendJIT =
      new std::unordered_map<BLEND_COMPILE_STATE, PFN_BLEND_JIT_FUNC>;

   ctx->max_draws_in_flight = KNOB_MAX_DRAWS_IN_FLIGHT;

   SWR_CREATECONTEXT_INFO createInfo;
   memset(&createInfo, 0, sizeof(createInfo));
   createInfo.privateStateSize = sizeof(swr_draw_context);
   createInfo.pfnLoadTile = swr_LoadHotTile;
   createInfo.pfnStoreTile = swr_StoreHotTile;
   createInfo.pfnClearTile = swr_StoreHotTileClear;
   createInfo.pfnUpdateStats = swr_UpdateStats;
   createInfo.pfnUpdateStatsFE = swr_UpdateStatsFE;

   SWR_THREADING_INFO threadingInfo {0};

   threadingInfo.MAX_WORKER_THREADS        = KNOB_MAX_WORKER_THREADS;
   threadingInfo.MAX_NUMA_NODES            = KNOB_MAX_NUMA_NODES;
   threadingInfo.MAX_CORES_PER_NUMA_NODE   = KNOB_MAX_CORES_PER_NUMA_NODE;
   threadingInfo.MAX_THREADS_PER_CORE      = KNOB_MAX_THREADS_PER_CORE;
   threadingInfo.SINGLE_THREADED           = KNOB_SINGLE_THREADED;

   // Use non-standard settings for KNL
   if (swr_screen(p_screen)->is_knl)
   {
      if (nullptr == getenv("KNOB_MAX_THREADS_PER_CORE"))
         threadingInfo.MAX_THREADS_PER_CORE  = 2;

      if (nullptr == getenv("KNOB_MAX_DRAWS_IN_FLIGHT"))
      {
         ctx->max_draws_in_flight = 2048;
         createInfo.MAX_DRAWS_IN_FLIGHT = ctx->max_draws_in_flight;
      }
   }

   createInfo.pThreadInfo = &threadingInfo;

   ctx->swrContext = ctx->api.pfnSwrCreateContext(&createInfo);

   ctx->api.pfnSwrInit();

   if (ctx->swrContext == NULL)
      goto fail;

   ctx->pipe.screen = p_screen;
   ctx->pipe.destroy = swr_destroy;
   ctx->pipe.priv = priv;
   ctx->pipe.create_surface = swr_create_surface;
   ctx->pipe.surface_destroy = swr_surface_destroy;
   ctx->pipe.transfer_map = swr_transfer_map;
   ctx->pipe.transfer_unmap = swr_transfer_unmap;
   ctx->pipe.transfer_flush_region = swr_transfer_flush_region;

   ctx->pipe.buffer_subdata = u_default_buffer_subdata;
   ctx->pipe.texture_subdata = u_default_texture_subdata;

   ctx->pipe.clear_texture = util_clear_texture;
   ctx->pipe.resource_copy_region = swr_resource_copy;
   ctx->pipe.render_condition = swr_render_condition;

   swr_state_init(&ctx->pipe);
   swr_clear_init(&ctx->pipe);
   swr_draw_init(&ctx->pipe);
   swr_query_init(&ctx->pipe);

   ctx->pipe.stream_uploader = u_upload_create_default(&ctx->pipe);
   if (!ctx->pipe.stream_uploader)
      goto fail;
   ctx->pipe.const_uploader = ctx->pipe.stream_uploader;

   ctx->pipe.blit = swr_blit;
   ctx->blitter = util_blitter_create(&ctx->pipe);
   if (!ctx->blitter)
      goto fail;

   swr_init_scratch_buffers(ctx);

   return &ctx->pipe;

fail:
   /* Should really validate the init steps and fail gracefully */
   swr_destroy(&ctx->pipe);
   return NULL;
}
568