/third_party/mesa3d/src/gallium/drivers/nouveau/nvc0/
nvc0_context.c
     41  struct nvc0_context *nvc0 = nvc0_context(pipe);   (in nvc0_svm_migrate(), local)
     42  struct nouveau_screen *screen = &nvc0->screen->base;   (in nvc0_svm_migrate())
     83  struct nvc0_context *nvc0 = nvc0_context(pipe);   (in nvc0_flush(), local)
     84  struct nouveau_screen *screen = &nvc0->screen->base;   (in nvc0_flush())
     89  PUSH_KICK(nvc0->base.pushbuf); /* fencing handled in kick_notify */   (in nvc0_flush())
     91  nouveau_context_update_frame_stats(&nvc0->base);   (in nvc0_flush())
    106  struct nvc0_context *nvc0 = nvc0_context(pipe);   (in nvc0_memory_barrier(), local)
    107  struct nouveau_pushbuf *push = nvc0->base.pushbuf;   (in nvc0_memory_barrier())
    114  for (i = 0; i < nvc0->num_vtxbufs; ++i) {   (in nvc0_memory_barrier())
    115  if (!nvc0->vtxbuf[i].buffer.resource && !nvc0->vtxbuf[i].is_user_buffer)   (in nvc0_memory_barrier())
    [all …]
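The nvc0_flush() hits above show the standard Gallium flush path: downcast the pipe_context, kick the push buffer, update frame statistics. A minimal sketch reconstructed from the matched lines, assuming Mesa's nvc0 driver headers; the real function also handles the fence argument, which is omitted here:

   static void
   nvc0_flush(struct pipe_context *pipe,
              struct pipe_fence_handle **fence, unsigned flags)
   {
      struct nvc0_context *nvc0 = nvc0_context(pipe);   /* downcast from gallium */

      PUSH_KICK(nvc0->base.pushbuf);   /* submit; fencing handled in kick_notify */
      nouveau_context_update_frame_stats(&nvc0->base);
   }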
nvc0_shader_state.c
     34  nvc0_program_update_context_state(struct nvc0_context *nvc0,   (in nvc0_program_update_context_state(), argument)
     38  const uint32_t flags = NV_VRAM_DOMAIN(&nvc0->screen->base) | NOUVEAU_BO_RDWR;   (in nvc0_program_update_context_state())
     39  if (!nvc0->state.tls_required)   (in nvc0_program_update_context_state())
     40  BCTX_REFN_bo(nvc0->bufctx_3d, 3D_TLS, flags, nvc0->screen->tls);   (in nvc0_program_update_context_state())
     41  nvc0->state.tls_required |= 1 << stage;   (in nvc0_program_update_context_state())
     43  if (nvc0->state.tls_required == (1 << stage))   (in nvc0_program_update_context_state())
     44  nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_TLS);   (in nvc0_program_update_context_state())
     45  nvc0->state.tls_required &= ~(1 << stage);   (in nvc0_program_update_context_state())
     50  nvc0_program_validate(struct nvc0_context *nvc0, struct nvc0_program *prog)   (in nvc0_program_validate(), argument)
     57  prog, nvc0->screen->base.device->chipset,   (in nvc0_program_validate())
    [all …]
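The tls_required lines implement a per-stage reference count as a bitmask: the thread-local-storage buffer is pinned into the 3D bufctx when the first shader stage needs it and dropped when the last one stops. A hedged reconstruction of that bookkeeping, assuming the nvc0 driver types; needs_tls stands in for the branch condition, which the hits do not show:

   static void
   update_tls_requirement(struct nvc0_context *nvc0, int stage, bool needs_tls)
   {
      if (needs_tls) {
         const uint32_t flags = NV_VRAM_DOMAIN(&nvc0->screen->base) | NOUVEAU_BO_RDWR;
         if (!nvc0->state.tls_required)   /* first stage to need TLS: pin the bo */
            BCTX_REFN_bo(nvc0->bufctx_3d, 3D_TLS, flags, nvc0->screen->tls);
         nvc0->state.tls_required |= 1 << stage;
      } else {
         if (nvc0->state.tls_required == (1 << stage))   /* last remaining user */
            nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_TLS);
         nvc0->state.tls_required &= ~(1 << stage);
      }
   }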
nvc0_state_validate.c
     10  nvc0_validate_zcull(struct nvc0_context *nvc0)
     12  struct nouveau_pushbuf *push = nvc0->base.pushbuf;
     13  struct pipe_framebuffer_state *fb = &nvc0->framebuffer;
     89  gm200_validate_sample_locations(struct nvc0_context *nvc0, unsigned ms)   (in gm200_validate_sample_locations(), argument)
     91  struct nouveau_pushbuf *push = nvc0->base.pushbuf;   (in gm200_validate_sample_locations())
     92  struct nvc0_screen *screen = nvc0->screen;   (in gm200_validate_sample_locations())
    106  if (nvc0->sample_locations_enabled) {   (in gm200_validate_sample_locations())
    108  memcpy(locations, nvc0->sample_locations, sizeof(locations));   (in gm200_validate_sample_locations())
    110  &screen->base.base, nvc0->framebuffer.height, ms, locations);   (in gm200_validate_sample_locations())
    163  nvc0_validate_sample_locations(struct nvc0_context *nvc0, unsigned ms)   (in nvc0_validate_sample_locations(), argument)
    [all …]
nvc0_vbo.c
    172  nvc0_set_constant_vertex_attrib(struct nvc0_context *nvc0, const unsigned a)   (in nvc0_set_constant_vertex_attrib(), argument)
    174  struct nouveau_pushbuf *push = nvc0->base.pushbuf;   (in nvc0_set_constant_vertex_attrib())
    175  struct pipe_vertex_element *ve = &nvc0->vertex->element[a].pipe;   (in nvc0_set_constant_vertex_attrib())
    176  struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];   (in nvc0_set_constant_vertex_attrib())
    203  nvc0_user_vbuf_range(struct nvc0_context *nvc0, int vbi,   (in nvc0_user_vbuf_range(), argument)
    206  if (unlikely(nvc0->vertex->instance_bufs & (1 << vbi))) {   (in nvc0_user_vbuf_range())
    207  const uint32_t div = nvc0->vertex->min_instance_div[vbi];   (in nvc0_user_vbuf_range())
    208  *base = nvc0->instance_off * nvc0->vtxbuf[vbi].stride;   (in nvc0_user_vbuf_range())
    209  *size = (nvc0->instance_max / div) * nvc0->vtxbuf[vbi].stride +   (in nvc0_user_vbuf_range())
    210  nvc0->vertex->vb_access_size[vbi];   (in nvc0_user_vbuf_range())
    [all …]
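nvc0_user_vbuf_range() computes how many bytes of a user vertex buffer a draw can touch; for per-instance buffers the extent scales with the instance count divided by the attribute's divisor, plus the footprint of one fetch. A self-contained model of that arithmetic (names are ours, not Mesa's):

   #include <stdint.h>

   /* Per-instance range arithmetic modeled on lines 206-210 above. */
   static void
   instance_vbuf_range(uint32_t instance_off, uint32_t instance_max,
                       uint32_t div, uint32_t stride, uint32_t access_size,
                       uint32_t *base, uint32_t *size)
   {
      *base = instance_off * stride;            /* first instance that is read */
      *size = (instance_max / div) * stride +   /* offset of the last fetch */
              access_size;                      /* bytes one vertex fetch touches */
   }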
nvc0_state.c
    198  struct nvc0_context *nvc0 = nvc0_context(pipe);   (in nvc0_blend_state_bind(), local)
    200  nvc0->blend = hwcso;   (in nvc0_blend_state_bind())
    201  nvc0->dirty_3d |= NVC0_NEW_3D_BLEND;   (in nvc0_blend_state_bind())
    350  struct nvc0_context *nvc0 = nvc0_context(pipe);   (in nvc0_rasterizer_state_bind(), local)
    352  nvc0->rast = hwcso;   (in nvc0_rasterizer_state_bind())
    353  nvc0->dirty_3d |= NVC0_NEW_3D_RASTERIZER;   (in nvc0_rasterizer_state_bind())
    428  struct nvc0_context *nvc0 = nvc0_context(pipe);   (in nvc0_zsa_state_bind(), local)
    430  nvc0->zsa = hwcso;   (in nvc0_zsa_state_bind())
    431  nvc0->dirty_3d |= NVC0_NEW_3D_ZSA;   (in nvc0_zsa_state_bind())
    462  nvc0_stage_sampler_states_bind(struct nvc0_context *nvc0,   (in nvc0_stage_sampler_states_bind(), argument)
    [all …]
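All three bind callbacks above follow the same Gallium CSO pattern: store the pre-baked state object and set a dirty bit; hardware methods are emitted later during state validation. Reconstructed sketch of the blend variant, assuming the nvc0 headers:

   static void
   nvc0_blend_state_bind(struct pipe_context *pipe, void *hwcso)
   {
      struct nvc0_context *nvc0 = nvc0_context(pipe);

      nvc0->blend = hwcso;                   /* keep the CSO pointer */
      nvc0->dirty_3d |= NVC0_NEW_3D_BLEND;   /* defer emission to validation */
   }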
nvc0_compute.c
    144  nvc0_compute_validate_samplers(struct nvc0_context *nvc0)   (in nvc0_compute_validate_samplers(), argument)
    146  bool need_flush = nvc0_validate_tsc(nvc0, 5);   (in nvc0_compute_validate_samplers())
    148  BEGIN_NVC0(nvc0->base.pushbuf, NVC0_CP(TSC_FLUSH), 1);   (in nvc0_compute_validate_samplers())
    149  PUSH_DATA (nvc0->base.pushbuf, 0);   (in nvc0_compute_validate_samplers())
    154  nvc0->samplers_dirty[s] = ~0;   (in nvc0_compute_validate_samplers())
    155  nvc0->dirty_3d |= NVC0_NEW_3D_SAMPLERS;   (in nvc0_compute_validate_samplers())
    159  nvc0_compute_validate_textures(struct nvc0_context *nvc0)   (in nvc0_compute_validate_textures(), argument)
    161  bool need_flush = nvc0_validate_tic(nvc0, 5);   (in nvc0_compute_validate_textures())
    163  BEGIN_NVC0(nvc0->base.pushbuf, NVC0_CP(TIC_FLUSH), 1);   (in nvc0_compute_validate_textures())
    164  PUSH_DATA (nvc0->base.pushbuf, 0);   (in nvc0_compute_validate_textures())
    [all …]
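Both compute validators use a conditional cache-flush pattern: revalidate the descriptor table for stage 5 (compute) and emit a flush method only if an entry actually changed. A sketch of the sampler half, reconstructed from the hits; the 3D-side re-dirtying on lines 154-155 is elided:

   static bool
   nvc0_compute_validate_samplers(struct nvc0_context *nvc0)
   {
      bool need_flush = nvc0_validate_tsc(nvc0, 5);   /* stage 5 = compute */
      if (need_flush) {
         BEGIN_NVC0(nvc0->base.pushbuf, NVC0_CP(TSC_FLUSH), 1);
         PUSH_DATA (nvc0->base.pushbuf, 0);
      }
      return need_flush;
   }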
nve4_compute.c
    220  gm107_compute_validate_surfaces(struct nvc0_context *nvc0,   (in gm107_compute_validate_surfaces(), argument)
    224  struct nouveau_pushbuf *push = nvc0->base.pushbuf;   (in gm107_compute_validate_surfaces())
    225  struct nvc0_screen *screen = nvc0->screen;   (in gm107_compute_validate_surfaces())
    226  struct nouveau_bo *txc = nvc0->screen->txc;   (in gm107_compute_validate_surfaces())
    231  tic = nv50_tic_entry(nvc0->images_tic[s][slot]);   (in gm107_compute_validate_surfaces())
    234  nvc0_update_tic(nvc0, tic, res);   (in gm107_compute_validate_surfaces())
    237  tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);   (in gm107_compute_validate_surfaces())
    258  nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);   (in gm107_compute_validate_surfaces())
    263  BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RD);   (in gm107_compute_validate_surfaces())
    283  nve4_compute_validate_surfaces(struct nvc0_context *nvc0)   (in nve4_compute_validate_surfaces(), argument)
    [all …]
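Line 258 pins a TIC slot with a packed bitmask: one bit per table entry, 32 entries per word. A self-contained model of the lock (the table size is illustrative, not Mesa's):

   #include <stdint.h>

   #define TIC_MAX 2048   /* illustrative entry count */

   static uint32_t tic_lock[TIC_MAX / 32];

   static void tic_pin(unsigned id)
   {
      tic_lock[id / 32] |= 1u << (id % 32);   /* mark entry 'id' as in use */
   }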
nvc0_tex.c
    453  nvc0_update_tic(struct nvc0_context *nvc0, struct nv50_tic_entry *tic,   (in nvc0_update_tic(), argument)
    469  nvc0->base.push_data(&nvc0->base, nvc0->screen->txc, tic->id * 32,   (in nvc0_update_tic())
    470  NV_VRAM_DOMAIN(&nvc0->screen->base), 32,   (in nvc0_update_tic())
    479  nvc0_validate_tic(struct nvc0_context *nvc0, int s)   (in nvc0_validate_tic(), argument)
    482  struct nouveau_pushbuf *push = nvc0->base.pushbuf;   (in nvc0_validate_tic())
    487  for (i = 0; i < nvc0->num_textures[s]; ++i) {   (in nvc0_validate_tic())
    488  struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);   (in nvc0_validate_tic())
    490  const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i));   (in nvc0_validate_tic())
    498  need_flush |= nvc0_update_tic(nvc0, tic, res);   (in nvc0_validate_tic())
    501  tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);   (in nvc0_validate_tic())
    [all …]
nvc0_surface.c
    211  struct nvc0_context *nvc0 = nvc0_context(pipe);   (in nvc0_resource_copy_region(), local)
    217  nouveau_copy_buffer(&nvc0->base,   (in nvc0_resource_copy_region())
    220  NOUVEAU_DRV_STAT(&nvc0->screen->base, buf_copy_bytes, src_box->width);   (in nvc0_resource_copy_region())
    223  NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_copy_count, 1);   (in nvc0_resource_copy_region())
    249  nvc0->m2mf_copy_rect(nvc0, &drect, &srect, nx, ny);   (in nvc0_resource_copy_region())
    267  BCTX_REFN(nvc0->bufctx, 2D, nv04_resource(src), RD);   (in nvc0_resource_copy_region())
    268  BCTX_REFN(nvc0->bufctx, 2D, nv04_resource(dst), WR);   (in nvc0_resource_copy_region())
    269  nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx);   (in nvc0_resource_copy_region())
    270  nouveau_pushbuf_validate(nvc0->base.pushbuf);   (in nvc0_resource_copy_region())
    273  ret = nvc0_2d_texture_do_copy(nvc0->base.pushbuf,   (in nvc0_resource_copy_region())
    [all …]
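Lines 267-270 show the residency dance that precedes a 2D blit: declare the source read-only and the destination writable in the bufctx, attach the bufctx to the push buffer, then validate so the kernel pins both buffer objects before any methods are emitted. Annotated fragment (calls are verbatim from the hits):

   BCTX_REFN(nvc0->bufctx, 2D, nv04_resource(src), RD);   /* source: read  */
   BCTX_REFN(nvc0->bufctx, 2D, nv04_resource(dst), WR);   /* dest:   write */
   nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx);
   nouveau_pushbuf_validate(nvc0->base.pushbuf);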
nvc0_query_hw.c
     34  nvc0_hw_query_allocate(struct nvc0_context *nvc0, struct nvc0_query *q,   (in nvc0_hw_query_allocate(), argument)
     38  struct nvc0_screen *screen = nvc0->screen;   (in nvc0_hw_query_allocate())
     58  ret = nouveau_bo_map(hq->bo, 0, nvc0->base.client);   (in nvc0_hw_query_allocate())
     60  nvc0_hw_query_allocate(nvc0, q, 0);   (in nvc0_hw_query_allocate())
     86  nvc0_hw_query_rotate(struct nvc0_context *nvc0, struct nvc0_query *q)   (in nvc0_hw_query_rotate(), argument)
     93  nvc0_hw_query_allocate(nvc0, q, NVC0_HW_QUERY_ALLOC_SPACE);   (in nvc0_hw_query_rotate())
    111  nvc0_hw_destroy_query(struct nvc0_context *nvc0, struct nvc0_query *q)   (in nvc0_hw_destroy_query(), argument)
    116  hq->funcs->destroy_query(nvc0, hq);   (in nvc0_hw_destroy_query())
    120  nvc0_hw_query_allocate(nvc0, q, 0);   (in nvc0_hw_destroy_query())
    126  nvc0_hw_query_write_compute_invocations(struct nvc0_context *nvc0,   (in nvc0_hw_query_write_compute_invocations(), argument)
    [all …]
nvc0_vbo_translate.c
     43  nvc0_push_context_init(struct nvc0_context *nvc0, struct push_context *ctx)   (in nvc0_push_context_init(), argument)
     45  ctx->push = nvc0->base.pushbuf;   (in nvc0_push_context_init())
     47  ctx->translate = nvc0->vertex->translate;   (in nvc0_push_context_init())
     48  ctx->vertex_size = nvc0->vertex->size;   (in nvc0_push_context_init())
     52  nvc0->vertprog->vp.need_vertex_id && (nvc0->vertex->num_elements < 32);   (in nvc0_push_context_init())
     55  ctx->edgeflag.enabled = nvc0->vertprog->vp.edgeflag < PIPE_MAX_ATTRIBS;   (in nvc0_push_context_init())
     64  nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)   (in nvc0_vertex_configure_translate(), argument)
     66  struct translate *translate = nvc0->vertex->translate;   (in nvc0_vertex_configure_translate())
     69  for (i = 0; i < nvc0->num_vtxbufs; ++i) {   (in nvc0_vertex_configure_translate())
     71  const struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[i];   (in nvc0_vertex_configure_translate())
    [all …]
nvc0_transfer.c
     15  nvc0_m2mf_transfer_rect(struct nvc0_context *nvc0,   (in nvc0_m2mf_transfer_rect(), argument)
     20  struct nouveau_pushbuf *push = nvc0->base.pushbuf;   (in nvc0_m2mf_transfer_rect())
     21  struct nouveau_bufctx *bctx = nvc0->bufctx;   (in nvc0_m2mf_transfer_rect())
    110  nve4_m2mf_transfer_rect(struct nvc0_context *nvc0,   (in nve4_m2mf_transfer_rect(), argument)
    129  struct nouveau_pushbuf *push = nvc0->base.pushbuf;   (in nve4_m2mf_transfer_rect())
    130  struct nouveau_bufctx *bctx = nvc0->bufctx;   (in nve4_m2mf_transfer_rect())
    203  struct nvc0_context *nvc0 = nvc0_context(&nv->pipe);   (in nvc0_m2mf_push_linear(), local)
    208  nouveau_bufctx_refn(nvc0->bufctx, 0, dst, domain | NOUVEAU_BO_WR);   (in nvc0_m2mf_push_linear())
    209  nouveau_pushbuf_bufctx(push, nvc0->bufctx);   (in nvc0_m2mf_push_linear())
    237  nouveau_bufctx_reset(nvc0->bufctx, 0);   (in nvc0_m2mf_push_linear())
    [all …]
nvc0_query.c
     37  struct nvc0_context *nvc0 = nvc0_context(pipe);   (in nvc0_create_query(), local)
     40  q = nvc0_sw_create_query(nvc0, type, index);   (in nvc0_create_query())
     42  q = nvc0_hw_create_query(nvc0, type, index);   (in nvc0_create_query())
    100  struct nvc0_context *nvc0 = nvc0_context(pipe);   (in nvc0_render_condition(), local)
    101  struct nouveau_pushbuf *push = nvc0->base.pushbuf;   (in nvc0_render_condition())
    139  nvc0->cond_query = pq;   (in nvc0_render_condition())
    140  nvc0->cond_cond = condition;   (in nvc0_render_condition())
    141  nvc0->cond_condmode = cond;   (in nvc0_render_condition())
    142  nvc0->cond_mode = mode;   (in nvc0_render_condition())
    147  if (nvc0->screen->compute)   (in nvc0_render_condition())
    [all …]
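Lines 40-42 show the dispatch in nvc0_create_query(): try the software path first (driver statistics), then fall back to a hardware query. A sketch assuming that nvc0_sw_create_query() returns NULL for non-software query types:

   static struct pipe_query *
   nvc0_create_query(struct pipe_context *pipe, unsigned type, unsigned index)
   {
      struct nvc0_context *nvc0 = nvc0_context(pipe);
      struct nvc0_query *q;

      q = nvc0_sw_create_query(nvc0, type, index);   /* NULL if not a sw query */
      if (!q)
         q = nvc0_hw_create_query(nvc0, type, index);
      return (struct pipe_query *)q;
   }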
nvc0_program.c
    773  nvc0_program_alloc_code(struct nvc0_context *nvc0, struct nvc0_program *prog)   (in nvc0_program_alloc_code(), argument)
    775  struct nvc0_screen *screen = nvc0->screen;   (in nvc0_program_alloc_code())
    825  nvc0_program_upload_code(struct nvc0_context *nvc0, struct nvc0_program *prog)   (in nvc0_program_upload_code(), argument)
    827  struct nvc0_screen *screen = nvc0->screen;   (in nvc0_program_upload_code())
    864  nvc0->base.push_data(&nvc0->base, screen->text, prog->code_base,   (in nvc0_program_upload_code())
    867  nvc0->base.push_data(&nvc0->base, screen->text, code_pos,   (in nvc0_program_upload_code())
    873  nvc0_program_upload(struct nvc0_context *nvc0, struct nvc0_program *prog)   (in nvc0_program_upload(), argument)
    875  struct nvc0_screen *screen = nvc0->screen;   (in nvc0_program_upload())
    887  ret = nvc0_program_alloc_code(nvc0, prog);   (in nvc0_program_upload())
    891  nvc0->compprog, nvc0->vertprog, nvc0->tctlprog,   (in nvc0_program_upload())
    [all …]
nvc0_query_hw_sm.c
   2282  nvc0_hw_sm_query_get_cfg(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)   (in nvc0_hw_sm_query_get_cfg(), argument)
   2285  struct nvc0_screen *screen = nvc0->screen;   (in nvc0_hw_sm_query_get_cfg())
   2302  nvc0_hw_sm_destroy_query(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)   (in nvc0_hw_sm_destroy_query(), argument)
   2305  nvc0_hw_query_allocate(nvc0, q, 0);   (in nvc0_hw_sm_destroy_query())
   2311  nve4_hw_sm_begin_query(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)   (in nve4_hw_sm_begin_query(), argument)
   2313  struct nvc0_screen *screen = nvc0->screen;   (in nve4_hw_sm_begin_query())
   2314  struct nouveau_pushbuf *push = nvc0->base.pushbuf;   (in nve4_hw_sm_begin_query())
   2320  cfg = nvc0_hw_sm_query_get_cfg(nvc0, hq);   (in nve4_hw_sm_begin_query())
   2396  nvc0_hw_sm_begin_query(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)   (in nvc0_hw_sm_begin_query(), argument)
   2398  struct nvc0_screen *screen = nvc0->screen;   (in nvc0_hw_sm_begin_query())
    [all …]
nvc0_query_sw.c
     68  nvc0_sw_destroy_query(struct nvc0_context *nvc0, struct nvc0_query *q)   (in nvc0_sw_destroy_query(), argument)
     75  nvc0_sw_begin_query(struct nvc0_context *nvc0, struct nvc0_query *q)   (in nvc0_sw_begin_query(), argument)
     81  sq->value = nvc0->screen->base.stats.v[q->index];   (in nvc0_sw_begin_query())
     90  nvc0_sw_end_query(struct nvc0_context *nvc0, struct nvc0_query *q)   (in nvc0_sw_end_query(), argument)
     94  sq->value = nvc0->screen->base.stats.v[q->index] - sq->value;   (in nvc0_sw_end_query())
     99  nvc0_sw_get_query_result(struct nvc0_context *nvc0, struct nvc0_query *q,   (in nvc0_sw_get_query_result(), argument)
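The software queries are a snapshot/delta pair: begin records the running driver counter, and end replaces the snapshot with the difference, so the stored value is the count over the begin/end interval. Self-contained model (names ours):

   #include <stdint.h>

   struct sw_query { uint64_t value; };

   static void sw_begin(struct sw_query *q, uint64_t counter)
   {
      q->value = counter;              /* snapshot at begin */
   }

   static void sw_end(struct sw_query *q, uint64_t counter)
   {
      q->value = counter - q->value;   /* delta over the interval */
   }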
nvc0_query_hw_metric.c
    445  nvc0_hw_metric_query_get_cfg(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)   (in nvc0_hw_metric_query_get_cfg(), argument)
    448  struct nvc0_screen *screen = nvc0->screen;   (in nvc0_hw_metric_query_get_cfg())
    465  nvc0_hw_metric_destroy_query(struct nvc0_context *nvc0,   (in nvc0_hw_metric_destroy_query(), argument)
    473  hmq->queries[i]->funcs->destroy_query(nvc0, hmq->queries[i]);   (in nvc0_hw_metric_destroy_query())
    478  nvc0_hw_metric_begin_query(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)   (in nvc0_hw_metric_begin_query(), argument)
    485  ret = hmq->queries[i]->funcs->begin_query(nvc0, hmq->queries[i]);   (in nvc0_hw_metric_begin_query())
    493  nvc0_hw_metric_end_query(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)   (in nvc0_hw_metric_end_query(), argument)
    499  hmq->queries[i]->funcs->end_query(nvc0, hmq->queries[i]);   (in nvc0_hw_metric_end_query())
    667  nvc0_hw_metric_get_query_result(struct nvc0_context *nvc0,   (in nvc0_hw_metric_get_query_result(), argument)
    672  struct nvc0_screen *screen = nvc0->screen;   (in nvc0_hw_metric_get_query_result())
    [all …]
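A metric query is a composite: destroy, begin, and end all fan out to the underlying hardware queries through their function tables. A hedged fragment for the begin path; the loop bound (num_queries) and the bool return are our assumptions, the indirect call is line 485 verbatim:

   for (unsigned i = 0; i < hmq->num_queries; i++) {
      bool ret = hmq->queries[i]->funcs->begin_query(nvc0, hmq->queries[i]);
      if (!ret)
         return false;   /* abort if any sub-query fails to start */
   }
   return true;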
nvc0_miptree.c
    327  if (config->nvc0.memtype == 0x00)   (in nvc0_miptree_get_modifier())
    329  if (NVC0_TILE_MODE_Y(config->nvc0.tile_mode) > 5)   (in nvc0_miptree_get_modifier())
    331  if (config->nvc0.memtype != uc_kind)   (in nvc0_miptree_get_modifier())
    338  config->nvc0.memtype,   (in nvc0_miptree_get_modifier())
    339  NVC0_TILE_MODE_Y(config->nvc0.tile_mode));   (in nvc0_miptree_get_modifier())
    496  bo_config.nvc0.memtype = 0;   (in nvc0_miptree_create())
    498  bo_config.nvc0.memtype = (modifier >> 12) & 0xff;   (in nvc0_miptree_create())
    501  bo_config.nvc0.memtype = nvc0_mt_choose_storage_type(pscreen, mt, compressed);   (in nvc0_miptree_create())
    513  if (likely(bo_config.nvc0.memtype)) {   (in nvc0_miptree_create())
    529  bo_config.nvc0.tile_mode = mt->level[0].tile_mode;   (in nvc0_miptree_create())
    [all …]
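Line 498 recovers the memory kind from a DRM format modifier: the expression (modifier >> 12) & 0xff reads an 8-bit field starting at bit 12. Self-contained model of the unpack:

   #include <stdint.h>

   static uint8_t modifier_memtype(uint64_t modifier)
   {
      return (modifier >> 12) & 0xff;   /* 8-bit kind field at bits 12..19 */
   }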
/third_party/mesa3d/src/gallium/drivers/nouveau/
meson.build
    125  'nvc0/cla0c0qmd.h',
    126  'nvc0/clc0c0qmd.h',
    127  'nvc0/clc3c0qmd.h',
    128  'nvc0/drf.h',
    129  'nvc0/qmd.h',
    130  'nvc0/qmda0c0.c',
    131  'nvc0/qmdc0c0.c',
    132  'nvc0/qmdc3c0.c',
    133  'nvc0/gm107_texture.xml.h',
    134  'nvc0/nvc0_3d.xml.h',
    [all …]
/third_party/mesa3d/docs/
features.txt
     39  GL 3.0, GLSL 1.30 --- all DONE: freedreno, i965, nv50, nvc0, r600, radeonsi, llvmpipe, softpipe, vi…
     72  GL 3.1, GLSL 1.40 --- all DONE: freedreno, i965, nv50, nvc0, r600, radeonsi, llvmpipe, softpipe, vi…
     85  GL 3.2, GLSL 1.50 --- all DONE: freedreno, i965, nv50, nvc0, r600, radeonsi, llvmpipe, softpipe, vi…
    100  GL 3.3, GLSL 3.30 --- all DONE: freedreno, i965, nv50, nvc0, r600, radeonsi, llvmpipe, softpipe, vi…
    114  GL 4.0, GLSL 4.00 --- all DONE: i965/gen7+, nvc0, r600, radeonsi, llvmpipe, virgl, zink, d3d12
    143  GL 4.1, GLSL 4.10 --- all DONE: i965/gen7+, nvc0, r600, radeonsi, llvmpipe, virgl, zink, d3d12
    153  GL 4.2, GLSL 4.20 -- all DONE: i965/gen7+, nvc0, r600, radeonsi, llvmpipe, virgl, zink, d3d12
    169  GL 4.3, GLSL 4.30 -- all DONE: i965/gen8+, nvc0, r600, radeonsi, llvmpipe, virgl, zink
    195  GL 4.4, GLSL 4.40 -- all DONE: i965/gen8+, nvc0, r600, radeonsi, llvmpipe, zink
    213  GL 4.5, GLSL 4.50 -- all DONE: nvc0, r600, radeonsi, llvmpipe, zink
    [all …]
/third_party/libdrm/nouveau/
abi16.c
     63  struct nvc0_fifo *nvc0 = obj->data;   (in abi16_chan_nvc0(), local)
     71  nvc0->base.channel = req.channel;   (in abi16_chan_nvc0())
     72  nvc0->base.pushbuf = req.pushbuf_domains;   (in abi16_chan_nvc0())
     73  nvc0->notify = req.notifier_handle;   (in abi16_chan_nvc0())
     74  nvc0->base.object->handle = req.channel;   (in abi16_chan_nvc0())
     75  nvc0->base.object->length = sizeof(*nvc0);   (in abi16_chan_nvc0())
    293  bo->config.nvc0.memtype = (info->tile_flags & 0xff00) >> 8;   (in abi16_bo_info())
    294  bo->config.nvc0.tile_mode = info->tile_mode;   (in abi16_bo_info())
    338  info->tile_flags = (config->nvc0.memtype & 0xff) << 8;   (in abi16_bo_init())
    339  info->tile_mode = config->nvc0.tile_mode;   (in abi16_bo_init())
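Lines 293 and 338 are inverses of each other: the legacy ABI16 kernel interface carries the nvc0 memory type in bits 8..15 of tile_flags. A self-contained round trip of the packing:

   #include <assert.h>
   #include <stdint.h>

   static uint32_t pack_tile_flags(uint8_t memtype)
   {
      return (uint32_t)(memtype & 0xff) << 8;   /* abi16_bo_init() direction */
   }

   static uint8_t unpack_memtype(uint32_t tile_flags)
   {
      return (tile_flags & 0xff00) >> 8;        /* abi16_bo_info() direction */
   }

   int main(void)
   {
      assert(unpack_memtype(pack_tile_flags(0xfe)) == 0xfe);
      return 0;
   }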
/third_party/mesa3d/docs/relnotes/
10.2.rst
     29  - GL_ARB_buffer_storage on i965, nv30, nv50, nvc0, r300, r600, and
     32  - GL_ARB_sample_shading on nv50 (GT21x only), nvc0
     37  - GL_ARB_texture_gather on nv50 (GT21x only), nvc0
     38  - GL_ARB_texture_query_lod on nv50 (GT21x only), nvc0
     40  - GL_ARB_vertex_type_10f_11f_11f_rev on nv50, nvc0, radeonsi
11.1.1.rst
     92  - nvc0: don't forget to reset VTX_TMP bufctx slot after blit completion
     94  - nv50,nvc0: make sure there's pushbuf space and that we ref the bo
     96  - nv50,nvc0: fix crash when increasing bsp bo size for h264
     97  - nvc0: scale up inter_bo size so that it's 16M for a 4K video
    109  - nvc0: Set winding order regardless of domain.
    151  - nv50,nvc0: fix use-after-free when vertex buffers are unbound
    161  - nvc0: free memory allocated by the prog which reads MP perf counters
    162  - nv50,nvc0: free memory allocated by performance metrics
10.6.1.rst
     70  - nvc0/ir: fix collection of first uses for texture barrier insertion
     71  - nv50,nvc0: clamp uniform size to 64k
     72  - nvc0/ir: can't have a join on a load with an indirect source
     78  - nvc0: always put all tfb bufs into bufctx
     79  - nv50,nvc0: make sure to pushbuf_refn before putting bo into
11.0.9.rst
     57  - nvc0: don't forget to reset VTX_TMP bufctx slot after blit completion
     59  - nv50,nvc0: make sure there's pushbuf space and that we ref the bo
     61  - nv50,nvc0: fix crash when increasing bsp bo size for h264
     62  - nvc0: scale up inter_bo size so that it's 16M for a 4K video
     67  - nvc0: Set winding order regardless of domain.