/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef PIPE_CONTEXT_H
#define PIPE_CONTEXT_H

#include "util/compiler.h"
#include "util/format/u_formats.h"
#include "p_video_enums.h"
#include "p_defines.h"
#include "util/u_debug.h"
#include <stdio.h>
#include "frontend/winsys_handle.h"

#ifdef __cplusplus
extern "C" {
#endif


struct pipe_blend_color;
struct pipe_blend_state;
struct pipe_blit_info;
struct pipe_box;
struct pipe_clip_state;
struct pipe_compute_state_object_info;
struct pipe_constant_buffer;
struct pipe_depth_stencil_alpha_state;
struct pipe_device_reset_callback;
struct pipe_draw_info;
struct pipe_draw_indirect_info;
struct pipe_draw_start_count_bias;
struct pipe_draw_vertex_state_info;
struct pipe_grid_info;
struct pipe_fence_handle;
struct pipe_framebuffer_state;
struct pipe_image_view;
struct pipe_query;
struct pipe_poly_stipple;
struct pipe_rasterizer_state;
struct pipe_resolve_info;
struct pipe_resource;
struct pipe_sampler_state;
struct pipe_sampler_view;
struct pipe_scissor_state;
struct pipe_shader_buffer;
struct pipe_shader_state;
struct pipe_stencil_ref;
struct pipe_stream_output_target;
struct pipe_surface;
struct pipe_transfer;
struct pipe_vertex_buffer;
struct pipe_vertex_element;
struct pipe_vertex_state;
struct pipe_video_buffer;
struct pipe_video_codec;
struct pipe_viewport_state;
struct pipe_compute_state;
struct pipe_ml_operation;
struct pipe_ml_subgraph;
struct pipe_tensor;
union pipe_color_union;
union pipe_query_result;
struct u_log_context;
struct u_upload_mgr;
struct util_debug_callback;
struct u_vbuf;
struct pipe_context;

typedef void (*pipe_draw_func)(struct pipe_context *pipe,
                               const struct pipe_draw_info *info,
                               unsigned drawid_offset,
                               const struct pipe_draw_indirect_info *indirect,
                               const struct pipe_draw_start_count_bias *draws,
                               unsigned num_draws);
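
/*
 * Example (informative sketch, not part of the interface): a direct
 * two-draw invocation through a context's draw_vbo hook (a pipe_draw_func,
 * see below) might look roughly like this, assuming all required state has
 * already been bound and using the pipe_draw_info and
 * pipe_draw_start_count_bias layouts from p_state.h:
 *
 *    struct pipe_draw_info info;
 *    memset(&info, 0, sizeof(info));
 *    info.mode = MESA_PRIM_TRIANGLES;
 *    info.instance_count = 1;
 *
 *    struct pipe_draw_start_count_bias draws[2] = {
 *       { .start = 0,  .count = 36 },
 *       { .start = 36, .count = 36 },
 *    };
 *
 *    ctx->draw_vbo(ctx, &info, 0, NULL, draws, 2);
 */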

/**
 * Gallium rendering context.  Basically:
 *  - state setting functions
 *  - VBO drawing functions
 *  - surface functions
 */
struct pipe_context {
   struct pipe_screen *screen;

   void *priv;  /**< context private data (for DRI for example) */
   void *draw;  /**< private, for draw module (temporary?) */
   struct u_vbuf *vbuf;  /**< for cso_context, don't use in drivers */

   /**
    * Stream uploaders created by the driver. All drivers, gallium frontends,
    * and modules should use them.
    *
    * Use u_upload_alloc or u_upload_data as many times as you want.
    * Once you are done, use u_upload_unmap.
    */
   struct u_upload_mgr *stream_uploader; /* everything but shader constants */
   struct u_upload_mgr *const_uploader;  /* shader constants only */

   /**
    * Debug callback set by u_default_set_debug_callback. Frontends should use
    * set_debug_callback in case drivers need to flush compiler queues.
    */
   struct util_debug_callback debug;

   void (*destroy)(struct pipe_context *);

   /**
    * VBO drawing
    */
   /*@{*/
   /**
    * Multi draw.
    *
    * For indirect multi draws, num_draws is 1 and indirect->draw_count
    * is used instead.
    *
    * Caps:
    * - Always supported: Direct multi draws
    * - pipe_caps.multi_draw_indirect: Indirect multi draws
    * - pipe_caps.multi_draw_indirect_params: Indirect draw count
    *
    * Differences against glMultiDraw and glMultiMode:
    * - "info->mode" and "draws->index_bias" are always constant due to
    *   the lack of hardware support and CPU performance concerns. Only
    *   start and count vary.
    * - if "info->increment_draw_id" is false, draw_id doesn't change between
    *   draws
    *
    * Direct multi draws are also generated by u_threaded_context, which looks
    * ahead in gallium command buffers and merges single draws.
    *
    * \param pipe           context
    * \param info           draw info
    * \param drawid_offset  offset to add for drawid param of each draw
    * \param indirect       indirect multi draws
    * \param draws          array of (start, count) pairs for direct draws
    * \param num_draws      number of direct draws; 1 for indirect multi draws
    */
   pipe_draw_func draw_vbo;

   /**
    * Multi draw for display lists.
    *
    * For more information, see pipe_vertex_state and
    * pipe_draw_vertex_state_info.
    *
    * Explanation of partial_velem_mask:
    *
    * 1. pipe_vertex_state::input::elements have a monotonic logical index
    *    determined by pipe_vertex_state::input::full_velem_mask, specifically,
    *    the position of the i-th bit set is the logical index of the i-th
    *    vertex element, up to 31.
    *
    * 2. pipe_vertex_state::input::partial_velem_mask is a subset of
    *    full_velem_mask where the bits set determine which vertex elements
    *    should be bound contiguously. The vertex elements corresponding to
    *    the bits not set in partial_velem_mask should be ignored.
    *
    * Those two allow creating a pipe_vertex_state that has more vertex
    * attributes than the vertex shader has inputs. The idea is that
    * pipe_vertex_state can be used with any vertex shader that has the same
    * logical indices and the same number of inputs or fewer. This may sound
    * like an overly complicated way to bind a subset of vertex elements, but
    * it actually simplifies everything else:
    *
    * - In st/mesa, full_velem_mask is exactly the mask of enabled vertex
    *   attributes (VERT_ATTRIB_x) in the display list VAO, while
    *   partial_velem_mask is exactly the inputs_read mask of the vertex
    *   shader (also VERT_ATTRIB_x).
    *
    * - In the driver, a few bit ops and popcounts are enough to assemble
    *   the vertex elements very quickly, as sketched below.
    */
   void (*draw_vertex_state)(struct pipe_context *ctx,
                             struct pipe_vertex_state *state,
                             uint32_t partial_velem_mask,
                             struct pipe_draw_vertex_state_info info,
                             const struct pipe_draw_start_count_bias *draws,
                             unsigned num_draws);
   /*@}*/
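
   /*
    * Example (informative sketch, not part of the interface): a driver
    * might gather the bound subset of vertex elements like this. The
    * helpers u_bit_scan, util_bitcount, and BITFIELD_MASK come from the
    * util headers; "full_velem_mask" mirrors state->input.full_velem_mask,
    * and "src_elements"/"bound_elements" are hypothetical driver-side
    * arrays.
    *
    *    uint32_t mask = partial_velem_mask;
    *    unsigned num_bound = 0;
    *
    *    while (mask) {
    *       // bit position == logical index of the next element to bind
    *       unsigned bit = u_bit_scan(&mask);
    *       // storage index == number of full_velem_mask bits below it
    *       unsigned src = util_bitcount(full_velem_mask & BITFIELD_MASK(bit));
    *
    *       bound_elements[num_bound++] = src_elements[src];
    *    }
    */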

   /**
    * Predicate subsequent rendering on occlusion query result
    * \param query  the query predicate, or NULL if no predicate
    * \param condition  whether to skip on FALSE or TRUE query results
    * \param mode  one of PIPE_RENDER_COND_x
    */
   void (*render_condition)(struct pipe_context *pipe,
                            struct pipe_query *query,
                            bool condition,
                            enum pipe_render_cond_flag mode);

   /**
    * Predicate subsequent rendering on a value in a buffer
    * \param buffer  The buffer to query for the value
    * \param offset  Offset in the buffer of the 32-bit value to query
    * \param condition  whether to skip on FALSE or TRUE query results
    */
   void (*render_condition_mem)(struct pipe_context *pipe,
                                struct pipe_resource *buffer,
                                uint32_t offset,
                                bool condition);
   /**
    * Query objects
    */
   /*@{*/
   struct pipe_query *(*create_query)(struct pipe_context *pipe,
                                      unsigned query_type,
                                      unsigned index);

   /**
    * Create a query object that queries all given query types simultaneously.
    *
    * This can only be used for those query types for which
    * get_driver_query_info indicates that it must be used. Only one batch
    * query object may be active at a time.
    *
    * There may be additional constraints on which query types can be used
    * together, in particular those that are implied by
    * get_driver_query_group_info.
    *
    * \param num_queries  the number of query types
    * \param query_types  array of \p num_queries query types
    * \return a query object, or NULL on error.
    */
   struct pipe_query *(*create_batch_query)(struct pipe_context *pipe,
                                            unsigned num_queries,
                                            unsigned *query_types);

   void (*destroy_query)(struct pipe_context *pipe,
                         struct pipe_query *q);

   bool (*begin_query)(struct pipe_context *pipe, struct pipe_query *q);
   bool (*end_query)(struct pipe_context *pipe, struct pipe_query *q);

   /**
    * Get results of a query.
    * \param wait  if true, this query will block until the result is ready
    * \return TRUE if results are ready, FALSE otherwise
    */
   bool (*get_query_result)(struct pipe_context *pipe,
                            struct pipe_query *q,
                            bool wait,
                            union pipe_query_result *result);
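
   /*
    * Example (informative, not part of the interface): a typical occlusion
    * query round trip. PIPE_QUERY_OCCLUSION_COUNTER and pipe_query_result
    * come from p_defines.h / p_state.h; error handling is omitted.
    *
    *    struct pipe_query *q =
    *       ctx->create_query(ctx, PIPE_QUERY_OCCLUSION_COUNTER, 0);
    *
    *    ctx->begin_query(ctx, q);
    *    // ... draw ...
    *    ctx->end_query(ctx, q);
    *
    *    union pipe_query_result result;
    *    ctx->get_query_result(ctx, q, true, &result);  // wait for the GPU
    *    ctx->destroy_query(ctx, q);
    */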

   /**
    * Get results of a query, storing into resource. Note that this may not
    * be used with batch queries.
    *
    * \param wait  if true, this query will block until the result is ready
    * \param result_type  the type of the value being stored
    * \param index  for queries that return multiple pieces of data, which
    *               item of that data to store (e.g. for
    *               PIPE_QUERY_PIPELINE_STATISTICS). When the index is -1,
    *               the driver should write 1 or 0 to the appropriate
    *               location instead of the query value, with 1 meaning that
    *               the query result is available.
    */
   void (*get_query_result_resource)(struct pipe_context *pipe,
                                     struct pipe_query *q,
                                     enum pipe_query_flags flags,
                                     enum pipe_query_value_type result_type,
                                     int index,
                                     struct pipe_resource *resource,
                                     unsigned offset);

   /**
    * Set whether all current non-driver queries except TIME_ELAPSED are
    * active or paused.
    */
   void (*set_active_query_state)(struct pipe_context *pipe, bool enable);

   /**
    * INTEL Performance Query
    */
   /*@{*/

   unsigned (*init_intel_perf_query_info)(struct pipe_context *pipe);

   void (*get_intel_perf_query_info)(struct pipe_context *pipe,
                                     unsigned query_index,
                                     const char **name,
                                     uint32_t *data_size,
                                     uint32_t *n_counters,
                                     uint32_t *n_active);

   void (*get_intel_perf_query_counter_info)(struct pipe_context *pipe,
                                             unsigned query_index,
                                             unsigned counter_index,
                                             const char **name,
                                             const char **desc,
                                             uint32_t *offset,
                                             uint32_t *data_size,
                                             uint32_t *type_enum,
                                             uint32_t *data_type_enum,
                                             uint64_t *raw_max);

   struct pipe_query *(*new_intel_perf_query_obj)(struct pipe_context *pipe,
                                                  unsigned query_index);

   bool (*begin_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);

   void (*end_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);

   void (*delete_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);

   void (*wait_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q);

   bool (*is_intel_perf_query_ready)(struct pipe_context *pipe, struct pipe_query *q);

   bool (*get_intel_perf_query_data)(struct pipe_context *pipe,
                                     struct pipe_query *q,
                                     size_t data_size,
                                     uint32_t *data,
                                     uint32_t *bytes_written);

   /*@}*/
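
   /*
    * Example (informative, not part of the interface): writing a query
    * result and its availability flag to a buffer, e.g. for
    * GL_ARB_query_buffer_object. The enum values come from p_defines.h;
    * "buf" is a hypothetical buffer resource created by the caller.
    *
    *    // 64-bit result at offset 0
    *    ctx->get_query_result_resource(ctx, q, PIPE_QUERY_WAIT,
    *                                   PIPE_QUERY_TYPE_U64, 0, buf, 0);
    *    // availability flag (index == -1) at offset 8
    *    ctx->get_query_result_resource(ctx, q, 0,
    *                                   PIPE_QUERY_TYPE_U64, -1, buf, 8);
    */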

   /**
    * \name GLSL shader/program functions.
    */
   /*@{*/
   /**
    * Called when a shader program is linked.
    * \param handles  Array of shader handles attached to this program.
    *                 The size of the array is \c PIPE_SHADER_TYPES, and each
    *                 position contains the corresponding \c pipe_shader_state*
    *                 or \c pipe_compute_state*, or \c NULL.
    *                 E.g. you can retrieve the fragment shader handle with
    *                 \c handles[PIPE_SHADER_FRAGMENT]
    */
   void (*link_shader)(struct pipe_context *, void **handles);
   /*@}*/

   /**
    * State functions (create/bind/destroy state objects)
    */
   /*@{*/
   void * (*create_blend_state)(struct pipe_context *,
                                const struct pipe_blend_state *);
   void (*bind_blend_state)(struct pipe_context *, void *);
   void (*delete_blend_state)(struct pipe_context *, void *);

   void * (*create_sampler_state)(struct pipe_context *,
                                  const struct pipe_sampler_state *);
   void (*bind_sampler_states)(struct pipe_context *,
                               enum pipe_shader_type shader,
                               unsigned start_slot, unsigned num_samplers,
                               void **samplers);
   void (*delete_sampler_state)(struct pipe_context *, void *);

   void * (*create_rasterizer_state)(struct pipe_context *,
                                     const struct pipe_rasterizer_state *);
   void (*bind_rasterizer_state)(struct pipe_context *, void *);
   void (*delete_rasterizer_state)(struct pipe_context *, void *);

   void * (*create_depth_stencil_alpha_state)(struct pipe_context *,
                                              const struct pipe_depth_stencil_alpha_state *);
   void (*bind_depth_stencil_alpha_state)(struct pipe_context *, void *);
   void (*delete_depth_stencil_alpha_state)(struct pipe_context *, void *);

   void * (*create_fs_state)(struct pipe_context *,
                             const struct pipe_shader_state *);
   void (*bind_fs_state)(struct pipe_context *, void *);
   void (*delete_fs_state)(struct pipe_context *, void *);

   void * (*create_vs_state)(struct pipe_context *,
                             const struct pipe_shader_state *);
   void (*bind_vs_state)(struct pipe_context *, void *);
   void (*delete_vs_state)(struct pipe_context *, void *);

   void * (*create_gs_state)(struct pipe_context *,
                             const struct pipe_shader_state *);
   void (*bind_gs_state)(struct pipe_context *, void *);
   void (*delete_gs_state)(struct pipe_context *, void *);

   void * (*create_tcs_state)(struct pipe_context *,
                              const struct pipe_shader_state *);
   void (*bind_tcs_state)(struct pipe_context *, void *);
   void (*delete_tcs_state)(struct pipe_context *, void *);

   void * (*create_tes_state)(struct pipe_context *,
                              const struct pipe_shader_state *);
   void (*bind_tes_state)(struct pipe_context *, void *);
   void (*delete_tes_state)(struct pipe_context *, void *);

   void * (*create_vertex_elements_state)(struct pipe_context *,
                                          unsigned num_elements,
                                          const struct pipe_vertex_element *);
   /**
    * Bind vertex elements state.
    *
    * Frontends MUST call set_vertex_buffers after bind_vertex_elements_state
    * and before the next draw. This ensures the driver can apply the state
    * change before the next draw. Drivers MAY use this constraint to merge
    * vertex elements and vertex buffers in set_vertex_buffers instead of
    * in draw_vbo.
    */
   void (*bind_vertex_elements_state)(struct pipe_context *, void *);
   void (*delete_vertex_elements_state)(struct pipe_context *, void *);
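
   /*
    * Example (informative, not part of the interface): the
    * create/bind/delete pattern shared by all state objects, shown here for
    * blend state. The returned void* is an opaque driver object that may be
    * bound any number of times before being deleted.
    *
    *    struct pipe_blend_state blend;
    *    memset(&blend, 0, sizeof(blend));
    *    blend.rt[0].colormask = PIPE_MASK_RGBA;
    *
    *    void *cso = ctx->create_blend_state(ctx, &blend);
    *    ctx->bind_blend_state(ctx, cso);
    *    // ... draw ...
    *    // later, once the object is no longer bound:
    *    ctx->delete_blend_state(ctx, cso);
    */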

   void * (*create_ts_state)(struct pipe_context *,
                             const struct pipe_shader_state *);
   void (*bind_ts_state)(struct pipe_context *, void *);
   void (*delete_ts_state)(struct pipe_context *, void *);

   void * (*create_ms_state)(struct pipe_context *,
                             const struct pipe_shader_state *);
   void (*bind_ms_state)(struct pipe_context *, void *);
   void (*delete_ms_state)(struct pipe_context *, void *);
   /*@}*/

   /**
    * Parameter-like state (or properties)
    */
   /*@{*/
   void (*set_blend_color)(struct pipe_context *,
                           const struct pipe_blend_color *);

   void (*set_stencil_ref)(struct pipe_context *,
                           const struct pipe_stencil_ref ref);

   void (*set_sample_mask)(struct pipe_context *,
                           unsigned sample_mask);

   void (*set_min_samples)(struct pipe_context *,
                           unsigned min_samples);

   void (*set_clip_state)(struct pipe_context *,
                          const struct pipe_clip_state *);

   /**
    * Set constant buffer
    *
    * \param shader  Shader stage
    * \param index  Buffer binding slot index within a shader stage
    * \param take_ownership  The callee takes ownership of the buffer
    *                        reference (the callee shouldn't increment the
    *                        ref count)
    * \param buf  Constant buffer parameters
    */
   void (*set_constant_buffer)(struct pipe_context *,
                               enum pipe_shader_type shader, uint index,
                               bool take_ownership,
                               const struct pipe_constant_buffer *buf);

   /**
    * Set inlinable constants for constant buffer 0.
    *
    * These are constants that the driver would like to inline in the IR
    * of the current shader and recompile it. Drivers can determine which
    * constants they prefer to inline in finalize_nir and store that
    * information in shader_info::*inlinable_uniform*. When the state tracker
    * or frontend uploads constants to a constant buffer, it can pass
    * inlinable constants separately via this call.
    *
    * Any set_constant_buffer call invalidates this state, so this function
    * must be called after it. Binding a shader also invalidates this state.
    *
    * There is no PIPE_CAP for this. Drivers shouldn't set the shader_info
    * fields if they don't want this or if they don't implement this.
    */
   void (*set_inlinable_constants)(struct pipe_context *,
                                   enum pipe_shader_type shader,
                                   uint num_values, uint32_t *values);

   void (*set_framebuffer_state)(struct pipe_context *,
                                 const struct pipe_framebuffer_state *);

   /**
    * Set the sample locations used during rasterization. When NULL or sized
    * zero, the default locations are used.
    *
    * Note that get_sample_position() still returns the default locations.
    *
    * The samples are accessed with
    *    locations[(pixel_y*grid_w+pixel_x)*ms+i],
    * where:
    *    ms = the sample count
    *    grid_w = the pixel grid width for the sample count
    *    grid_h = the pixel grid height for the sample count
    *    pixel_x = the window x coordinate modulo grid_w
    *    pixel_y = the window y coordinate modulo grid_h
    *    i = the sample index
    * This gives a result with the x coordinate as the low 4 bits and the y
    * coordinate as the high 4 bits. For each coordinate 0 is the left or top
    * edge of the pixel's rectangle and 16 (not 15) is the right or bottom
    * edge.
    *
    * Out of bounds accesses return undefined values.
    *
    * The pixel grid is used to vary sample locations across pixels and its
    * size can be queried with get_sample_pixel_grid().
    */
   void (*set_sample_locations)(struct pipe_context *,
                                size_t size, const uint8_t *locations);
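
   /*
    * Example (informative, not part of the interface): decoding one packed
    * sample location byte into fractional pixel coordinates, following the
    * 4.4 fixed-point encoding described above:
    *
    *    uint8_t loc = locations[(pixel_y * grid_w + pixel_x) * ms + i];
    *    float x = (loc & 0xf) / 16.0f;  // 0.0 = left edge, 1.0 = right edge
    *    float y = (loc >> 4) / 16.0f;   // 0.0 = top edge, 1.0 = bottom edge
    */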

   void (*set_polygon_stipple)(struct pipe_context *,
                               const struct pipe_poly_stipple *);

   void (*set_scissor_states)(struct pipe_context *,
                              unsigned start_slot,
                              unsigned num_scissors,
                              const struct pipe_scissor_state *);

   void (*set_window_rectangles)(struct pipe_context *,
                                 bool include,
                                 unsigned num_rectangles,
                                 const struct pipe_scissor_state *);

   void (*set_viewport_states)(struct pipe_context *,
                               unsigned start_slot,
                               unsigned num_viewports,
                               const struct pipe_viewport_state *);

   void (*set_sampler_views)(struct pipe_context *,
                             enum pipe_shader_type shader,
                             unsigned start_slot, unsigned num_views,
                             unsigned unbind_num_trailing_slots,
                             bool take_ownership,
                             struct pipe_sampler_view **views);

   void (*set_tess_state)(struct pipe_context *,
                          const float default_outer_level[4],
                          const float default_inner_level[2]);

   /**
    * Set the number of vertices per input patch for tessellation.
    */
   void (*set_patch_vertices)(struct pipe_context *ctx,
                              uint8_t patch_vertices);

   /**
    * Sets the debug callback. If the pointer is null, then no callback is
    * set, otherwise a copy of the data should be made.
    */
   void (*set_debug_callback)(struct pipe_context *,
                              const struct util_debug_callback *);

   /**
    * Bind an array of shader buffers that will be used by a shader.
    * Any buffers that were previously bound to the specified range
    * will be unbound.
    *
    * \param shader  selects shader stage
    * \param start_slot  first buffer slot to bind.
    * \param count  number of consecutive buffers to bind.
    * \param buffers  array of pointers to the buffers to bind, it
    *                 should contain at least \a count elements
    *                 unless it's NULL, in which case no buffers will
    *                 be bound.
    * \param writable_bitmask  If bit i is not set, buffers[i] will only be
    *                          used with loads. If unsure, set to ~0.
    */
   void (*set_shader_buffers)(struct pipe_context *,
                              enum pipe_shader_type shader,
                              unsigned start_slot, unsigned count,
                              const struct pipe_shader_buffer *buffers,
                              unsigned writable_bitmask);

   /**
    * Bind an array of hw atomic buffers for use by all shaders.
    * Any buffers that were previously bound to the specified range
    * will be unbound.
    *
    * \param start_slot  first buffer slot to bind.
    * \param count  number of consecutive buffers to bind.
    * \param buffers  array of pointers to the buffers to bind, it
    *                 should contain at least \a count elements
    *                 unless it's NULL, in which case no buffers will
    *                 be bound.
    */
   void (*set_hw_atomic_buffers)(struct pipe_context *,
                                 unsigned start_slot, unsigned count,
                                 const struct pipe_shader_buffer *buffers);
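
   /*
    * Example (informative, not part of the interface): binding two SSBOs to
    * a compute shader, the first read-only and the second writable, using
    * the pipe_shader_buffer layout from p_state.h. "buf0" and "buf1" are
    * hypothetical buffer resources.
    *
    *    struct pipe_shader_buffer bufs[2] = {
    *       { .buffer = buf0, .buffer_offset = 0, .buffer_size = 4096 },
    *       { .buffer = buf1, .buffer_offset = 0, .buffer_size = 4096 },
    *    };
    *
    *    // only bit 1 set -> only bufs[1] may be stored to
    *    ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, 2, bufs, 0x2);
    */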

   /**
    * Bind an array of images that will be used by a shader.
    * Any images that were previously bound to the specified range
    * will be unbound.
    *
    * \param shader  selects shader stage
    * \param start_slot  first image slot to bind.
    * \param count  number of consecutive images to bind.
    * \param unbind_num_trailing_slots  number of images to unbind after
    *                                   the bound slot
    * \param images  array of the images to bind, it
    *                should contain at least \a count elements
    *                unless it's NULL, in which case no images will
    *                be bound.
    */
   void (*set_shader_images)(struct pipe_context *,
                             enum pipe_shader_type shader,
                             unsigned start_slot, unsigned count,
                             unsigned unbind_num_trailing_slots,
                             const struct pipe_image_view *images);

   /**
    * Bind an array of vertex buffers to the specified slots.
    *
    * Unlike other set functions, the caller should always increment
    * the buffer reference counts because the driver should only copy
    * the pipe_resource pointers. This is the same behavior as setting
    * take_ownership = true in other functions.
    *
    * count must be equal to the maximum used vertex buffer index + 1
    * in the vertex elements, or 0.
    *
    * \param count  number of consecutive vertex buffers to bind.
    * \param buffers  array of the buffers to bind
    */
   void (*set_vertex_buffers)(struct pipe_context *,
                              unsigned count,
                              const struct pipe_vertex_buffer *);

   /*@}*/

   /**
    * Stream output functions.
    */
   /*@{*/

   struct pipe_stream_output_target *(*create_stream_output_target)(
                                    struct pipe_context *,
                                    struct pipe_resource *,
                                    unsigned buffer_offset,
                                    unsigned buffer_size);

   void (*stream_output_target_destroy)(struct pipe_context *,
                                        struct pipe_stream_output_target *);

   void (*set_stream_output_targets)(struct pipe_context *,
                                     unsigned num_targets,
                                     struct pipe_stream_output_target **targets,
                                     const unsigned *offsets,
                                     enum mesa_prim output_prim);

   uint32_t (*stream_output_target_offset)(struct pipe_stream_output_target *target);

   /*@}*/


   /**
    * INTEL_blackhole_render
    */
   /*@{*/

   void (*set_frontend_noop)(struct pipe_context *,
                             bool enable);

   /*@}*/


   /**
    * Resource functions for blit-like functionality
    *
    * If a driver supports multisampling, blit must implement color resolve.
    */
   /*@{*/

   /**
    * Copy a block of pixels from one resource to another.
    * The resources must have the same format.
    * Resources with nr_samples > 1 are not allowed.
    */
   void (*resource_copy_region)(struct pipe_context *pipe,
                                struct pipe_resource *dst,
                                unsigned dst_level,
                                unsigned dstx, unsigned dsty, unsigned dstz,
                                struct pipe_resource *src,
                                unsigned src_level,
                                const struct pipe_box *src_box);

   /* Optimal hardware path for blitting pixels.
    * Scaling, format conversion, up- and downsampling (resolve) are allowed.
    */
   void (*blit)(struct pipe_context *pipe,
                const struct pipe_blit_info *info);

   /*@}*/

   /**
    * Clear the specified set of currently bound buffers to specified values.
    * The entire buffers are cleared (no colormask, etc).
    *
    * \param buffers  bitfield of PIPE_CLEAR_* values.
    * \param scissor_state  the scissored region to clear
    * \param color  pointer to a union holding the float/int/unsigned clear
    *               value for each of r, g, b, a.
    * \param depth  depth clear value in [0,1].
    * \param stencil  stencil clear value
    */
   void (*clear)(struct pipe_context *pipe,
                 unsigned buffers,
                 const struct pipe_scissor_state *scissor_state,
                 const union pipe_color_union *color,
                 double depth,
                 unsigned stencil);
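
   /*
    * Example (informative, not part of the interface): clearing the first
    * color buffer and the depth buffer of the currently bound framebuffer.
    * PIPE_CLEAR_* come from p_defines.h.
    *
    *    union pipe_color_union color;
    *    color.f[0] = color.f[1] = color.f[2] = 0.0f;
    *    color.f[3] = 1.0f;
    *
    *    ctx->clear(ctx, PIPE_CLEAR_COLOR0 | PIPE_CLEAR_DEPTH, NULL,
    *               &color, 1.0, 0);
    */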

   /**
    * Clear a color rendertarget surface.
    * \param color  pointer to a union holding the float/int/unsigned clear
    *               value for each of r, g, b, a.
    */
   void (*clear_render_target)(struct pipe_context *pipe,
                               struct pipe_surface *dst,
                               const union pipe_color_union *color,
                               unsigned dstx, unsigned dsty,
                               unsigned width, unsigned height,
                               bool render_condition_enabled);

   /**
    * Clear a depth-stencil surface.
    * \param clear_flags  bitfield of PIPE_CLEAR_DEPTH/STENCIL values.
    * \param depth  depth clear value in [0,1].
    * \param stencil  stencil clear value
    */
   void (*clear_depth_stencil)(struct pipe_context *pipe,
                               struct pipe_surface *dst,
                               unsigned clear_flags,
                               double depth,
                               unsigned stencil,
                               unsigned dstx, unsigned dsty,
                               unsigned width, unsigned height,
                               bool render_condition_enabled);

   /**
    * Clear the texture with the specified texel. The format is not
    * guaranteed to be renderable. The data is provided in the resource's
    * format.
    */
   void (*clear_texture)(struct pipe_context *pipe,
                         struct pipe_resource *res,
                         unsigned level,
                         const struct pipe_box *box,
                         const void *data);

   /**
    * Clear a buffer. Runs a memset over the specified region with the element
    * value passed in through clear_value of size clear_value_size.
    */
   void (*clear_buffer)(struct pipe_context *pipe,
                        struct pipe_resource *res,
                        unsigned offset,
                        unsigned size,
                        const void *clear_value,
                        int clear_value_size);

   /**
    * If a depth buffer is rendered with different sample location state than
    * what is current at the time of reading, the values may differ because
    * depth buffer compression can depend on the sample locations.
    *
    * This function is a hint to decompress the current depth buffer to avoid
    * such problems.
    */
   void (*evaluate_depth_buffer)(struct pipe_context *pipe);

   /**
    * Flush draw commands.
    *
    * This guarantees that the new fence (if any) will finish in finite time,
    * unless PIPE_FLUSH_DEFERRED is used.
    *
    * Subsequent operations on other contexts of the same screen are guaranteed
    * to execute after the flushed commands, unless PIPE_FLUSH_ASYNC is used.
    *
    * NOTE: use screen->fence_reference() (or equivalent) to transfer the
    * new fence ref to *fence, to ensure that the previous fence is unref'd.
    *
    * \param fence  if not NULL, an old fence to unref and transfer a
    *               new fence reference to
    * \param flags  bitfield of enum pipe_flush_flags values.
    */
   void (*flush)(struct pipe_context *pipe,
                 struct pipe_fence_handle **fence,
                 unsigned flags);

   /**
    * Create a fence from an fd.
    *
    * This is used for importing a foreign/external fence fd.
    *
    * \param fence  if not NULL, an old fence to unref and transfer a
    *               new fence reference to
    * \param fd  fd representing the fence object
    * \param type  indicates which fence type backs the fd
    */
   void (*create_fence_fd)(struct pipe_context *pipe,
                           struct pipe_fence_handle **fence,
                           int fd,
                           enum pipe_fd_type type);

   /**
    * Insert commands to have the GPU wait for the fence to be signaled.
    */
   void (*fence_server_sync)(struct pipe_context *pipe,
                             struct pipe_fence_handle *fence);

   /**
    * Insert commands to have the GPU signal a fence.
    */
   void (*fence_server_signal)(struct pipe_context *pipe,
                               struct pipe_fence_handle *fence);
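
   /*
    * Example (informative, not part of the interface): flushing and waiting
    * on the returned fence. fence_reference() and fence_finish() are
    * pipe_screen methods; OS_TIMEOUT_INFINITE from the util headers is
    * assumed as the "wait forever" timeout value.
    *
    *    struct pipe_fence_handle *fence = NULL;
    *
    *    ctx->flush(ctx, &fence, 0);
    *    if (fence) {
    *       screen->fence_finish(screen, ctx, fence, OS_TIMEOUT_INFINITE);
    *       screen->fence_reference(screen, &fence, NULL);  // unref
    *    }
    */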

   /**
    * Create a view on a texture to be used by a shader stage.
    */
   struct pipe_sampler_view * (*create_sampler_view)(struct pipe_context *ctx,
                                                     struct pipe_resource *texture,
                                                     const struct pipe_sampler_view *templat);

   /**
    * Destroy a view on a texture.
    *
    * \param ctx  the current context
    * \param view  the view to be destroyed
    *
    * \note The current context may not be the context in which the view was
    *       created (view->context). However, the caller must guarantee that
    *       the context which created the view is still alive.
    */
   void (*sampler_view_destroy)(struct pipe_context *ctx,
                                struct pipe_sampler_view *view);


   /**
    * Get a surface which is a "view" into a resource, used by
    * render target / depth stencil stages.
    */
   struct pipe_surface *(*create_surface)(struct pipe_context *ctx,
                                          struct pipe_resource *resource,
                                          const struct pipe_surface *templat);

   void (*surface_destroy)(struct pipe_context *ctx,
                           struct pipe_surface *);


   /**
    * Map a resource.
    *
    * Transfers are (by default) context-private and allow uploads to be
    * interleaved with rendering.
    *
    * out_transfer will contain the transfer object that must be passed
    * to all the other transfer functions. It also contains useful
    * information (like texture strides for texture_map).
    */
   void *(*buffer_map)(struct pipe_context *,
                       struct pipe_resource *resource,
                       unsigned level,
                       unsigned usage,  /* a combination of PIPE_MAP_x */
                       const struct pipe_box *,
                       struct pipe_transfer **out_transfer);

   /* If transfer was created with WRITE|FLUSH_EXPLICIT, only the
    * regions specified with this call are guaranteed to be written to
    * the resource.
    */
   void (*transfer_flush_region)(struct pipe_context *,
                                 struct pipe_transfer *transfer,
                                 const struct pipe_box *);

   void (*buffer_unmap)(struct pipe_context *,
                        struct pipe_transfer *transfer);

   void *(*texture_map)(struct pipe_context *,
                        struct pipe_resource *resource,
                        unsigned level,
                        unsigned usage,  /* a combination of PIPE_MAP_x */
                        const struct pipe_box *,
                        struct pipe_transfer **out_transfer);

   void (*texture_unmap)(struct pipe_context *,
                         struct pipe_transfer *transfer);

   /* One-shot transfer operation with data supplied in a user
    * pointer.
    */
   void (*buffer_subdata)(struct pipe_context *,
                          struct pipe_resource *,
                          unsigned usage,  /* a combination of PIPE_MAP_x */
                          unsigned offset,
                          unsigned size,
                          const void *data);

   void (*texture_subdata)(struct pipe_context *,
                           struct pipe_resource *,
                           unsigned level,
                           unsigned usage,  /* a combination of PIPE_MAP_x */
                           const struct pipe_box *,
                           const void *data,
                           unsigned stride,
                           uintptr_t layer_stride);

   /**
    * Flush any pending framebuffer writes and invalidate texture caches.
    */
   void (*texture_barrier)(struct pipe_context *, unsigned flags);

   /**
    * Flush caches according to flags.
    */
   void (*memory_barrier)(struct pipe_context *, unsigned flags);
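
   /*
    * Example (informative, not part of the interface): writing to a buffer
    * through a transfer map. u_box_1d() is a small helper assumed from
    * util/u_box.h; PIPE_MAP_* come from p_defines.h; "buf", "data" and
    * "size" are supplied by the caller.
    *
    *    struct pipe_box box;
    *    u_box_1d(0, size, &box);  // offset 0, "size" bytes
    *
    *    struct pipe_transfer *transfer;
    *    void *map = ctx->buffer_map(ctx, buf, 0,
    *                                PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
    *                                &box, &transfer);
    *    if (map) {
    *       memcpy(map, data, size);
    *       ctx->buffer_unmap(ctx, transfer);
    *    }
    */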

   /**
    * Change the commitment status of a part of the given resource, which must
    * have been created with the PIPE_RESOURCE_FLAG_SPARSE bit.
    *
    * \param level  The texture level whose commitment should be changed.
    * \param box  The region of the resource whose commitment should be
    *             changed.
    * \param commit  Whether memory should be committed or un-committed.
    *
    * \return false if out of memory, true on success.
    */
   bool (*resource_commit)(struct pipe_context *, struct pipe_resource *,
                           unsigned level, struct pipe_box *box, bool commit);

   /**
    * Creates a video codec for a specific video format/profile
    */
   struct pipe_video_codec *(*create_video_codec)(struct pipe_context *context,
                                                  const struct pipe_video_codec *templat);

   /**
    * Creates a video buffer as decoding target
    */
   struct pipe_video_buffer *(*create_video_buffer)(struct pipe_context *context,
                                                    const struct pipe_video_buffer *templat);

   /**
    * Compute kernel execution
    */
   /*@{*/
   /**
    * Define the compute program and parameters to be used by
    * pipe_context::launch_grid.
    */
   void *(*create_compute_state)(struct pipe_context *context,
                                 const struct pipe_compute_state *);
   void (*bind_compute_state)(struct pipe_context *, void *);
   void (*delete_compute_state)(struct pipe_context *, void *);

   void (*get_compute_state_info)(struct pipe_context *, void *,
                                  struct pipe_compute_state_object_info *);

   uint32_t (*get_compute_state_subgroup_size)(struct pipe_context *, void *,
                                               const uint32_t block[3]);

   /**
    * Bind an array of shader resources that will be used by the
    * compute program. Any resources that were previously bound to
    * the specified range will be unbound after this call.
    *
    * \param start  first resource to bind.
    * \param count  number of consecutive resources to bind.
    * \param resources  array of pointers to the resources to bind, it
    *                   should contain at least \a count elements
    *                   unless it's NULL, in which case no new
    *                   resources will be bound.
    */
   void (*set_compute_resources)(struct pipe_context *,
                                 unsigned start, unsigned count,
                                 struct pipe_surface **resources);

   /**
    * Bind an array of buffers to be mapped into the address space of
    * the GLOBAL resource. Any buffers that were previously bound
    * between [first, first + count - 1] are unbound after this call.
    *
    * \param first  first buffer to map.
    * \param count  number of consecutive buffers to map.
    * \param resources  array of pointers to the buffers to map, it
    *                   should contain at least \a count elements
    *                   unless it's NULL, in which case no new
    *                   resources will be bound.
    * \param handles  array of pointers to the memory locations that
    *                 will be updated with the address each buffer
    *                 will be mapped to. The base memory address of
    *                 each of the buffers will be added to the value
    *                 pointed to by its corresponding handle to form
    *                 the final address argument. It should contain
    *                 at least \a count elements, unless \a
    *                 resources is NULL in which case \a handles
    *                 should be NULL as well.
    *
    * Note that the driver isn't required to make any guarantees about
    * the contents of the \a handles array being valid anytime except
    * during the subsequent calls to pipe_context::launch_grid. This
    * means that the only sensible location handles[i] may point to is
    * somewhere within the INPUT buffer itself. This is to accommodate
    * implementations that lack virtual memory but nevertheless migrate
    * buffers on the fly, leading to resource base addresses that change
    * on each kernel invocation or are unknown to the pipe driver.
    */
   void (*set_global_binding)(struct pipe_context *context,
                              unsigned first, unsigned count,
                              struct pipe_resource **resources,
                              uint32_t **handles);
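
   /*
    * Example (informative, not part of the interface): dispatching a
    * compute kernel via launch_grid (declared just below). "cs_template"
    * is a hypothetical, already-filled pipe_compute_state; the
    * pipe_grid_info layout comes from p_state.h.
    *
    *    void *cs = ctx->create_compute_state(ctx, &cs_template);
    *    ctx->bind_compute_state(ctx, cs);
    *
    *    struct pipe_grid_info grid;
    *    memset(&grid, 0, sizeof(grid));
    *    grid.block[0] = 64;  grid.block[1] = 1;  grid.block[2] = 1;
    *    grid.grid[0]  = 16;  grid.grid[1]  = 1;  grid.grid[2]  = 1;
    *
    *    ctx->launch_grid(ctx, &grid);
    */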

   /**
    * Launch the compute kernel starting from instruction \a pc of the
    * currently bound compute program.
    */
   void (*launch_grid)(struct pipe_context *context,
                       const struct pipe_grid_info *info);

   void (*draw_mesh_tasks)(struct pipe_context *context,
                           unsigned drawid_offset,
                           const struct pipe_grid_info *info);
   /*@}*/

   /**
    * SVM (Shared Virtual Memory) helpers
    */
   /*@{*/
   /**
    * Migrate a range of virtual addresses to device or host memory.
    *
    * \param to_device - true if the virtual memory is migrated to the device
    *                    false if the virtual memory is migrated to the host
    * \param content_undefined - whether the content of the migrated memory
    *                            is undefined after migration
    */
   void (*svm_migrate)(struct pipe_context *context, unsigned num_ptrs,
                       const void* const* ptrs, const size_t *sizes,
                       bool to_device, bool content_undefined);
   /*@}*/

   /**
    * Get the default sample position for an individual sample point.
    *
    * \param sample_count - total number of samples
    * \param sample_index - sample to get the position values for
    * \param out_value - return value of 2 floats for x and y position for
    *                    requested sample.
    */
   void (*get_sample_position)(struct pipe_context *context,
                               unsigned sample_count,
                               unsigned sample_index,
                               float *out_value);

   /**
    * Query a timestamp in nanoseconds. This is completely equivalent to
    * pipe_screen::get_timestamp() but takes a context handle for drivers
    * that require a context.
    */
   uint64_t (*get_timestamp)(struct pipe_context *);

   /**
    * Flush the resource cache, so that the resource can be used
    * by an external client. Possible usage:
    * - flushing a resource before presenting it on the screen
    * - flushing a resource if some other process or device wants to use it
    * This shouldn't be used to flush caches if the resource is only managed
    * by a single pipe_screen and is not shared with another process.
    * (i.e. you shouldn't use it to flush caches explicitly if you want to e.g.
    * use the resource for texturing)
    */
   void (*flush_resource)(struct pipe_context *ctx,
                          struct pipe_resource *resource);

   /**
    * Invalidate the contents of the resource. This is used to:
    *
    * (1) implement EGL's semantic of undefined depth/stencil
    *     contents after a swapbuffers. This allows a tiled renderer (for
    *     example) to not store the depth buffer.
    *
    * (2) implement GL's InvalidateBufferData. For backwards compatibility,
    *     you must only rely on the usability for this purpose when
    *     pipe_caps.invalidate_buffer is enabled.
    */
   void (*invalidate_resource)(struct pipe_context *ctx,
                               struct pipe_resource *resource);

   /**
    * Return information about unexpected device resets.
    */
   enum pipe_reset_status (*get_device_reset_status)(struct pipe_context *ctx);

   /**
    * Sets the reset status callback. If the pointer is null, then no callback
    * is set, otherwise a copy of the data should be made.
    */
   void (*set_device_reset_callback)(struct pipe_context *ctx,
                                     const struct pipe_device_reset_callback *cb);
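
   /*
    * Example (informative, not part of the interface): installing a reset
    * callback. The pipe_device_reset_callback layout (a "reset" function
    * pointer plus a "data" cookie) is assumed from p_state.h;
    * "on_device_reset" and "my_context" are hypothetical frontend names.
    *
    *    static void on_device_reset(void *data, enum pipe_reset_status status)
    *    {
    *       // e.g. mark the frontend context as lost
    *    }
    *
    *    struct pipe_device_reset_callback cb = {
    *       .reset = on_device_reset,
    *       .data = my_context,
    *    };
    *    ctx->set_device_reset_callback(ctx, &cb);
    */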

   /**
    * Dump driver-specific debug information into a stream. This is
    * used by debugging tools.
    *
    * \param ctx  pipe context
    * \param stream  where the output should be written to
    * \param flags  a mask of PIPE_DUMP_* flags
    */
   void (*dump_debug_state)(struct pipe_context *ctx, FILE *stream,
                            unsigned flags);

   /**
    * Set the log context to which the driver should write internal debug logs
    * (internal states, command streams).
    *
    * The caller must ensure that the log context is destroyed and reset to
    * NULL before the pipe context is destroyed, and that log context functions
    * are only called from the driver thread.
    *
    * \param ctx  pipe context
    * \param log  logging context
    */
   void (*set_log_context)(struct pipe_context *ctx, struct u_log_context *log);

   /**
    * Emit a string marker in the cmdstream.
    */
   void (*emit_string_marker)(struct pipe_context *ctx,
                              const char *string,
                              int len);

   /**
    * Generate mipmap.
    * \return TRUE if mipmap generation succeeds, FALSE otherwise
    */
   bool (*generate_mipmap)(struct pipe_context *ctx,
                           struct pipe_resource *resource,
                           enum pipe_format format,
                           unsigned base_level,
                           unsigned last_level,
                           unsigned first_layer,
                           unsigned last_layer);

   /**
    * Create a 64-bit texture handle.
    *
    * \param ctx  pipe context
    * \param view  pipe sampler view object
    * \param state  pipe sampler state template
    * \return a 64-bit texture handle on success, 0 otherwise
    */
   uint64_t (*create_texture_handle)(struct pipe_context *ctx,
                                     struct pipe_sampler_view *view,
                                     const struct pipe_sampler_state *state);

   /**
    * Delete a texture handle.
    *
    * \param ctx  pipe context
    * \param handle  64-bit texture handle
    */
   void (*delete_texture_handle)(struct pipe_context *ctx, uint64_t handle);

   /**
    * Make a texture handle resident.
    *
    * \param ctx  pipe context
    * \param handle  64-bit texture handle
    * \param resident  TRUE for resident, FALSE otherwise
    */
   void (*make_texture_handle_resident)(struct pipe_context *ctx,
                                        uint64_t handle, bool resident);

   /**
    * Create a 64-bit image handle.
    *
    * \param ctx  pipe context
    * \param image  pipe image view template
    * \return a 64-bit image handle on success, 0 otherwise
    */
   uint64_t (*create_image_handle)(struct pipe_context *ctx,
                                   const struct pipe_image_view *image);

   /**
    * Delete an image handle.
    *
    * \param ctx  pipe context
    * \param handle  64-bit image handle
    */
   void (*delete_image_handle)(struct pipe_context *ctx, uint64_t handle);

   /**
    * Make an image handle resident.
    *
    * \param ctx  pipe context
    * \param handle  64-bit image handle
    * \param access  GL_READ_ONLY, GL_WRITE_ONLY or GL_READ_WRITE
    * \param resident  TRUE for resident, FALSE otherwise
    */
   void (*make_image_handle_resident)(struct pipe_context *ctx,
                                      uint64_t handle,
                                      unsigned access, bool resident);

   /**
    * Call the given function from the driver thread.
    *
    * This is set by threaded contexts for use by debugging wrappers.
    *
    * \param asap  if true, run the callback immediately if there are no
    *              pending commands to be processed by the driver thread
    */
   void (*callback)(struct pipe_context *ctx, void (*fn)(void *), void *data,
                    bool asap);
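
   /*
    * Example (informative, not part of the interface): typical bindless
    * texture handle lifetime, as used for ARB_bindless_texture with the
    * handle functions above. "view" and "sampler_templ" are a previously
    * created sampler view and sampler state template.
    *
    *    uint64_t handle = ctx->create_texture_handle(ctx, view, &sampler_templ);
    *    if (handle) {
    *       ctx->make_texture_handle_resident(ctx, handle, true);
    *       // ... draw using the handle in a shader ...
    *       ctx->make_texture_handle_resident(ctx, handle, false);
    *       ctx->delete_texture_handle(ctx, handle);
    *    }
    */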

   /**
    * Set a context parameter. See enum pipe_context_param for more details.
    */
   void (*set_context_param)(struct pipe_context *ctx,
                             enum pipe_context_param param,
                             unsigned value);

   /**
    * Creates a video buffer as decoding target, with modifiers.
    */
   struct pipe_video_buffer *(*create_video_buffer_with_modifiers)(struct pipe_context *context,
                                                                   const struct pipe_video_buffer *templat,
                                                                   const uint64_t *modifiers,
                                                                   unsigned int modifiers_count);

   /**
    * Creates a video buffer as decoding target, from external memory.
    */
   struct pipe_video_buffer *(*video_buffer_from_handle)(struct pipe_context *context,
                                                         const struct pipe_video_buffer *templat,
                                                         struct winsys_handle *handle,
                                                         unsigned usage);

   /**
    * Compiles an ML subgraph, to be executed later. The returned
    * pipe_ml_subgraph should contain all the information needed to execute
    * the subgraph with as little overhead as possible.
    *
    * \param ctx  pipe context
    * \param operations  array containing the definitions of the operations
    *                    in the graph
    * \param count  number of operations
    * \return a newly allocated pipe_ml_subgraph
    */
   struct pipe_ml_subgraph *(*ml_subgraph_create)(struct pipe_context *context,
                                                  const struct pipe_ml_operation *operations,
                                                  unsigned count);

   /**
    * Invokes an ML subgraph for the given input tensors.
    *
    * \param ctx  pipe context
    * \param subgraph  previously-compiled subgraph
    * \param inputs_count  number of input tensors to copy in
    * \param input_idxs  array with the indices of input tensors
    * \param inputs  array of buffers to copy the tensor data from
    * \param is_signed  per-buffer signed integer flag
    */
   void (*ml_subgraph_invoke)(struct pipe_context *context,
                              struct pipe_ml_subgraph *subgraph,
                              unsigned inputs_count,
                              unsigned input_idxs[],
                              void *inputs[], bool is_signed[]);

   /**
    * After an ML subgraph has been invoked, copy the contents of the output
    * tensors to the provided buffers.
    *
    * \param ctx  pipe context
    * \param subgraph  previously-executed subgraph
    * \param outputs_count  number of output tensors to copy out
    * \param output_idxs  array with the indices of output tensors
    * \param outputs  array of buffers to copy the tensor data to
    * \param is_signed  per-buffer signed integer flag
    */
   void (*ml_subgraph_read_output)(struct pipe_context *context,
                                   struct pipe_ml_subgraph *subgraph,
                                   unsigned outputs_count,
                                   unsigned output_idxs[],
                                   void *outputs[], bool is_signed[]);

   /**
    * Release all resources allocated by the implementation of
    * ml_subgraph_create.
    *
    * \param ctx  pipe context
    * \param subgraph  subgraph to release
    */
   void (*ml_subgraph_destroy)(struct pipe_context *context,
                               struct pipe_ml_subgraph *subgraph);
};


#ifdef __cplusplus
}
#endif

#endif /* PIPE_CONTEXT_H */