1 /************************************************************************** 2 * 3 * Copyright 2007 VMware, Inc. 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR 22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
25 * 26 **************************************************************************/ 27 28 #ifndef PIPE_CONTEXT_H 29 #define PIPE_CONTEXT_H 30 31 #include "p_compiler.h" 32 #include "p_format.h" 33 #include "p_video_enums.h" 34 #include "p_defines.h" 35 #include <stdio.h> 36 37 #ifdef __cplusplus 38 extern "C" { 39 #endif 40 41 42 struct pipe_blend_color; 43 struct pipe_blend_state; 44 struct pipe_blit_info; 45 struct pipe_box; 46 struct pipe_clip_state; 47 struct pipe_constant_buffer; 48 struct pipe_debug_callback; 49 struct pipe_depth_stencil_alpha_state; 50 struct pipe_device_reset_callback; 51 struct pipe_draw_info; 52 struct pipe_draw_indirect_info; 53 struct pipe_draw_start_count_bias; 54 struct pipe_draw_vertex_state_info; 55 struct pipe_grid_info; 56 struct pipe_fence_handle; 57 struct pipe_framebuffer_state; 58 struct pipe_image_view; 59 struct pipe_query; 60 struct pipe_poly_stipple; 61 struct pipe_rasterizer_state; 62 struct pipe_resolve_info; 63 struct pipe_resource; 64 struct pipe_sampler_state; 65 struct pipe_sampler_view; 66 struct pipe_scissor_state; 67 struct pipe_shader_buffer; 68 struct pipe_shader_state; 69 struct pipe_stencil_ref; 70 struct pipe_stream_output_target; 71 struct pipe_surface; 72 struct pipe_transfer; 73 struct pipe_vertex_buffer; 74 struct pipe_vertex_element; 75 struct pipe_vertex_state; 76 struct pipe_video_buffer; 77 struct pipe_video_codec; 78 struct pipe_viewport_state; 79 struct pipe_compute_state; 80 union pipe_color_union; 81 union pipe_query_result; 82 struct u_log_context; 83 struct u_upload_mgr; 84 85 /** 86 * Gallium rendering context. Basically: 87 * - state setting functions 88 * - VBO drawing functions 89 * - surface functions 90 */ 91 struct pipe_context { 92 struct pipe_screen *screen; 93 94 void *priv; /**< context private data (for DRI for example) */ 95 void *draw; /**< private, for draw module (temporary?) */ 96 97 /** 98 * Stream uploaders created by the driver. 
All drivers, gallium frontends, and 99 * modules should use them. 100 * 101 * Use u_upload_alloc or u_upload_data as many times as you want. 102 * Once you are done, use u_upload_unmap. 103 */ 104 struct u_upload_mgr *stream_uploader; /* everything but shader constants */ 105 struct u_upload_mgr *const_uploader; /* shader constants only */ 106 107 void (*destroy)( struct pipe_context * ); 108 109 /** 110 * VBO drawing 111 */ 112 /*@{*/ 113 /** 114 * Multi draw. 115 * 116 * For indirect multi draws, num_draws is 1 and indirect->draw_count 117 * is used instead. 118 * 119 * Caps: 120 * - Always supported: Direct multi draws 121 * - PIPE_CAP_MULTI_DRAW_INDIRECT: Indirect multi draws 122 * - PIPE_CAP_MULTI_DRAW_INDIRECT_PARAMS: Indirect draw count 123 * 124 * Differences against glMultiDraw and glMultiMode: 125 * - "info->mode" and "draws->index_bias" are always constant due to the lack 126 * of hardware support and CPU performance concerns. Only start and count 127 * vary. 128 * - if "info->increment_draw_id" is false, draw_id doesn't change between 129 * draws 130 * 131 * Direct multi draws are also generated by u_threaded_context, which looks 132 * ahead in gallium command buffers and merges single draws. 133 * 134 * \param pipe context 135 * \param info draw info 136 * \param drawid_offset offset to add for drawid param of each draw 137 * \param indirect indirect multi draws 138 * \param draws array of (start, count) pairs for direct draws 139 * \param num_draws number of direct draws; 1 for indirect multi draws 140 */ 141 void (*draw_vbo)(struct pipe_context *pipe, 142 const struct pipe_draw_info *info, 143 unsigned drawid_offset, 144 const struct pipe_draw_indirect_info *indirect, 145 const struct pipe_draw_start_count_bias *draws, 146 unsigned num_draws); 147 148 /** 149 * Multi draw for display lists. 150 * 151 * For more information, see pipe_vertex_state and 152 * pipe_draw_vertex_state_info. 153 * 154 * Explanation of partial_vertex_mask: 155 * 156 * 1. 
pipe_vertex_state::input::elements have a monotonic logical index 157 * determined by pipe_vertex_state::input::full_velem_mask, specifically, 158 * the position of the i-th bit set is the logical index of the i-th 159 * vertex element, up to 31. 160 * 161 * 2. pipe_vertex_state::input::partial_velem_mask is a subset of 162 * full_velem_mask where the bits set determine which vertex elements 163 * should be bound contiguously. The vertex elements corresponding to 164 * the bits not set in partial_velem_mask should be ignored. 165 * 166 * Those two allow creating pipe_vertex_state that has more vertex 167 * attributes than the vertex shader has inputs. The idea is that 168 * pipe_vertex_state can be used with any vertex shader that has the same 169 * number of inputs and same logical indices or less. This may sound like 170 * an overly complicated way to bind a subset of vertex elements, but it 171 * actually simplifies everything else: 172 * 173 * - In st/mesa, full_velem_mask is exactly the mask of enabled vertex 174 * attributes (VERT_ATTRIB_x) in the display list VAO, while 175 * partial_velem_mask is exactly the inputs_read mask of the vertex 176 * shader (also VERT_ATTRIB_x). 177 * 178 * - In the driver, some bit ops and popcnt is needed to assemble vertex 179 * elements very quickly. 
180 */ 181 void (*draw_vertex_state)(struct pipe_context *ctx, 182 struct pipe_vertex_state *state, 183 uint32_t partial_velem_mask, 184 struct pipe_draw_vertex_state_info info, 185 const struct pipe_draw_start_count_bias *draws, 186 unsigned num_draws); 187 /*@}*/ 188 189 /** 190 * Predicate subsequent rendering on occlusion query result 191 * \param query the query predicate, or NULL if no predicate 192 * \param condition whether to skip on FALSE or TRUE query results 193 * \param mode one of PIPE_RENDER_COND_x 194 */ 195 void (*render_condition)( struct pipe_context *pipe, 196 struct pipe_query *query, 197 bool condition, 198 enum pipe_render_cond_flag mode ); 199 200 /** 201 * Predicate subsequent rendering on a value in a buffer 202 * \param buffer The buffer to query for the value 203 * \param offset Offset in the buffer to query 32-bit 204 * \param condition whether to skip on FALSE or TRUE query results 205 */ 206 void (*render_condition_mem)( struct pipe_context *pipe, 207 struct pipe_resource *buffer, 208 uint32_t offset, 209 bool condition ); 210 /** 211 * Query objects 212 */ 213 /*@{*/ 214 struct pipe_query *(*create_query)( struct pipe_context *pipe, 215 unsigned query_type, 216 unsigned index ); 217 218 /** 219 * Create a query object that queries all given query types simultaneously. 220 * 221 * This can only be used for those query types for which 222 * get_driver_query_info indicates that it must be used. Only one batch 223 * query object may be active at a time. 224 * 225 * There may be additional constraints on which query types can be used 226 * together, in particular those that are implied by 227 * get_driver_query_group_info. 228 * 229 * \param num_queries the number of query types 230 * \param query_types array of \p num_queries query types 231 * \return a query object, or NULL on error. 
232 */ 233 struct pipe_query *(*create_batch_query)( struct pipe_context *pipe, 234 unsigned num_queries, 235 unsigned *query_types ); 236 237 void (*destroy_query)(struct pipe_context *pipe, 238 struct pipe_query *q); 239 240 bool (*begin_query)(struct pipe_context *pipe, struct pipe_query *q); 241 bool (*end_query)(struct pipe_context *pipe, struct pipe_query *q); 242 243 /** 244 * Get results of a query. 245 * \param wait if true, this query will block until the result is ready 246 * \return TRUE if results are ready, FALSE otherwise 247 */ 248 bool (*get_query_result)(struct pipe_context *pipe, 249 struct pipe_query *q, 250 bool wait, 251 union pipe_query_result *result); 252 253 /** 254 * Get results of a query, storing into resource. Note that this may not 255 * be used with batch queries. 256 * 257 * \param wait if true, this query will block until the result is ready 258 * \param result_type the type of the value being stored: 259 * \param index for queries that return multiple pieces of data, which 260 * item of that data to store (e.g. for 261 * PIPE_QUERY_PIPELINE_STATISTICS). 262 * When the index is -1, instead of the value of the query 263 * the driver should instead write a 1 or 0 to the appropriate 264 * location with 1 meaning that the query result is available. 265 */ 266 void (*get_query_result_resource)(struct pipe_context *pipe, 267 struct pipe_query *q, 268 bool wait, 269 enum pipe_query_value_type result_type, 270 int index, 271 struct pipe_resource *resource, 272 unsigned offset); 273 274 /** 275 * Set whether all current non-driver queries except TIME_ELAPSED are 276 * active or paused. 
277 */ 278 void (*set_active_query_state)(struct pipe_context *pipe, bool enable); 279 280 /** 281 * INTEL Performance Query 282 */ 283 /*@{*/ 284 285 unsigned (*init_intel_perf_query_info)(struct pipe_context *pipe); 286 287 void (*get_intel_perf_query_info)(struct pipe_context *pipe, 288 unsigned query_index, 289 const char **name, 290 uint32_t *data_size, 291 uint32_t *n_counters, 292 uint32_t *n_active); 293 294 void (*get_intel_perf_query_counter_info)(struct pipe_context *pipe, 295 unsigned query_index, 296 unsigned counter_index, 297 const char **name, 298 const char **desc, 299 uint32_t *offset, 300 uint32_t *data_size, 301 uint32_t *type_enum, 302 uint32_t *data_type_enum, 303 uint64_t *raw_max); 304 305 struct pipe_query *(*new_intel_perf_query_obj)(struct pipe_context *pipe, 306 unsigned query_index); 307 308 bool (*begin_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q); 309 310 void (*end_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q); 311 312 void (*delete_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q); 313 314 void (*wait_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q); 315 316 bool (*is_intel_perf_query_ready)(struct pipe_context *pipe, struct pipe_query *q); 317 318 bool (*get_intel_perf_query_data)(struct pipe_context *pipe, 319 struct pipe_query *q, 320 size_t data_size, 321 uint32_t *data, 322 uint32_t *bytes_written); 323 324 /*@}*/ 325 326 /** 327 * State functions (create/bind/destroy state objects) 328 */ 329 /*@{*/ 330 void * (*create_blend_state)(struct pipe_context *, 331 const struct pipe_blend_state *); 332 void (*bind_blend_state)(struct pipe_context *, void *); 333 void (*delete_blend_state)(struct pipe_context *, void *); 334 335 void * (*create_sampler_state)(struct pipe_context *, 336 const struct pipe_sampler_state *); 337 void (*bind_sampler_states)(struct pipe_context *, 338 enum pipe_shader_type shader, 339 unsigned start_slot, unsigned num_samplers, 
340 void **samplers); 341 void (*delete_sampler_state)(struct pipe_context *, void *); 342 343 void * (*create_rasterizer_state)(struct pipe_context *, 344 const struct pipe_rasterizer_state *); 345 void (*bind_rasterizer_state)(struct pipe_context *, void *); 346 void (*delete_rasterizer_state)(struct pipe_context *, void *); 347 348 void * (*create_depth_stencil_alpha_state)(struct pipe_context *, 349 const struct pipe_depth_stencil_alpha_state *); 350 void (*bind_depth_stencil_alpha_state)(struct pipe_context *, void *); 351 void (*delete_depth_stencil_alpha_state)(struct pipe_context *, void *); 352 353 void * (*create_fs_state)(struct pipe_context *, 354 const struct pipe_shader_state *); 355 void (*bind_fs_state)(struct pipe_context *, void *); 356 void (*delete_fs_state)(struct pipe_context *, void *); 357 358 void * (*create_vs_state)(struct pipe_context *, 359 const struct pipe_shader_state *); 360 void (*bind_vs_state)(struct pipe_context *, void *); 361 void (*delete_vs_state)(struct pipe_context *, void *); 362 363 void * (*create_gs_state)(struct pipe_context *, 364 const struct pipe_shader_state *); 365 void (*bind_gs_state)(struct pipe_context *, void *); 366 void (*delete_gs_state)(struct pipe_context *, void *); 367 368 void * (*create_tcs_state)(struct pipe_context *, 369 const struct pipe_shader_state *); 370 void (*bind_tcs_state)(struct pipe_context *, void *); 371 void (*delete_tcs_state)(struct pipe_context *, void *); 372 373 void * (*create_tes_state)(struct pipe_context *, 374 const struct pipe_shader_state *); 375 void (*bind_tes_state)(struct pipe_context *, void *); 376 void (*delete_tes_state)(struct pipe_context *, void *); 377 378 void * (*create_vertex_elements_state)(struct pipe_context *, 379 unsigned num_elements, 380 const struct pipe_vertex_element *); 381 void (*bind_vertex_elements_state)(struct pipe_context *, void *); 382 void (*delete_vertex_elements_state)(struct pipe_context *, void *); 383 384 /*@}*/ 385 386 /** 387 * 
Parameter-like state (or properties) 388 */ 389 /*@{*/ 390 void (*set_blend_color)( struct pipe_context *, 391 const struct pipe_blend_color * ); 392 393 void (*set_stencil_ref)( struct pipe_context *, 394 const struct pipe_stencil_ref ref); 395 396 void (*set_sample_mask)( struct pipe_context *, 397 unsigned sample_mask ); 398 399 void (*set_min_samples)( struct pipe_context *, 400 unsigned min_samples ); 401 402 void (*set_clip_state)( struct pipe_context *, 403 const struct pipe_clip_state * ); 404 405 /** 406 * Set constant buffer 407 * 408 * \param shader Shader stage 409 * \param index Buffer binding slot index within a shader stage 410 * \param take_ownership The callee takes ownership of the buffer reference. 411 * (the callee shouldn't increment the ref count) 412 * \param buf Constant buffer parameters 413 */ 414 void (*set_constant_buffer)( struct pipe_context *, 415 enum pipe_shader_type shader, uint index, 416 bool take_ownership, 417 const struct pipe_constant_buffer *buf ); 418 419 /** 420 * Set inlinable constants for constant buffer 0. 421 * 422 * These are constants that the driver would like to inline in the IR 423 * of the current shader and recompile it. Drivers can determine which 424 * constants they prefer to inline in finalize_nir and store that 425 * information in shader_info::*inlinable_uniform*. When the state tracker 426 * or frontend uploads constants to a constant buffer, it can pass 427 * inlinable constants separately via this call. 428 * 429 * Any set_constant_buffer call invalidates this state, so this function 430 * must be called after it. Binding a shader also invalidates this state. 431 * 432 * There is no PIPE_CAP for this. Drivers shouldn't set the shader_info 433 * fields if they don't want this or if they don't implement this. 
434 */ 435 void (*set_inlinable_constants)( struct pipe_context *, 436 enum pipe_shader_type shader, 437 uint num_values, uint32_t *values ); 438 439 void (*set_framebuffer_state)( struct pipe_context *, 440 const struct pipe_framebuffer_state * ); 441 442 /** 443 * Set the sample locations used during rasterization. When NULL or sized 444 * zero, the default locations are used. 445 * 446 * Note that get_sample_position() still returns the default locations. 447 * 448 * The samples are accessed with 449 * locations[(pixel_y*grid_w+pixel_x)*ms+i], 450 * where: 451 * ms = the sample count 452 * grid_w = the pixel grid width for the sample count 453 * grid_h = the pixel grid height for the sample count 454 * pixel_x = the window x coordinate modulo grid_w 455 * pixel_y = the window y coordinate modulo grid_h 456 * i = the sample index 457 * This gives a result with the x coordinate as the low 4 bits and the y 458 * coordinate as the high 4 bits. For each coordinate 0 is the left or top 459 * edge of the pixel's rectangle and 16 (not 15) is the right or bottom edge. 460 * 461 * Out of bounds accesses return undefined values. 462 * 463 * The pixel grid is used to vary sample locations across pixels and its 464 * size can be queried with get_sample_pixel_grid(). 
465 */ 466 void (*set_sample_locations)( struct pipe_context *, 467 size_t size, const uint8_t *locations ); 468 469 void (*set_polygon_stipple)( struct pipe_context *, 470 const struct pipe_poly_stipple * ); 471 472 void (*set_scissor_states)( struct pipe_context *, 473 unsigned start_slot, 474 unsigned num_scissors, 475 const struct pipe_scissor_state * ); 476 477 void (*set_window_rectangles)( struct pipe_context *, 478 bool include, 479 unsigned num_rectangles, 480 const struct pipe_scissor_state * ); 481 482 void (*set_viewport_states)( struct pipe_context *, 483 unsigned start_slot, 484 unsigned num_viewports, 485 const struct pipe_viewport_state *); 486 487 void (*set_sampler_views)(struct pipe_context *, 488 enum pipe_shader_type shader, 489 unsigned start_slot, unsigned num_views, 490 unsigned unbind_num_trailing_slots, 491 bool take_ownership, 492 struct pipe_sampler_view **views); 493 494 void (*set_tess_state)(struct pipe_context *, 495 const float default_outer_level[4], 496 const float default_inner_level[2]); 497 498 /** 499 * Set the number of vertices per input patch for tessellation. 500 */ 501 void (*set_patch_vertices)(struct pipe_context *ctx, uint8_t patch_vertices); 502 503 /** 504 * Sets the debug callback. If the pointer is null, then no callback is 505 * set, otherwise a copy of the data should be made. 506 */ 507 void (*set_debug_callback)(struct pipe_context *, 508 const struct pipe_debug_callback *); 509 510 /** 511 * Bind an array of shader buffers that will be used by a shader. 512 * Any buffers that were previously bound to the specified range 513 * will be unbound. 514 * 515 * \param shader selects shader stage 516 * \param start_slot first buffer slot to bind. 517 * \param count number of consecutive buffers to bind. 518 * \param buffers array of pointers to the buffers to bind, it 519 * should contain at least \a count elements 520 * unless it's NULL, in which case no buffers will 521 * be bound. 
522 * \param writable_bitmask If bit i is not set, buffers[i] will only be 523 * used with loads. If unsure, set to ~0. 524 */ 525 void (*set_shader_buffers)(struct pipe_context *, 526 enum pipe_shader_type shader, 527 unsigned start_slot, unsigned count, 528 const struct pipe_shader_buffer *buffers, 529 unsigned writable_bitmask); 530 531 /** 532 * Bind an array of hw atomic buffers for use by all shaders. 533 * Any buffers that were previously bound to the specified range 534 * will be unbound. 535 * 536 * \param start_slot first buffer slot to bind. 537 * \param count number of consecutive buffers to bind. 538 * \param buffers array of pointers to the buffers to bind, it 539 * should contain at least \a count elements 540 * unless it's NULL, in which case no buffers will 541 * be bound. 542 */ 543 void (*set_hw_atomic_buffers)(struct pipe_context *, 544 unsigned start_slot, unsigned count, 545 const struct pipe_shader_buffer *buffers); 546 547 /** 548 * Bind an array of images that will be used by a shader. 549 * Any images that were previously bound to the specified range 550 * will be unbound. 551 * 552 * \param shader selects shader stage 553 * \param start_slot first image slot to bind. 554 * \param count number of consecutive images to bind. 555 * \param unbind_num_trailing_slots number of images to unbind after 556 * the bound slot 557 * \param images array of the images to bind, it 558 * should contain at least \a count elements 559 * unless it's NULL, in which case no images will 560 * be bound. 561 */ 562 void (*set_shader_images)(struct pipe_context *, 563 enum pipe_shader_type shader, 564 unsigned start_slot, unsigned count, 565 unsigned unbind_num_trailing_slots, 566 const struct pipe_image_view *images); 567 568 /** 569 * Bind an array of vertex buffers to the specified slots. 570 * 571 * \param start_slot first vertex buffer slot 572 * \param num_buffers number of consecutive vertex buffers to bind. 
573 * \param unbind_num_trailing_slots unbind slots after the bound slots 574 * \param take_ownership the caller holds buffer references and they 575 * should be taken over by the callee. This means 576 * that drivers shouldn't increment reference counts. 577 * \param buffers array of the buffers to bind 578 */ 579 void (*set_vertex_buffers)( struct pipe_context *, 580 unsigned start_slot, 581 unsigned num_buffers, 582 unsigned unbind_num_trailing_slots, 583 bool take_ownership, 584 const struct pipe_vertex_buffer * ); 585 586 /*@}*/ 587 588 /** 589 * Stream output functions. 590 */ 591 /*@{*/ 592 593 struct pipe_stream_output_target *(*create_stream_output_target)( 594 struct pipe_context *, 595 struct pipe_resource *, 596 unsigned buffer_offset, 597 unsigned buffer_size); 598 599 void (*stream_output_target_destroy)(struct pipe_context *, 600 struct pipe_stream_output_target *); 601 602 void (*set_stream_output_targets)(struct pipe_context *, 603 unsigned num_targets, 604 struct pipe_stream_output_target **targets, 605 const unsigned *offsets); 606 607 uint32_t (*stream_output_target_offset)(struct pipe_stream_output_target *target); 608 609 /*@}*/ 610 611 612 /** 613 * INTEL_blackhole_render 614 */ 615 /*@{*/ 616 617 void (*set_frontend_noop)(struct pipe_context *, 618 bool enable); 619 620 /*@}*/ 621 622 623 /** 624 * Resource functions for blit-like functionality 625 * 626 * If a driver supports multisampling, blit must implement color resolve. 627 */ 628 /*@{*/ 629 630 /** 631 * Copy a block of pixels from one resource to another. 632 * The resource must be of the same format. 633 * Resources with nr_samples > 1 are not allowed. 634 */ 635 void (*resource_copy_region)(struct pipe_context *pipe, 636 struct pipe_resource *dst, 637 unsigned dst_level, 638 unsigned dstx, unsigned dsty, unsigned dstz, 639 struct pipe_resource *src, 640 unsigned src_level, 641 const struct pipe_box *src_box); 642 643 /* Optimal hardware path for blitting pixels. 
644 * Scaling, format conversion, up- and downsampling (resolve) are allowed. 645 */ 646 void (*blit)(struct pipe_context *pipe, 647 const struct pipe_blit_info *info); 648 649 /*@}*/ 650 651 /** 652 * Clear the specified set of currently bound buffers to specified values. 653 * The entire buffers are cleared (no scissor, no colormask, etc). 654 * 655 * \param buffers bitfield of PIPE_CLEAR_* values. 656 * \param scissor_state the scissored region to clear 657 * \param color pointer to a union of fiu array for each of r, g, b, a. 658 * \param depth depth clear value in [0,1]. 659 * \param stencil stencil clear value 660 */ 661 void (*clear)(struct pipe_context *pipe, 662 unsigned buffers, 663 const struct pipe_scissor_state *scissor_state, 664 const union pipe_color_union *color, 665 double depth, 666 unsigned stencil); 667 668 /** 669 * Clear a color rendertarget surface. 670 * \param color pointer to a union of fiu array for each of r, g, b, a. 671 */ 672 void (*clear_render_target)(struct pipe_context *pipe, 673 struct pipe_surface *dst, 674 const union pipe_color_union *color, 675 unsigned dstx, unsigned dsty, 676 unsigned width, unsigned height, 677 bool render_condition_enabled); 678 679 /** 680 * Clear a depth-stencil surface. 681 * \param clear_flags bitfield of PIPE_CLEAR_DEPTH/STENCIL values. 682 * \param depth depth clear value in [0,1]. 683 * \param stencil stencil clear value 684 */ 685 void (*clear_depth_stencil)(struct pipe_context *pipe, 686 struct pipe_surface *dst, 687 unsigned clear_flags, 688 double depth, 689 unsigned stencil, 690 unsigned dstx, unsigned dsty, 691 unsigned width, unsigned height, 692 bool render_condition_enabled); 693 694 /** 695 * Clear the texture with the specified texel. Not guaranteed to be a 696 * renderable format. Data provided in the resource's format. 
697 */ 698 void (*clear_texture)(struct pipe_context *pipe, 699 struct pipe_resource *res, 700 unsigned level, 701 const struct pipe_box *box, 702 const void *data); 703 704 /** 705 * Clear a buffer. Runs a memset over the specified region with the element 706 * value passed in through clear_value of size clear_value_size. 707 */ 708 void (*clear_buffer)(struct pipe_context *pipe, 709 struct pipe_resource *res, 710 unsigned offset, 711 unsigned size, 712 const void *clear_value, 713 int clear_value_size); 714 715 /** 716 * If a depth buffer is rendered with different sample location state than 717 * what is current at the time of reading, the values may differ because 718 * depth buffer compression can depend on the sample locations. 719 * 720 * This function is a hint to decompress the current depth buffer to avoid 721 * such problems. 722 */ 723 void (*evaluate_depth_buffer)(struct pipe_context *pipe); 724 725 /** 726 * Flush draw commands. 727 * 728 * This guarantees that the new fence (if any) will finish in finite time, 729 * unless PIPE_FLUSH_DEFERRED is used. 730 * 731 * Subsequent operations on other contexts of the same screen are guaranteed 732 * to execute after the flushed commands, unless PIPE_FLUSH_ASYNC is used. 733 * 734 * NOTE: use screen->fence_reference() (or equivalent) to transfer 735 * new fence ref to **fence, to ensure that previous fence is unref'd 736 * 737 * \param fence if not NULL, an old fence to unref and transfer a 738 * new fence reference to 739 * \param flags bitfield of enum pipe_flush_flags values. 740 */ 741 void (*flush)(struct pipe_context *pipe, 742 struct pipe_fence_handle **fence, 743 unsigned flags); 744 745 /** 746 * Create a fence from a fd. 747 * 748 * This is used for importing a foreign/external fence fd. 
749 * 750 * \param fence if not NULL, an old fence to unref and transfer a 751 * new fence reference to 752 * \param fd fd representing the fence object 753 * \param type indicates which fence types backs fd 754 */ 755 void (*create_fence_fd)(struct pipe_context *pipe, 756 struct pipe_fence_handle **fence, 757 int fd, 758 enum pipe_fd_type type); 759 760 /** 761 * Insert commands to have GPU wait for fence to be signaled. 762 */ 763 void (*fence_server_sync)(struct pipe_context *pipe, 764 struct pipe_fence_handle *fence); 765 766 /** 767 * Insert commands to have the GPU signal a fence. 768 */ 769 void (*fence_server_signal)(struct pipe_context *pipe, 770 struct pipe_fence_handle *fence); 771 772 /** 773 * Create a view on a texture to be used by a shader stage. 774 */ 775 struct pipe_sampler_view * (*create_sampler_view)(struct pipe_context *ctx, 776 struct pipe_resource *texture, 777 const struct pipe_sampler_view *templat); 778 779 /** 780 * Destroy a view on a texture. 781 * 782 * \param ctx the current context 783 * \param view the view to be destroyed 784 * 785 * \note The current context may not be the context in which the view was 786 * created (view->context). However, the caller must guarantee that 787 * the context which created the view is still alive. 788 */ 789 void (*sampler_view_destroy)(struct pipe_context *ctx, 790 struct pipe_sampler_view *view); 791 792 793 /** 794 * Get a surface which is a "view" into a resource, used by 795 * render target / depth stencil stages. 796 */ 797 struct pipe_surface *(*create_surface)(struct pipe_context *ctx, 798 struct pipe_resource *resource, 799 const struct pipe_surface *templat); 800 801 void (*surface_destroy)(struct pipe_context *ctx, 802 struct pipe_surface *); 803 804 805 /** 806 * Map a resource. 807 * 808 * Transfers are (by default) context-private and allow uploads to be 809 * interleaved with rendering. 
810 * 811 * out_transfer will contain the transfer object that must be passed 812 * to all the other transfer functions. It also contains useful 813 * information (like texture strides for texture_map). 814 */ 815 void *(*buffer_map)(struct pipe_context *, 816 struct pipe_resource *resource, 817 unsigned level, 818 unsigned usage, /* a combination of PIPE_MAP_x */ 819 const struct pipe_box *, 820 struct pipe_transfer **out_transfer); 821 822 /* If transfer was created with WRITE|FLUSH_EXPLICIT, only the 823 * regions specified with this call are guaranteed to be written to 824 * the resource. 825 */ 826 void (*transfer_flush_region)( struct pipe_context *, 827 struct pipe_transfer *transfer, 828 const struct pipe_box *); 829 830 void (*buffer_unmap)(struct pipe_context *, 831 struct pipe_transfer *transfer); 832 833 void *(*texture_map)(struct pipe_context *, 834 struct pipe_resource *resource, 835 unsigned level, 836 unsigned usage, /* a combination of PIPE_MAP_x */ 837 const struct pipe_box *, 838 struct pipe_transfer **out_transfer); 839 840 void (*texture_unmap)(struct pipe_context *, 841 struct pipe_transfer *transfer); 842 843 /* One-shot transfer operation with data supplied in a user 844 * pointer. 845 */ 846 void (*buffer_subdata)(struct pipe_context *, 847 struct pipe_resource *, 848 unsigned usage, /* a combination of PIPE_MAP_x */ 849 unsigned offset, 850 unsigned size, 851 const void *data); 852 853 void (*texture_subdata)(struct pipe_context *, 854 struct pipe_resource *, 855 unsigned level, 856 unsigned usage, /* a combination of PIPE_MAP_x */ 857 const struct pipe_box *, 858 const void *data, 859 unsigned stride, 860 unsigned layer_stride); 861 862 /** 863 * Flush any pending framebuffer writes and invalidate texture caches. 864 */ 865 void (*texture_barrier)(struct pipe_context *, unsigned flags); 866 867 /** 868 * Flush caches according to flags. 
869 */ 870 void (*memory_barrier)(struct pipe_context *, unsigned flags); 871 872 /** 873 * Change the commitment status of a part of the given resource, which must 874 * have been created with the PIPE_RESOURCE_FLAG_SPARSE bit. 875 * 876 * \param level The texture level whose commitment should be changed. 877 * \param box The region of the resource whose commitment should be changed. 878 * \param commit Whether memory should be committed or un-committed. 879 * 880 * \return false if out of memory, true on success. 881 */ 882 bool (*resource_commit)(struct pipe_context *, struct pipe_resource *, 883 unsigned level, struct pipe_box *box, bool commit); 884 885 /** 886 * Creates a video codec for a specific video format/profile 887 */ 888 struct pipe_video_codec *(*create_video_codec)( struct pipe_context *context, 889 const struct pipe_video_codec *templat ); 890 891 /** 892 * Creates a video buffer as decoding target 893 */ 894 struct pipe_video_buffer *(*create_video_buffer)( struct pipe_context *context, 895 const struct pipe_video_buffer *templat ); 896 897 /** 898 * Compute kernel execution 899 */ 900 /*@{*/ 901 /** 902 * Define the compute program and parameters to be used by 903 * pipe_context::launch_grid. 904 */ 905 void *(*create_compute_state)(struct pipe_context *context, 906 const struct pipe_compute_state *); 907 void (*bind_compute_state)(struct pipe_context *, void *); 908 void (*delete_compute_state)(struct pipe_context *, void *); 909 910 /** 911 * Bind an array of shader resources that will be used by the 912 * compute program. Any resources that were previously bound to 913 * the specified range will be unbound after this call. 914 * 915 * \param start first resource to bind. 916 * \param count number of consecutive resources to bind. 917 * \param resources array of pointers to the resources to bind, it 918 * should contain at least \a count elements 919 * unless it's NULL, in which case no new 920 * resources will be bound. 
    */
   void (*set_compute_resources)(struct pipe_context *,
                                 unsigned start, unsigned count,
                                 struct pipe_surface **resources);

   /**
    * Bind an array of buffers to be mapped into the address space of
    * the GLOBAL resource.  Any buffers that were previously bound
    * between [first, first + count - 1] are unbound after this call.
    *
    * \param first      first buffer to map.
    * \param count      number of consecutive buffers to map.
    * \param resources  array of pointers to the buffers to map, it
    *                   should contain at least \a count elements
    *                   unless it's NULL, in which case no new
    *                   resources will be bound.
    * \param handles    array of pointers to the memory locations that
    *                   will be updated with the address each buffer
    *                   will be mapped to.  The base memory address of
    *                   each of the buffers will be added to the value
    *                   pointed to by its corresponding handle to form
    *                   the final address argument.  It should contain
    *                   at least \a count elements, unless \a
    *                   resources is NULL in which case \a handles
    *                   should be NULL as well.
    *
    * Note that the driver isn't required to make any guarantees about
    * the contents of the \a handles array being valid anytime except
    * during the subsequent calls to pipe_context::launch_grid.  This
    * means that the only sensible location handles[i] may point to is
    * somewhere within the INPUT buffer itself.  This is so to
    * accommodate implementations that lack virtual memory but
    * nevertheless migrate buffers on the fly, leading to resource
    * base addresses that change on each kernel invocation or are
    * unknown to the pipe driver.
    */
   void (*set_global_binding)(struct pipe_context *context,
                              unsigned first, unsigned count,
                              struct pipe_resource **resources,
                              uint32_t **handles);

   /**
    * Launch the compute kernel starting from instruction \a pc of the
    * currently bound compute program.
    */
   void (*launch_grid)(struct pipe_context *context,
                       const struct pipe_grid_info *info);
   /*@}*/

   /**
    * SVM (Share Virtual Memory) helpers
    */
   /*@{*/
   /**
    * Migrate range of virtual address to device or host memory.
    *
    * \param to_device - true if the virtual memory is migrated to the device
    *                    false if the virtual memory is migrated to the host
    * \param migrate_content - whether the content should be migrated as well
    */
   void (*svm_migrate)(struct pipe_context *context, unsigned num_ptrs,
                       const void* const* ptrs, const size_t *sizes,
                       bool to_device, bool migrate_content);
   /*@}*/

   /**
    * Get the default sample position for an individual sample point.
    *
    * \param sample_count - total number of samples
    * \param sample_index - sample to get the position values for
    * \param out_value - return value of 2 floats for x and y position for
    *                    requested sample.
    */
   void (*get_sample_position)(struct pipe_context *context,
                               unsigned sample_count,
                               unsigned sample_index,
                               float *out_value);

   /**
    * Query a timestamp in nanoseconds. This is completely equivalent to
    * pipe_screen::get_timestamp() but takes a context handle for drivers
    * that require a context.
    */
   uint64_t (*get_timestamp)(struct pipe_context *);

   /**
    * Flush the resource cache, so that the resource can be used
    * by an external client. Possible usage:
    * - flushing a resource before presenting it on the screen
    * - flushing a resource if some other process or device wants to use it
    * This shouldn't be used to flush caches if the resource is only managed
    * by a single pipe_screen and is not shared with another process.
    * (i.e. you shouldn't use it to flush caches explicitly if you want to e.g.
    * use the resource for texturing)
    */
   void (*flush_resource)(struct pipe_context *ctx,
                          struct pipe_resource *resource);

   /**
    * Invalidate the contents of the resource. This is used to
    *
    * (1) implement EGL's semantic of undefined depth/stencil
    * contents after a swapbuffers.  This allows a tiled renderer (for
    * example) to not store the depth buffer.
    *
    * (2) implement GL's InvalidateBufferData. For backwards compatibility,
    * you must only rely on the usability for this purpose when
    * PIPE_CAP_INVALIDATE_BUFFER is enabled.
    */
   void (*invalidate_resource)(struct pipe_context *ctx,
                               struct pipe_resource *resource);

   /**
    * Return information about unexpected device resets.
    */
   enum pipe_reset_status (*get_device_reset_status)(struct pipe_context *ctx);

   /**
    * Sets the reset status callback. If the pointer is null, then no callback
    * is set, otherwise a copy of the data should be made.
    */
   void (*set_device_reset_callback)(struct pipe_context *ctx,
                                     const struct pipe_device_reset_callback *cb);

   /**
    * Dump driver-specific debug information into a stream. This is
    * used by debugging tools.
    *
    * \param ctx     pipe context
    * \param stream  where the output should be written to
    * \param flags   a mask of PIPE_DUMP_* flags
    */
   void (*dump_debug_state)(struct pipe_context *ctx, FILE *stream,
                            unsigned flags);

   /**
    * Set the log context to which the driver should write internal debug logs
    * (internal states, command streams).
    *
    * The caller must ensure that the log context is destroyed and reset to
    * NULL before the pipe context is destroyed, and that log context functions
    * are only called from the driver thread.
    *
    * \param ctx  pipe context
    * \param log  logging context
    */
   void (*set_log_context)(struct pipe_context *ctx, struct u_log_context *log);

   /**
    * Emit string marker in cmdstream
    */
   void (*emit_string_marker)(struct pipe_context *ctx,
                              const char *string,
                              int len);

   /**
    * Generate mipmap.
    * \return TRUE if mipmap generation succeeds, FALSE otherwise
    */
   bool (*generate_mipmap)(struct pipe_context *ctx,
                           struct pipe_resource *resource,
                           enum pipe_format format,
                           unsigned base_level,
                           unsigned last_level,
                           unsigned first_layer,
                           unsigned last_layer);

   /**
    * Create a 64-bit texture handle.
    *
    * \param ctx    pipe context
    * \param view   pipe sampler view object
    * \param state  pipe sampler state template
    * \return a 64-bit texture handle if success, 0 otherwise
    */
   uint64_t (*create_texture_handle)(struct pipe_context *ctx,
                                     struct pipe_sampler_view *view,
                                     const struct pipe_sampler_state *state);

   /**
    * Delete a texture handle.
    *
    * \param ctx     pipe context
    * \param handle  64-bit texture handle
    */
   void (*delete_texture_handle)(struct pipe_context *ctx, uint64_t handle);

   /**
    * Make a texture handle resident.
    *
    * \param ctx       pipe context
    * \param handle    64-bit texture handle
    * \param resident  TRUE for resident, FALSE otherwise
    */
   void (*make_texture_handle_resident)(struct pipe_context *ctx,
                                        uint64_t handle, bool resident);

   /**
    * Create a 64-bit image handle.
    *
    * \param ctx    pipe context
    * \param image  pipe image view template
    * \return a 64-bit image handle if success, 0 otherwise
    */
   uint64_t (*create_image_handle)(struct pipe_context *ctx,
                                   const struct pipe_image_view *image);

   /**
    * Delete an image handle.
    *
    * \param ctx     pipe context
    * \param handle  64-bit image handle
    */
   void (*delete_image_handle)(struct pipe_context *ctx, uint64_t handle);

   /**
    * Make an image handle resident.
    *
    * \param ctx       pipe context
    * \param handle    64-bit image handle
    * \param access    GL_READ_ONLY, GL_WRITE_ONLY or GL_READ_WRITE
    * \param resident  TRUE for resident, FALSE otherwise
    */
   void (*make_image_handle_resident)(struct pipe_context *ctx, uint64_t handle,
                                      unsigned access, bool resident);

   /**
    * Call the given function from the driver thread.
    *
    * This is set by threaded contexts for use by debugging wrappers.
    *
    * \param asap if true, run the callback immediately if there are no pending
    *             commands to be processed by the driver thread
    */
   void (*callback)(struct pipe_context *ctx, void (*fn)(void *), void *data,
                    bool asap);

   /**
    * Set a context parameter.  See enum pipe_context_param for more details.
    */
   void (*set_context_param)(struct pipe_context *ctx,
                             enum pipe_context_param param,
                             unsigned value);

   /**
    * Creates a video buffer as decoding target, with modifiers.
    */
   struct pipe_video_buffer *(*create_video_buffer_with_modifiers)(struct pipe_context *context,
                                                                   const struct pipe_video_buffer *templat,
                                                                   const uint64_t *modifiers,
                                                                   unsigned int modifiers_count);
};


#ifdef __cplusplus
}
#endif

#endif /* PIPE_CONTEXT_H */