1 /************************************************************************** 2 * 3 * Copyright 2007 VMware, Inc. 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR 22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
25 * 26 **************************************************************************/ 27 28 #ifndef PIPE_CONTEXT_H 29 #define PIPE_CONTEXT_H 30 31 #include "p_compiler.h" 32 #include "p_format.h" 33 #include "p_video_enums.h" 34 #include "p_defines.h" 35 #include <stdio.h> 36 37 #ifdef __cplusplus 38 extern "C" { 39 #endif 40 41 42 struct pipe_blend_color; 43 struct pipe_blend_state; 44 struct pipe_blit_info; 45 struct pipe_box; 46 struct pipe_clip_state; 47 struct pipe_constant_buffer; 48 struct pipe_depth_stencil_alpha_state; 49 struct pipe_device_reset_callback; 50 struct pipe_draw_info; 51 struct pipe_draw_indirect_info; 52 struct pipe_draw_start_count_bias; 53 struct pipe_draw_vertex_state_info; 54 struct pipe_grid_info; 55 struct pipe_fence_handle; 56 struct pipe_framebuffer_state; 57 struct pipe_image_view; 58 struct pipe_query; 59 struct pipe_poly_stipple; 60 struct pipe_rasterizer_state; 61 struct pipe_resolve_info; 62 struct pipe_resource; 63 struct pipe_sampler_state; 64 struct pipe_sampler_view; 65 struct pipe_scissor_state; 66 struct pipe_shader_buffer; 67 struct pipe_shader_state; 68 struct pipe_stencil_ref; 69 struct pipe_stream_output_target; 70 struct pipe_surface; 71 struct pipe_transfer; 72 struct pipe_vertex_buffer; 73 struct pipe_vertex_element; 74 struct pipe_vertex_state; 75 struct pipe_video_buffer; 76 struct pipe_video_codec; 77 struct pipe_viewport_state; 78 struct pipe_compute_state; 79 union pipe_color_union; 80 union pipe_query_result; 81 struct u_log_context; 82 struct u_upload_mgr; 83 struct util_debug_callback; 84 85 /** 86 * Gallium rendering context. Basically: 87 * - state setting functions 88 * - VBO drawing functions 89 * - surface functions 90 */ 91 struct pipe_context { 92 struct pipe_screen *screen; 93 94 void *priv; /**< context private data (for DRI for example) */ 95 void *draw; /**< private, for draw module (temporary?) */ 96 97 /** 98 * Stream uploaders created by the driver. 
All drivers, gallium frontends, and 99 * modules should use them. 100 * 101 * Use u_upload_alloc or u_upload_data as many times as you want. 102 * Once you are done, use u_upload_unmap. 103 */ 104 struct u_upload_mgr *stream_uploader; /* everything but shader constants */ 105 struct u_upload_mgr *const_uploader; /* shader constants only */ 106 107 void (*destroy)( struct pipe_context * ); 108 109 /** 110 * VBO drawing 111 */ 112 /*@{*/ 113 /** 114 * Multi draw. 115 * 116 * For indirect multi draws, num_draws is 1 and indirect->draw_count 117 * is used instead. 118 * 119 * Caps: 120 * - Always supported: Direct multi draws 121 * - PIPE_CAP_MULTI_DRAW_INDIRECT: Indirect multi draws 122 * - PIPE_CAP_MULTI_DRAW_INDIRECT_PARAMS: Indirect draw count 123 * 124 * Differences against glMultiDraw and glMultiMode: 125 * - "info->mode" and "draws->index_bias" are always constant due to the lack 126 * of hardware support and CPU performance concerns. Only start and count 127 * vary. 128 * - if "info->increment_draw_id" is false, draw_id doesn't change between 129 * draws 130 * 131 * Direct multi draws are also generated by u_threaded_context, which looks 132 * ahead in gallium command buffers and merges single draws. 133 * 134 * \param pipe context 135 * \param info draw info 136 * \param drawid_offset offset to add for drawid param of each draw 137 * \param indirect indirect multi draws 138 * \param draws array of (start, count) pairs for direct draws 139 * \param num_draws number of direct draws; 1 for indirect multi draws 140 */ 141 void (*draw_vbo)(struct pipe_context *pipe, 142 const struct pipe_draw_info *info, 143 unsigned drawid_offset, 144 const struct pipe_draw_indirect_info *indirect, 145 const struct pipe_draw_start_count_bias *draws, 146 unsigned num_draws); 147 148 /** 149 * Multi draw for display lists. 150 * 151 * For more information, see pipe_vertex_state and 152 * pipe_draw_vertex_state_info. 153 * 154 * Explanation of partial_vertex_mask: 155 * 156 * 1. 
pipe_vertex_state::input::elements have a monotonic logical index 157 * determined by pipe_vertex_state::input::full_velem_mask, specifically, 158 * the position of the i-th bit set is the logical index of the i-th 159 * vertex element, up to 31. 160 * 161 * 2. pipe_vertex_state::input::partial_velem_mask is a subset of 162 * full_velem_mask where the bits set determine which vertex elements 163 * should be bound contiguously. The vertex elements corresponding to 164 * the bits not set in partial_velem_mask should be ignored. 165 * 166 * Those two allow creating pipe_vertex_state that has more vertex 167 * attributes than the vertex shader has inputs. The idea is that 168 * pipe_vertex_state can be used with any vertex shader that has the same 169 * number of inputs and same logical indices or less. This may sound like 170 * an overly complicated way to bind a subset of vertex elements, but it 171 * actually simplifies everything else: 172 * 173 * - In st/mesa, full_velem_mask is exactly the mask of enabled vertex 174 * attributes (VERT_ATTRIB_x) in the display list VAO, while 175 * partial_velem_mask is exactly the inputs_read mask of the vertex 176 * shader (also VERT_ATTRIB_x). 177 * 178 * - In the driver, some bit ops and popcnt is needed to assemble vertex 179 * elements very quickly. 
180 */ 181 void (*draw_vertex_state)(struct pipe_context *ctx, 182 struct pipe_vertex_state *state, 183 uint32_t partial_velem_mask, 184 struct pipe_draw_vertex_state_info info, 185 const struct pipe_draw_start_count_bias *draws, 186 unsigned num_draws); 187 /*@}*/ 188 189 /** 190 * Predicate subsequent rendering on occlusion query result 191 * \param query the query predicate, or NULL if no predicate 192 * \param condition whether to skip on FALSE or TRUE query results 193 * \param mode one of PIPE_RENDER_COND_x 194 */ 195 void (*render_condition)( struct pipe_context *pipe, 196 struct pipe_query *query, 197 bool condition, 198 enum pipe_render_cond_flag mode ); 199 200 /** 201 * Predicate subsequent rendering on a value in a buffer 202 * \param buffer The buffer to query for the value 203 * \param offset Offset in the buffer to query 32-bit 204 * \param condition whether to skip on FALSE or TRUE query results 205 */ 206 void (*render_condition_mem)( struct pipe_context *pipe, 207 struct pipe_resource *buffer, 208 uint32_t offset, 209 bool condition ); 210 /** 211 * Query objects 212 */ 213 /*@{*/ 214 struct pipe_query *(*create_query)( struct pipe_context *pipe, 215 unsigned query_type, 216 unsigned index ); 217 218 /** 219 * Create a query object that queries all given query types simultaneously. 220 * 221 * This can only be used for those query types for which 222 * get_driver_query_info indicates that it must be used. Only one batch 223 * query object may be active at a time. 224 * 225 * There may be additional constraints on which query types can be used 226 * together, in particular those that are implied by 227 * get_driver_query_group_info. 228 * 229 * \param num_queries the number of query types 230 * \param query_types array of \p num_queries query types 231 * \return a query object, or NULL on error. 
232 */ 233 struct pipe_query *(*create_batch_query)( struct pipe_context *pipe, 234 unsigned num_queries, 235 unsigned *query_types ); 236 237 void (*destroy_query)(struct pipe_context *pipe, 238 struct pipe_query *q); 239 240 bool (*begin_query)(struct pipe_context *pipe, struct pipe_query *q); 241 bool (*end_query)(struct pipe_context *pipe, struct pipe_query *q); 242 243 /** 244 * Get results of a query. 245 * \param wait if true, this query will block until the result is ready 246 * \return TRUE if results are ready, FALSE otherwise 247 */ 248 bool (*get_query_result)(struct pipe_context *pipe, 249 struct pipe_query *q, 250 bool wait, 251 union pipe_query_result *result); 252 253 /** 254 * Get results of a query, storing into resource. Note that this may not 255 * be used with batch queries. 256 * 257 * \param wait if true, this query will block until the result is ready 258 * \param result_type the type of the value being stored: 259 * \param index for queries that return multiple pieces of data, which 260 * item of that data to store (e.g. for 261 * PIPE_QUERY_PIPELINE_STATISTICS). 262 * When the index is -1, instead of the value of the query 263 * the driver should instead write a 1 or 0 to the appropriate 264 * location with 1 meaning that the query result is available. 265 */ 266 void (*get_query_result_resource)(struct pipe_context *pipe, 267 struct pipe_query *q, 268 enum pipe_query_flags flags, 269 enum pipe_query_value_type result_type, 270 int index, 271 struct pipe_resource *resource, 272 unsigned offset); 273 274 /** 275 * Set whether all current non-driver queries except TIME_ELAPSED are 276 * active or paused. 
277 */ 278 void (*set_active_query_state)(struct pipe_context *pipe, bool enable); 279 280 /** 281 * INTEL Performance Query 282 */ 283 /*@{*/ 284 285 unsigned (*init_intel_perf_query_info)(struct pipe_context *pipe); 286 287 void (*get_intel_perf_query_info)(struct pipe_context *pipe, 288 unsigned query_index, 289 const char **name, 290 uint32_t *data_size, 291 uint32_t *n_counters, 292 uint32_t *n_active); 293 294 void (*get_intel_perf_query_counter_info)(struct pipe_context *pipe, 295 unsigned query_index, 296 unsigned counter_index, 297 const char **name, 298 const char **desc, 299 uint32_t *offset, 300 uint32_t *data_size, 301 uint32_t *type_enum, 302 uint32_t *data_type_enum, 303 uint64_t *raw_max); 304 305 struct pipe_query *(*new_intel_perf_query_obj)(struct pipe_context *pipe, 306 unsigned query_index); 307 308 bool (*begin_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q); 309 310 void (*end_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q); 311 312 void (*delete_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q); 313 314 void (*wait_intel_perf_query)(struct pipe_context *pipe, struct pipe_query *q); 315 316 bool (*is_intel_perf_query_ready)(struct pipe_context *pipe, struct pipe_query *q); 317 318 bool (*get_intel_perf_query_data)(struct pipe_context *pipe, 319 struct pipe_query *q, 320 size_t data_size, 321 uint32_t *data, 322 uint32_t *bytes_written); 323 324 /*@}*/ 325 326 /** 327 * \name GLSL shader/program functions. 328 */ 329 /*@{*/ 330 /** 331 * Called when a shader program is linked. 332 * \param handles Array of shader handles attached to this program. 333 * The size of the array is \c PIPE_SHADER_TYPES, and each 334 * position contains the corresponding \c pipe_shader_state* 335 * or \c pipe_compute_state*, or \c NULL. 336 * E.g. 
You can retrieve the fragment shader handle with 337 * \c handles[PIPE_SHADER_FRAGMENT] 338 */ 339 void (*link_shader)(struct pipe_context *, void** handles); 340 /*@}*/ 341 342 /** 343 * State functions (create/bind/destroy state objects) 344 */ 345 /*@{*/ 346 void * (*create_blend_state)(struct pipe_context *, 347 const struct pipe_blend_state *); 348 void (*bind_blend_state)(struct pipe_context *, void *); 349 void (*delete_blend_state)(struct pipe_context *, void *); 350 351 void * (*create_sampler_state)(struct pipe_context *, 352 const struct pipe_sampler_state *); 353 void (*bind_sampler_states)(struct pipe_context *, 354 enum pipe_shader_type shader, 355 unsigned start_slot, unsigned num_samplers, 356 void **samplers); 357 void (*delete_sampler_state)(struct pipe_context *, void *); 358 359 void * (*create_rasterizer_state)(struct pipe_context *, 360 const struct pipe_rasterizer_state *); 361 void (*bind_rasterizer_state)(struct pipe_context *, void *); 362 void (*delete_rasterizer_state)(struct pipe_context *, void *); 363 364 void * (*create_depth_stencil_alpha_state)(struct pipe_context *, 365 const struct pipe_depth_stencil_alpha_state *); 366 void (*bind_depth_stencil_alpha_state)(struct pipe_context *, void *); 367 void (*delete_depth_stencil_alpha_state)(struct pipe_context *, void *); 368 369 void * (*create_fs_state)(struct pipe_context *, 370 const struct pipe_shader_state *); 371 void (*bind_fs_state)(struct pipe_context *, void *); 372 void (*delete_fs_state)(struct pipe_context *, void *); 373 374 void * (*create_vs_state)(struct pipe_context *, 375 const struct pipe_shader_state *); 376 void (*bind_vs_state)(struct pipe_context *, void *); 377 void (*delete_vs_state)(struct pipe_context *, void *); 378 379 void * (*create_gs_state)(struct pipe_context *, 380 const struct pipe_shader_state *); 381 void (*bind_gs_state)(struct pipe_context *, void *); 382 void (*delete_gs_state)(struct pipe_context *, void *); 383 384 void * 
(*create_tcs_state)(struct pipe_context *, 385 const struct pipe_shader_state *); 386 void (*bind_tcs_state)(struct pipe_context *, void *); 387 void (*delete_tcs_state)(struct pipe_context *, void *); 388 389 void * (*create_tes_state)(struct pipe_context *, 390 const struct pipe_shader_state *); 391 void (*bind_tes_state)(struct pipe_context *, void *); 392 void (*delete_tes_state)(struct pipe_context *, void *); 393 394 void * (*create_vertex_elements_state)(struct pipe_context *, 395 unsigned num_elements, 396 const struct pipe_vertex_element *); 397 void (*bind_vertex_elements_state)(struct pipe_context *, void *); 398 void (*delete_vertex_elements_state)(struct pipe_context *, void *); 399 400 /*@}*/ 401 402 /** 403 * Parameter-like state (or properties) 404 */ 405 /*@{*/ 406 void (*set_blend_color)( struct pipe_context *, 407 const struct pipe_blend_color * ); 408 409 void (*set_stencil_ref)( struct pipe_context *, 410 const struct pipe_stencil_ref ref); 411 412 void (*set_sample_mask)( struct pipe_context *, 413 unsigned sample_mask ); 414 415 void (*set_min_samples)( struct pipe_context *, 416 unsigned min_samples ); 417 418 void (*set_clip_state)( struct pipe_context *, 419 const struct pipe_clip_state * ); 420 421 /** 422 * Set constant buffer 423 * 424 * \param shader Shader stage 425 * \param index Buffer binding slot index within a shader stage 426 * \param take_ownership The callee takes ownership of the buffer reference. 427 * (the callee shouldn't increment the ref count) 428 * \param buf Constant buffer parameters 429 */ 430 void (*set_constant_buffer)( struct pipe_context *, 431 enum pipe_shader_type shader, uint index, 432 bool take_ownership, 433 const struct pipe_constant_buffer *buf ); 434 435 /** 436 * Set inlinable constants for constant buffer 0. 437 * 438 * These are constants that the driver would like to inline in the IR 439 * of the current shader and recompile it. 
Drivers can determine which 440 * constants they prefer to inline in finalize_nir and store that 441 * information in shader_info::*inlinable_uniform*. When the state tracker 442 * or frontend uploads constants to a constant buffer, it can pass 443 * inlinable constants separately via this call. 444 * 445 * Any set_constant_buffer call invalidates this state, so this function 446 * must be called after it. Binding a shader also invalidates this state. 447 * 448 * There is no PIPE_CAP for this. Drivers shouldn't set the shader_info 449 * fields if they don't want this or if they don't implement this. 450 */ 451 void (*set_inlinable_constants)( struct pipe_context *, 452 enum pipe_shader_type shader, 453 uint num_values, uint32_t *values ); 454 455 void (*set_framebuffer_state)( struct pipe_context *, 456 const struct pipe_framebuffer_state * ); 457 458 /** 459 * Set the sample locations used during rasterization. When NULL or sized 460 * zero, the default locations are used. 461 * 462 * Note that get_sample_position() still returns the default locations. 463 * 464 * The samples are accessed with 465 * locations[(pixel_y*grid_w+pixel_x)*ms+i], 466 * where: 467 * ms = the sample count 468 * grid_w = the pixel grid width for the sample count 469 * grid_h = the pixel grid height for the sample count 470 * pixel_x = the window x coordinate modulo grid_w 471 * pixel_y = the window y coordinate modulo grid_h 472 * i = the sample index 473 * This gives a result with the x coordinate as the low 4 bits and the y 474 * coordinate as the high 4 bits. For each coordinate 0 is the left or top 475 * edge of the pixel's rectangle and 16 (not 15) is the right or bottom edge. 476 * 477 * Out of bounds accesses return undefined values. 478 * 479 * The pixel grid is used to vary sample locations across pixels and its 480 * size can be queried with get_sample_pixel_grid(). 
481 */ 482 void (*set_sample_locations)( struct pipe_context *, 483 size_t size, const uint8_t *locations ); 484 485 void (*set_polygon_stipple)( struct pipe_context *, 486 const struct pipe_poly_stipple * ); 487 488 void (*set_scissor_states)( struct pipe_context *, 489 unsigned start_slot, 490 unsigned num_scissors, 491 const struct pipe_scissor_state * ); 492 493 void (*set_window_rectangles)( struct pipe_context *, 494 bool include, 495 unsigned num_rectangles, 496 const struct pipe_scissor_state * ); 497 498 void (*set_viewport_states)( struct pipe_context *, 499 unsigned start_slot, 500 unsigned num_viewports, 501 const struct pipe_viewport_state *); 502 503 void (*set_sampler_views)(struct pipe_context *, 504 enum pipe_shader_type shader, 505 unsigned start_slot, unsigned num_views, 506 unsigned unbind_num_trailing_slots, 507 bool take_ownership, 508 struct pipe_sampler_view **views); 509 510 void (*set_tess_state)(struct pipe_context *, 511 const float default_outer_level[4], 512 const float default_inner_level[2]); 513 514 /** 515 * Set the number of vertices per input patch for tessellation. 516 */ 517 void (*set_patch_vertices)(struct pipe_context *ctx, uint8_t patch_vertices); 518 519 /** 520 * Sets the debug callback. If the pointer is null, then no callback is 521 * set, otherwise a copy of the data should be made. 522 */ 523 void (*set_debug_callback)(struct pipe_context *, 524 const struct util_debug_callback *); 525 526 /** 527 * Bind an array of shader buffers that will be used by a shader. 528 * Any buffers that were previously bound to the specified range 529 * will be unbound. 530 * 531 * \param shader selects shader stage 532 * \param start_slot first buffer slot to bind. 533 * \param count number of consecutive buffers to bind. 534 * \param buffers array of pointers to the buffers to bind, it 535 * should contain at least \a count elements 536 * unless it's NULL, in which case no buffers will 537 * be bound. 
538 * \param writable_bitmask If bit i is not set, buffers[i] will only be 539 * used with loads. If unsure, set to ~0. 540 */ 541 void (*set_shader_buffers)(struct pipe_context *, 542 enum pipe_shader_type shader, 543 unsigned start_slot, unsigned count, 544 const struct pipe_shader_buffer *buffers, 545 unsigned writable_bitmask); 546 547 /** 548 * Bind an array of hw atomic buffers for use by all shaders. 549 * Any buffers that were previously bound to the specified range 550 * will be unbound. 551 * 552 * \param start_slot first buffer slot to bind. 553 * \param count number of consecutive buffers to bind. 554 * \param buffers array of pointers to the buffers to bind, it 555 * should contain at least \a count elements 556 * unless it's NULL, in which case no buffers will 557 * be bound. 558 */ 559 void (*set_hw_atomic_buffers)(struct pipe_context *, 560 unsigned start_slot, unsigned count, 561 const struct pipe_shader_buffer *buffers); 562 563 /** 564 * Bind an array of images that will be used by a shader. 565 * Any images that were previously bound to the specified range 566 * will be unbound. 567 * 568 * \param shader selects shader stage 569 * \param start_slot first image slot to bind. 570 * \param count number of consecutive images to bind. 571 * \param unbind_num_trailing_slots number of images to unbind after 572 * the bound slot 573 * \param images array of the images to bind, it 574 * should contain at least \a count elements 575 * unless it's NULL, in which case no images will 576 * be bound. 577 */ 578 void (*set_shader_images)(struct pipe_context *, 579 enum pipe_shader_type shader, 580 unsigned start_slot, unsigned count, 581 unsigned unbind_num_trailing_slots, 582 const struct pipe_image_view *images); 583 584 /** 585 * Bind an array of vertex buffers to the specified slots. 586 * 587 * \param start_slot first vertex buffer slot 588 * \param num_buffers number of consecutive vertex buffers to bind. 
589 * \param unbind_num_trailing_slots unbind slots after the bound slots 590 * \param take_ownership the caller holds buffer references and they 591 * should be taken over by the callee. This means 592 * that drivers shouldn't increment reference counts. 593 * \param buffers array of the buffers to bind 594 */ 595 void (*set_vertex_buffers)( struct pipe_context *, 596 unsigned start_slot, 597 unsigned num_buffers, 598 unsigned unbind_num_trailing_slots, 599 bool take_ownership, 600 const struct pipe_vertex_buffer * ); 601 602 /*@}*/ 603 604 /** 605 * Stream output functions. 606 */ 607 /*@{*/ 608 609 struct pipe_stream_output_target *(*create_stream_output_target)( 610 struct pipe_context *, 611 struct pipe_resource *, 612 unsigned buffer_offset, 613 unsigned buffer_size); 614 615 void (*stream_output_target_destroy)(struct pipe_context *, 616 struct pipe_stream_output_target *); 617 618 void (*set_stream_output_targets)(struct pipe_context *, 619 unsigned num_targets, 620 struct pipe_stream_output_target **targets, 621 const unsigned *offsets); 622 623 uint32_t (*stream_output_target_offset)(struct pipe_stream_output_target *target); 624 625 /*@}*/ 626 627 628 /** 629 * INTEL_blackhole_render 630 */ 631 /*@{*/ 632 633 void (*set_frontend_noop)(struct pipe_context *, 634 bool enable); 635 636 /*@}*/ 637 638 639 /** 640 * Resource functions for blit-like functionality 641 * 642 * If a driver supports multisampling, blit must implement color resolve. 643 */ 644 /*@{*/ 645 646 /** 647 * Copy a block of pixels from one resource to another. 648 * The resource must be of the same format. 649 * Resources with nr_samples > 1 are not allowed. 650 */ 651 void (*resource_copy_region)(struct pipe_context *pipe, 652 struct pipe_resource *dst, 653 unsigned dst_level, 654 unsigned dstx, unsigned dsty, unsigned dstz, 655 struct pipe_resource *src, 656 unsigned src_level, 657 const struct pipe_box *src_box); 658 659 /* Optimal hardware path for blitting pixels. 
660 * Scaling, format conversion, up- and downsampling (resolve) are allowed. 661 */ 662 void (*blit)(struct pipe_context *pipe, 663 const struct pipe_blit_info *info); 664 665 /*@}*/ 666 667 /** 668 * Clear the specified set of currently bound buffers to specified values. 669 * The entire buffers are cleared (no scissor, no colormask, etc). 670 * 671 * \param buffers bitfield of PIPE_CLEAR_* values. 672 * \param scissor_state the scissored region to clear 673 * \param color pointer to a union of fiu array for each of r, g, b, a. 674 * \param depth depth clear value in [0,1]. 675 * \param stencil stencil clear value 676 */ 677 void (*clear)(struct pipe_context *pipe, 678 unsigned buffers, 679 const struct pipe_scissor_state *scissor_state, 680 const union pipe_color_union *color, 681 double depth, 682 unsigned stencil); 683 684 /** 685 * Clear a color rendertarget surface. 686 * \param color pointer to an union of fiu array for each of r, g, b, a. 687 */ 688 void (*clear_render_target)(struct pipe_context *pipe, 689 struct pipe_surface *dst, 690 const union pipe_color_union *color, 691 unsigned dstx, unsigned dsty, 692 unsigned width, unsigned height, 693 bool render_condition_enabled); 694 695 /** 696 * Clear a depth-stencil surface. 697 * \param clear_flags bitfield of PIPE_CLEAR_DEPTH/STENCIL values. 698 * \param depth depth clear value in [0,1]. 699 * \param stencil stencil clear value 700 */ 701 void (*clear_depth_stencil)(struct pipe_context *pipe, 702 struct pipe_surface *dst, 703 unsigned clear_flags, 704 double depth, 705 unsigned stencil, 706 unsigned dstx, unsigned dsty, 707 unsigned width, unsigned height, 708 bool render_condition_enabled); 709 710 /** 711 * Clear the texture with the specified texel. Not guaranteed to be a 712 * renderable format. Data provided in the resource's format. 
713 */ 714 void (*clear_texture)(struct pipe_context *pipe, 715 struct pipe_resource *res, 716 unsigned level, 717 const struct pipe_box *box, 718 const void *data); 719 720 /** 721 * Clear a buffer. Runs a memset over the specified region with the element 722 * value passed in through clear_value of size clear_value_size. 723 */ 724 void (*clear_buffer)(struct pipe_context *pipe, 725 struct pipe_resource *res, 726 unsigned offset, 727 unsigned size, 728 const void *clear_value, 729 int clear_value_size); 730 731 /** 732 * If a depth buffer is rendered with different sample location state than 733 * what is current at the time of reading, the values may differ because 734 * depth buffer compression can depend on the sample locations. 735 * 736 * This function is a hint to decompress the current depth buffer to avoid 737 * such problems. 738 */ 739 void (*evaluate_depth_buffer)(struct pipe_context *pipe); 740 741 /** 742 * Flush draw commands. 743 * 744 * This guarantees that the new fence (if any) will finish in finite time, 745 * unless PIPE_FLUSH_DEFERRED is used. 746 * 747 * Subsequent operations on other contexts of the same screen are guaranteed 748 * to execute after the flushed commands, unless PIPE_FLUSH_ASYNC is used. 749 * 750 * NOTE: use screen->fence_reference() (or equivalent) to transfer 751 * new fence ref to **fence, to ensure that previous fence is unref'd 752 * 753 * \param fence if not NULL, an old fence to unref and transfer a 754 * new fence reference to 755 * \param flags bitfield of enum pipe_flush_flags values. 756 */ 757 void (*flush)(struct pipe_context *pipe, 758 struct pipe_fence_handle **fence, 759 unsigned flags); 760 761 /** 762 * Create a fence from a fd. 763 * 764 * This is used for importing a foreign/external fence fd. 
765 * 766 * \param fence if not NULL, an old fence to unref and transfer a 767 * new fence reference to 768 * \param fd fd representing the fence object 769 * \param type indicates which fence type backs fd 770 */ 771 void (*create_fence_fd)(struct pipe_context *pipe, 772 struct pipe_fence_handle **fence, 773 int fd, 774 enum pipe_fd_type type); 775 776 /** 777 * Insert commands to have GPU wait for fence to be signaled. 778 */ 779 void (*fence_server_sync)(struct pipe_context *pipe, 780 struct pipe_fence_handle *fence); 781 782 /** 783 * Insert commands to have the GPU signal a fence. 784 */ 785 void (*fence_server_signal)(struct pipe_context *pipe, 786 struct pipe_fence_handle *fence); 787 788 /** 789 * Create a view on a texture to be used by a shader stage. 790 */ 791 struct pipe_sampler_view * (*create_sampler_view)(struct pipe_context *ctx, 792 struct pipe_resource *texture, 793 const struct pipe_sampler_view *templat); 794 795 /** 796 * Destroy a view on a texture. 797 * 798 * \param ctx the current context 799 * \param view the view to be destroyed 800 * 801 * \note The current context may not be the context in which the view was 802 * created (view->context). However, the caller must guarantee that 803 * the context which created the view is still alive. 804 */ 805 void (*sampler_view_destroy)(struct pipe_context *ctx, 806 struct pipe_sampler_view *view); 807 808 809 /** 810 * Get a surface which is a "view" into a resource, used by 811 * render target / depth stencil stages. 812 */ 813 struct pipe_surface *(*create_surface)(struct pipe_context *ctx, 814 struct pipe_resource *resource, 815 const struct pipe_surface *templat); 816 817 void (*surface_destroy)(struct pipe_context *ctx, 818 struct pipe_surface *); 819 820 821 /** 822 * Map a resource. 823 * 824 * Transfers are (by default) context-private and allow uploads to be 825 * interleaved with rendering. 
826 * 827 * out_transfer will contain the transfer object that must be passed 828 * to all the other transfer functions. It also contains useful 829 * information (like texture strides for texture_map). 830 */ 831 void *(*buffer_map)(struct pipe_context *, 832 struct pipe_resource *resource, 833 unsigned level, 834 unsigned usage, /* a combination of PIPE_MAP_x */ 835 const struct pipe_box *, 836 struct pipe_transfer **out_transfer); 837 838 /* If transfer was created with WRITE|FLUSH_EXPLICIT, only the 839 * regions specified with this call are guaranteed to be written to 840 * the resource. 841 */ 842 void (*transfer_flush_region)( struct pipe_context *, 843 struct pipe_transfer *transfer, 844 const struct pipe_box *); 845 846 void (*buffer_unmap)(struct pipe_context *, 847 struct pipe_transfer *transfer); 848 849 void *(*texture_map)(struct pipe_context *, 850 struct pipe_resource *resource, 851 unsigned level, 852 unsigned usage, /* a combination of PIPE_MAP_x */ 853 const struct pipe_box *, 854 struct pipe_transfer **out_transfer); 855 856 void (*texture_unmap)(struct pipe_context *, 857 struct pipe_transfer *transfer); 858 859 /* One-shot transfer operation with data supplied in a user 860 * pointer. 861 */ 862 void (*buffer_subdata)(struct pipe_context *, 863 struct pipe_resource *, 864 unsigned usage, /* a combination of PIPE_MAP_x */ 865 unsigned offset, 866 unsigned size, 867 const void *data); 868 869 void (*texture_subdata)(struct pipe_context *, 870 struct pipe_resource *, 871 unsigned level, 872 unsigned usage, /* a combination of PIPE_MAP_x */ 873 const struct pipe_box *, 874 const void *data, 875 unsigned stride, 876 unsigned layer_stride); 877 878 /** 879 * Flush any pending framebuffer writes and invalidate texture caches. 880 */ 881 void (*texture_barrier)(struct pipe_context *, unsigned flags); 882 883 /** 884 * Flush caches according to flags. 
885 */ 886 void (*memory_barrier)(struct pipe_context *, unsigned flags); 887 888 /** 889 * Change the commitment status of a part of the given resource, which must 890 * have been created with the PIPE_RESOURCE_FLAG_SPARSE bit. 891 * 892 * \param level The texture level whose commitment should be changed. 893 * \param box The region of the resource whose commitment should be changed. 894 * \param commit Whether memory should be committed or un-committed. 895 * 896 * \return false if out of memory, true on success. 897 */ 898 bool (*resource_commit)(struct pipe_context *, struct pipe_resource *, 899 unsigned level, struct pipe_box *box, bool commit); 900 901 /** 902 * Creates a video codec for a specific video format/profile 903 */ 904 struct pipe_video_codec *(*create_video_codec)( struct pipe_context *context, 905 const struct pipe_video_codec *templat ); 906 907 /** 908 * Creates a video buffer as decoding target 909 */ 910 struct pipe_video_buffer *(*create_video_buffer)( struct pipe_context *context, 911 const struct pipe_video_buffer *templat ); 912 913 /** 914 * Compute kernel execution 915 */ 916 /*@{*/ 917 /** 918 * Define the compute program and parameters to be used by 919 * pipe_context::launch_grid. 920 */ 921 void *(*create_compute_state)(struct pipe_context *context, 922 const struct pipe_compute_state *); 923 void (*bind_compute_state)(struct pipe_context *, void *); 924 void (*delete_compute_state)(struct pipe_context *, void *); 925 926 /** 927 * Bind an array of shader resources that will be used by the 928 * compute program. Any resources that were previously bound to 929 * the specified range will be unbound after this call. 930 * 931 * \param start first resource to bind. 932 * \param count number of consecutive resources to bind. 933 * \param resources array of pointers to the resources to bind, it 934 * should contain at least \a count elements 935 * unless it's NULL, in which case no new 936 * resources will be bound. 
937 */ 938 void (*set_compute_resources)(struct pipe_context *, 939 unsigned start, unsigned count, 940 struct pipe_surface **resources); 941 942 /** 943 * Bind an array of buffers to be mapped into the address space of 944 * the GLOBAL resource. Any buffers that were previously bound 945 * between [first, first + count - 1] are unbound after this call. 946 * 947 * \param first first buffer to map. 948 * \param count number of consecutive buffers to map. 949 * \param resources array of pointers to the buffers to map, it 950 * should contain at least \a count elements 951 * unless it's NULL, in which case no new 952 * resources will be bound. 953 * \param handles array of pointers to the memory locations that 954 * will be updated with the address each buffer 955 * will be mapped to. The base memory address of 956 * each of the buffers will be added to the value 957 * pointed to by its corresponding handle to form 958 * the final address argument. It should contain 959 * at least \a count elements, unless \a 960 * resources is NULL in which case \a handles 961 * should be NULL as well. 962 * 963 * Note that the driver isn't required to make any guarantees about 964 * the contents of the \a handles array being valid anytime except 965 * during the subsequent calls to pipe_context::launch_grid. This 966 * means that the only sensible location handles[i] may point to is 967 * somewhere within the INPUT buffer itself. This is so to 968 * accommodate implementations that lack virtual memory but 969 * nevertheless migrate buffers on the fly, leading to resource 970 * base addresses that change on each kernel invocation or are 971 * unknown to the pipe driver. 972 */ 973 void (*set_global_binding)(struct pipe_context *context, 974 unsigned first, unsigned count, 975 struct pipe_resource **resources, 976 uint32_t **handles); 977 978 /** 979 * Launch the compute kernel starting from instruction \a pc of the 980 * currently bound compute program. 
981 */ 982 void (*launch_grid)(struct pipe_context *context, 983 const struct pipe_grid_info *info); 984 /*@}*/ 985 986 /** 987 * SVM (Share Virtual Memory) helpers 988 */ 989 /*@{*/ 990 /** 991 * Migrate range of virtual address to device or host memory. 992 * 993 * \param to_device - true if the virtual memory is migrated to the device 994 * false if the virtual memory is migrated to the host 995 * \param migrate_content - whether the content should be migrated as well 996 */ 997 void (*svm_migrate)(struct pipe_context *context, unsigned num_ptrs, 998 const void* const* ptrs, const size_t *sizes, 999 bool to_device, bool migrate_content); 1000 /*@}*/ 1001 1002 /** 1003 * Get the default sample position for an individual sample point. 1004 * 1005 * \param sample_count - total number of samples 1006 * \param sample_index - sample to get the position values for 1007 * \param out_value - return value of 2 floats for x and y position for 1008 * requested sample. 1009 */ 1010 void (*get_sample_position)(struct pipe_context *context, 1011 unsigned sample_count, 1012 unsigned sample_index, 1013 float *out_value); 1014 1015 /** 1016 * Query a timestamp in nanoseconds. This is completely equivalent to 1017 * pipe_screen::get_timestamp() but takes a context handle for drivers 1018 * that require a context. 1019 */ 1020 uint64_t (*get_timestamp)(struct pipe_context *); 1021 1022 /** 1023 * Flush the resource cache, so that the resource can be used 1024 * by an external client. Possible usage: 1025 * - flushing a resource before presenting it on the screen 1026 * - flushing a resource if some other process or device wants to use it 1027 * This shouldn't be used to flush caches if the resource is only managed 1028 * by a single pipe_screen and is not shared with another process. 1029 * (i.e. you shouldn't use it to flush caches explicitly if you want to e.g. 
1030 * use the resource for texturing) 1031 */ 1032 void (*flush_resource)(struct pipe_context *ctx, 1033 struct pipe_resource *resource); 1034 1035 /** 1036 * Invalidate the contents of the resource. This is used to 1037 * 1038 * (1) implement EGL's semantic of undefined depth/stencil 1039 * contents after a swapbuffers. This allows a tiled renderer (for 1040 * example) to not store the depth buffer. 1041 * 1042 * (2) implement GL's InvalidateBufferData. For backwards compatibility, 1043 * you must only rely on the usability for this purpose when 1044 * PIPE_CAP_INVALIDATE_BUFFER is enabled. 1045 */ 1046 void (*invalidate_resource)(struct pipe_context *ctx, 1047 struct pipe_resource *resource); 1048 1049 /** 1050 * Return information about unexpected device resets. 1051 */ 1052 enum pipe_reset_status (*get_device_reset_status)(struct pipe_context *ctx); 1053 1054 /** 1055 * Sets the reset status callback. If the pointer is null, then no callback 1056 * is set, otherwise a copy of the data should be made. 1057 */ 1058 void (*set_device_reset_callback)(struct pipe_context *ctx, 1059 const struct pipe_device_reset_callback *cb); 1060 1061 /** 1062 * Dump driver-specific debug information into a stream. This is 1063 * used by debugging tools. 1064 * 1065 * \param ctx pipe context 1066 * \param stream where the output should be written to 1067 * \param flags a mask of PIPE_DUMP_* flags 1068 */ 1069 void (*dump_debug_state)(struct pipe_context *ctx, FILE *stream, 1070 unsigned flags); 1071 1072 /** 1073 * Set the log context to which the driver should write internal debug logs 1074 * (internal states, command streams). 1075 * 1076 * The caller must ensure that the log context is destroyed and reset to 1077 * NULL before the pipe context is destroyed, and that log context functions 1078 * are only called from the driver thread. 
1079 * 1080 * \param ctx pipe context 1081 * \param log logging context 1082 */ 1083 void (*set_log_context)(struct pipe_context *ctx, struct u_log_context *log); 1084 1085 /** 1086 * Emit string marker in cmdstream 1087 */ 1088 void (*emit_string_marker)(struct pipe_context *ctx, 1089 const char *string, 1090 int len); 1091 1092 /** 1093 * Generate mipmap. 1094 * \return TRUE if mipmap generation succeeds, FALSE otherwise 1095 */ 1096 bool (*generate_mipmap)(struct pipe_context *ctx, 1097 struct pipe_resource *resource, 1098 enum pipe_format format, 1099 unsigned base_level, 1100 unsigned last_level, 1101 unsigned first_layer, 1102 unsigned last_layer); 1103 1104 /** 1105 * Create a 64-bit texture handle. 1106 * 1107 * \param ctx pipe context 1108 * \param view pipe sampler view object 1109 * \param state pipe sampler state template 1110 * \return a 64-bit texture handle if success, 0 otherwise 1111 */ 1112 uint64_t (*create_texture_handle)(struct pipe_context *ctx, 1113 struct pipe_sampler_view *view, 1114 const struct pipe_sampler_state *state); 1115 1116 /** 1117 * Delete a texture handle. 1118 * 1119 * \param ctx pipe context 1120 * \param handle 64-bit texture handle 1121 */ 1122 void (*delete_texture_handle)(struct pipe_context *ctx, uint64_t handle); 1123 1124 /** 1125 * Make a texture handle resident. 1126 * 1127 * \param ctx pipe context 1128 * \param handle 64-bit texture handle 1129 * \param resident TRUE for resident, FALSE otherwise 1130 */ 1131 void (*make_texture_handle_resident)(struct pipe_context *ctx, 1132 uint64_t handle, bool resident); 1133 1134 /** 1135 * Create a 64-bit image handle. 1136 * 1137 * \param ctx pipe context 1138 * \param image pipe image view template 1139 * \return a 64-bit image handle if success, 0 otherwise 1140 */ 1141 uint64_t (*create_image_handle)(struct pipe_context *ctx, 1142 const struct pipe_image_view *image); 1143 1144 /** 1145 * Delete an image handle. 
1146 * 1147 * \param ctx pipe context 1148 * \param handle 64-bit image handle 1149 */ 1150 void (*delete_image_handle)(struct pipe_context *ctx, uint64_t handle); 1151 1152 /** 1153 * Make an image handle resident. 1154 * 1155 * \param ctx pipe context 1156 * \param handle 64-bit image handle 1157 * \param access GL_READ_ONLY, GL_WRITE_ONLY or GL_READ_WRITE 1158 * \param resident TRUE for resident, FALSE otherwise 1159 */ 1160 void (*make_image_handle_resident)(struct pipe_context *ctx, uint64_t handle, 1161 unsigned access, bool resident); 1162 1163 /** 1164 * Call the given function from the driver thread. 1165 * 1166 * This is set by threaded contexts for use by debugging wrappers. 1167 * 1168 * \param asap if true, run the callback immediately if there are no pending 1169 * commands to be processed by the driver thread 1170 */ 1171 void (*callback)(struct pipe_context *ctx, void (*fn)(void *), void *data, 1172 bool asap); 1173 1174 /** 1175 * Set a context parameter See enum pipe_context_param for more details. 1176 */ 1177 void (*set_context_param)(struct pipe_context *ctx, 1178 enum pipe_context_param param, 1179 unsigned value); 1180 1181 /** 1182 * Creates a video buffer as decoding target, with modifiers. 1183 */ 1184 struct pipe_video_buffer *(*create_video_buffer_with_modifiers)(struct pipe_context *context, 1185 const struct pipe_video_buffer *templat, 1186 const uint64_t *modifiers, 1187 unsigned int modifiers_count); 1188 }; 1189 1190 1191 #ifdef __cplusplus 1192 } 1193 #endif 1194 1195 #endif /* PIPE_CONTEXT_H */ 1196