/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * THIS FILE IS AUTOGENERATED BY mali_trace_generator.py.
 * DO NOT EDIT.
 */

#if !defined(_KBASE_TRACEPOINTS_H)
#define _KBASE_TRACEPOINTS_H

/* Tracepoints are abstract callbacks notifying that some important
 * software or hardware event has happened.
 *
 * In this particular implementation, it results in a MIPE
 * timeline event and, in some cases, it also fires an ftrace event
 * (a.k.a. Gator events, see details below).
 */

#include "mali_kbase.h"
#include "mali_kbase_gator.h"

#include <linux/types.h>
#include <linux/atomic.h>

/* clang-format off */

struct kbase_tlstream;

extern const size_t __obj_stream_offset;
extern const size_t __aux_stream_offset;

/* This macro dispatches a kbase_tlstream from
 * a kbase_device instance. Only AUX or OBJ
 * streams can be dispatched. It is aware of
 * kbase_timeline binary representation and
 * relies on offset variables:
 * __obj_stream_offset and __aux_stream_offset.
 */
#define __TL_DISPATCH_STREAM(kbdev, stype) \
	((struct kbase_tlstream *) \
		((u8 *)kbdev->timeline + __ ## stype ## _stream_offset))
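/* Illustrative sketch (not part of the generated API): given the layout
 * described above, dispatching the OBJ stream, e.g.
 *
 *   struct kbase_tlstream *obj = __TL_DISPATCH_STREAM(kbdev, obj);
 *
 * is plain pointer arithmetic over the raw kbase_timeline buffer:
 *
 *   (struct kbase_tlstream *)((u8 *)kbdev->timeline + __obj_stream_offset)
 */
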
struct tp_desc;

/* Descriptors of timeline messages transmitted in object events stream. */
extern const char *obj_desc_header;
extern const size_t obj_desc_header_size;
/* Descriptors of timeline messages transmitted in auxiliary events stream. */
extern const char *aux_desc_header;
extern const size_t aux_desc_header_size;

#define TL_ATOM_STATE_IDLE 0
#define TL_ATOM_STATE_READY 1
#define TL_ATOM_STATE_DONE 2
#define TL_ATOM_STATE_POSTED 3

#define TL_JS_EVENT_START     GATOR_JOB_SLOT_START
#define TL_JS_EVENT_STOP      GATOR_JOB_SLOT_STOP
#define TL_JS_EVENT_SOFT_STOP GATOR_JOB_SLOT_SOFT_STOPPED

#define TLSTREAM_ENABLED (1 << 31)
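/* A minimal sketch of the gating pattern every KBASE_TLSTREAM_* macro
 * below follows (hypothetical call site, not generated code):
 *
 *   int enabled = atomic_read(&kbdev->timeline_flags);
 *
 *   if (enabled & TLSTREAM_ENABLED)
 *           ; // timeline active: the tracepoint body is emitted
 *
 * Some tracepoints test a narrower flag instead (latency, job dumping,
 * CSF), as can be seen in the macro bodies further down.
 */
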

void __kbase_tlstream_tl_new_ctx(
	struct kbase_tlstream *stream,
	const void *ctx,
	u32 ctx_nr,
	u32 tgid);
void __kbase_tlstream_tl_new_gpu(
	struct kbase_tlstream *stream,
	const void *gpu,
	u32 gpu_id,
	u32 core_count);
void __kbase_tlstream_tl_new_lpu(
	struct kbase_tlstream *stream,
	const void *lpu,
	u32 lpu_nr,
	u32 lpu_fn);
void __kbase_tlstream_tl_new_atom(
	struct kbase_tlstream *stream,
	const void *atom,
	u32 atom_nr);
void __kbase_tlstream_tl_new_as(
	struct kbase_tlstream *stream,
	const void *address_space,
	u32 as_nr);
void __kbase_tlstream_tl_del_ctx(
	struct kbase_tlstream *stream,
	const void *ctx);
void __kbase_tlstream_tl_del_atom(
	struct kbase_tlstream *stream,
	const void *atom);
void __kbase_tlstream_tl_lifelink_lpu_gpu(
	struct kbase_tlstream *stream,
	const void *lpu,
	const void *gpu);
void __kbase_tlstream_tl_lifelink_as_gpu(
	struct kbase_tlstream *stream,
	const void *address_space,
	const void *gpu);
void __kbase_tlstream_tl_ret_ctx_lpu(
	struct kbase_tlstream *stream,
	const void *ctx,
	const void *lpu);
void __kbase_tlstream_tl_ret_atom_ctx(
	struct kbase_tlstream *stream,
	const void *atom,
	const void *ctx);
void __kbase_tlstream_tl_ret_atom_lpu(
	struct kbase_tlstream *stream,
	const void *atom,
	const void *lpu,
	const char *attrib_match_list);
void __kbase_tlstream_tl_nret_ctx_lpu(
	struct kbase_tlstream *stream,
	const void *ctx,
	const void *lpu);
void __kbase_tlstream_tl_nret_atom_ctx(
	struct kbase_tlstream *stream,
	const void *atom,
	const void *ctx);
void __kbase_tlstream_tl_nret_atom_lpu(
	struct kbase_tlstream *stream,
	const void *atom,
	const void *lpu);
void __kbase_tlstream_tl_ret_as_ctx(
	struct kbase_tlstream *stream,
	const void *address_space,
	const void *ctx);
void __kbase_tlstream_tl_nret_as_ctx(
	struct kbase_tlstream *stream,
	const void *address_space,
	const void *ctx);
void __kbase_tlstream_tl_ret_atom_as(
	struct kbase_tlstream *stream,
	const void *atom,
	const void *address_space);
void __kbase_tlstream_tl_nret_atom_as(
	struct kbase_tlstream *stream,
	const void *atom,
	const void *address_space);
void __kbase_tlstream_tl_attrib_atom_config(
	struct kbase_tlstream *stream,
	const void *atom,
	u64 descriptor,
	u64 affinity,
	u32 config);
void __kbase_tlstream_tl_attrib_atom_priority(
	struct kbase_tlstream *stream,
	const void *atom,
	u32 prio);
void __kbase_tlstream_tl_attrib_atom_state(
	struct kbase_tlstream *stream,
	const void *atom,
	u32 state);
void __kbase_tlstream_tl_attrib_atom_prioritized(
	struct kbase_tlstream *stream,
	const void *atom);
void __kbase_tlstream_tl_attrib_atom_jit(
	struct kbase_tlstream *stream,
	const void *atom,
	u64 edit_addr,
	u64 new_addr,
	u32 jit_flags,
	u64 mem_flags,
	u32 j_id,
	u64 com_pgs,
	u64 extent,
	u64 va_pgs);
void __kbase_tlstream_tl_jit_usedpages(
	struct kbase_tlstream *stream,
	u64 used_pages,
	u32 j_id);
void __kbase_tlstream_tl_attrib_atom_jitallocinfo(
	struct kbase_tlstream *stream,
	const void *atom,
	u64 va_pgs,
	u64 com_pgs,
	u64 extent,
	u32 j_id,
	u32 bin_id,
	u32 max_allocs,
	u32 jit_flags,
	u32 usg_id);
void __kbase_tlstream_tl_attrib_atom_jitfreeinfo(
	struct kbase_tlstream *stream,
	const void *atom,
	u32 j_id);
void __kbase_tlstream_tl_attrib_as_config(
	struct kbase_tlstream *stream,
	const void *address_space,
	u64 transtab,
	u64 memattr,
	u64 transcfg);
void __kbase_tlstream_tl_event_lpu_softstop(
	struct kbase_tlstream *stream,
	const void *lpu);
void __kbase_tlstream_tl_event_atom_softstop_ex(
	struct kbase_tlstream *stream,
	const void *atom);
void __kbase_tlstream_tl_event_atom_softstop_issue(
	struct kbase_tlstream *stream,
	const void *atom);
void __kbase_tlstream_tl_event_atom_softjob_start(
	struct kbase_tlstream *stream,
	const void *atom);
void __kbase_tlstream_tl_event_atom_softjob_end(
	struct kbase_tlstream *stream,
	const void *atom);
void __kbase_tlstream_tl_arbiter_granted(
	struct kbase_tlstream *stream,
	const void *gpu);
void __kbase_tlstream_tl_arbiter_started(
	struct kbase_tlstream *stream,
	const void *gpu);
void __kbase_tlstream_tl_arbiter_stop_requested(
	struct kbase_tlstream *stream,
	const void *gpu);
void __kbase_tlstream_tl_arbiter_stopped(
	struct kbase_tlstream *stream,
	const void *gpu);
void __kbase_tlstream_tl_arbiter_requested(
	struct kbase_tlstream *stream,
	const void *gpu);
void __kbase_tlstream_jd_gpu_soft_reset(
	struct kbase_tlstream *stream,
	const void *gpu);
void __kbase_tlstream_aux_pm_state(
	struct kbase_tlstream *stream,
	u32 core_type,
	u64 core_state_bitset);
void __kbase_tlstream_aux_pagefault(
	struct kbase_tlstream *stream,
	u32 ctx_nr,
	u32 as_nr,
	u64 page_cnt_change);
void __kbase_tlstream_aux_pagesalloc(
	struct kbase_tlstream *stream,
	u32 ctx_nr,
	u64 page_cnt);
void __kbase_tlstream_aux_devfreq_target(
	struct kbase_tlstream *stream,
	u64 target_freq);
void __kbase_tlstream_aux_protected_enter_start(
	struct kbase_tlstream *stream,
	const void *gpu);
void __kbase_tlstream_aux_protected_enter_end(
	struct kbase_tlstream *stream,
	const void *gpu);
void __kbase_tlstream_aux_protected_leave_start(
	struct kbase_tlstream *stream,
	const void *gpu);
void __kbase_tlstream_aux_protected_leave_end(
	struct kbase_tlstream *stream,
	const void *gpu);
void __kbase_tlstream_aux_jit_stats(
	struct kbase_tlstream *stream,
	u32 ctx_nr,
	u32 bid,
	u32 max_allocs,
	u32 allocs,
	u32 va_pages,
	u32 ph_pages);
void __kbase_tlstream_aux_tiler_heap_stats(
	struct kbase_tlstream *stream,
	u32 ctx_nr,
	u64 heap_id,
	u32 va_pages,
	u32 ph_pages,
	u32 max_chunks,
	u32 chunk_size,
	u32 chunk_count,
	u32 target_in_flight,
	u32 nr_in_flight);
void __kbase_tlstream_aux_event_job_slot(
	struct kbase_tlstream *stream,
	const void *ctx,
	u32 slot_nr,
	u32 atom_nr,
	u32 event);
void __kbase_tlstream_aux_mmu_command(
	struct kbase_tlstream *stream,
	u32 kernel_ctx_id,
	u32 mmu_cmd_id,
	u32 mmu_synchronicity,
	u64 mmu_lock_addr,
	u32 mmu_lock_page_num);
void __kbase_tlstream_tl_kbase_new_device(
	struct kbase_tlstream *stream,
	u32 kbase_device_id,
	u32 kbase_device_gpu_core_count,
	u32 kbase_device_max_num_csgs,
	u32 kbase_device_as_count,
	u32 kbase_device_sb_entry_count,
	u32 kbase_device_has_cross_stream_sync,
	u32 kbase_device_supports_gpu_sleep);
void __kbase_tlstream_tl_kbase_device_program_csg(
	struct kbase_tlstream *stream,
	u32 kbase_device_id,
	u32 kernel_ctx_id,
	u32 gpu_cmdq_grp_handle,
	u32 kbase_device_csg_slot_index,
	u32 kbase_device_csg_slot_resumed);
void __kbase_tlstream_tl_kbase_device_deprogram_csg(
	struct kbase_tlstream *stream,
	u32 kbase_device_id,
	u32 kbase_device_csg_slot_index);
void __kbase_tlstream_tl_kbase_device_halt_csg(
	struct kbase_tlstream *stream,
	u32 kbase_device_id,
	u32 kbase_device_csg_slot_index);
void __kbase_tlstream_tl_kbase_new_ctx(
	struct kbase_tlstream *stream,
	u32 kernel_ctx_id,
	u32 kbase_device_id);
void __kbase_tlstream_tl_kbase_del_ctx(
	struct kbase_tlstream *stream,
	u32 kernel_ctx_id);
void __kbase_tlstream_tl_kbase_ctx_assign_as(
	struct kbase_tlstream *stream,
	u32 kernel_ctx_id,
	u32 kbase_device_as_index);
void __kbase_tlstream_tl_kbase_ctx_unassign_as(
	struct kbase_tlstream *stream,
	u32 kernel_ctx_id);
void __kbase_tlstream_tl_kbase_new_kcpuqueue(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 kernel_ctx_id,
	u32 kcpuq_num_pending_cmds);
void __kbase_tlstream_tl_kbase_del_kcpuqueue(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_signal(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	const void *fence);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_wait(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	const void *fence);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_wait(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u64 cqs_obj_gpu_addr,
	u32 cqs_obj_compare_value,
	u32 cqs_obj_inherit_error);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_set(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u64 cqs_obj_gpu_addr);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_map_import(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u64 map_import_buf_gpu_addr);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u64 map_import_buf_gpu_addr);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import_force(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u64 map_import_buf_gpu_addr);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_error_barrier(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_group_suspend(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	const void *group_suspend_buf,
	u32 gpu_cmdq_grp_handle);
void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_alloc(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_alloc(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u64 jit_alloc_gpu_alloc_addr_dest,
	u64 jit_alloc_va_pages,
	u64 jit_alloc_commit_pages,
	u64 jit_alloc_extent,
	u32 jit_alloc_jit_id,
	u32 jit_alloc_bin_id,
	u32 jit_alloc_max_allocations,
	u32 jit_alloc_flags,
	u32 jit_alloc_usage_id);
void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_alloc(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_free(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_free(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 jit_alloc_jit_id);
void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_free(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_alloc_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_alloc_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_alloc_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error,
	u64 jit_alloc_gpu_alloc_addr,
	u64 jit_alloc_mmu_flags);
void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_alloc_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_free_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_free_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_free_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error,
	u64 jit_free_pages_used);
void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_free_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_error_barrier(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_start(
	struct kbase_tlstream *stream,
	const void *kcpu_queue);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_end(
	struct kbase_tlstream *stream,
	const void *kcpu_queue,
	u32 execute_error);
void __kbase_tlstream_tl_kbase_csffw_fw_reloading(
	struct kbase_tlstream *stream,
	u64 csffw_cycle);
void __kbase_tlstream_tl_kbase_csffw_fw_enabling(
	struct kbase_tlstream *stream,
	u64 csffw_cycle);
void __kbase_tlstream_tl_kbase_csffw_fw_request_sleep(
	struct kbase_tlstream *stream,
	u64 csffw_cycle);
void __kbase_tlstream_tl_kbase_csffw_fw_request_wakeup(
	struct kbase_tlstream *stream,
	u64 csffw_cycle);
void __kbase_tlstream_tl_kbase_csffw_fw_request_halt(
	struct kbase_tlstream *stream,
	u64 csffw_cycle);
void __kbase_tlstream_tl_kbase_csffw_fw_disabling(
	struct kbase_tlstream *stream,
	u64 csffw_cycle);
void __kbase_tlstream_tl_kbase_csffw_fw_off(
	struct kbase_tlstream *stream,
	u64 csffw_cycle);
void __kbase_tlstream_tl_kbase_csffw_tlstream_overflow(
	struct kbase_tlstream *stream,
	u64 csffw_timestamp,
	u64 csffw_cycle);
void __kbase_tlstream_tl_js_sched_start(
	struct kbase_tlstream *stream,
	u32 dummy);
void __kbase_tlstream_tl_js_sched_end(
	struct kbase_tlstream *stream,
	u32 dummy);
void __kbase_tlstream_tl_jd_submit_atom_start(
	struct kbase_tlstream *stream,
	const void *atom);
void __kbase_tlstream_tl_jd_submit_atom_end(
	struct kbase_tlstream *stream,
	const void *atom);
void __kbase_tlstream_tl_jd_done_no_lock_start(
	struct kbase_tlstream *stream,
	const void *atom);
void __kbase_tlstream_tl_jd_done_no_lock_end(
	struct kbase_tlstream *stream,
	const void *atom);
void __kbase_tlstream_tl_jd_done_start(
	struct kbase_tlstream *stream,
	const void *atom);
void __kbase_tlstream_tl_jd_done_end(
	struct kbase_tlstream *stream,
	const void *atom);
void __kbase_tlstream_tl_jd_atom_complete(
	struct kbase_tlstream *stream,
	const void *atom);
void __kbase_tlstream_tl_run_atom_start(
	struct kbase_tlstream *stream,
	const void *atom,
	u32 atom_nr);
void __kbase_tlstream_tl_run_atom_end(
	struct kbase_tlstream *stream,
	const void *atom,
	u32 atom_nr);

struct kbase_tlstream;

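/* The __kbase_tlstream_* writers declared above are not intended to be
 * called directly. Call sites use the KBASE_TLSTREAM_* wrapper macros
 * defined below, which test kbdev->timeline_flags and dispatch to the
 * right stream. A hedged sketch (katom/kctx are hypothetical variables):
 *
 *   KBASE_TLSTREAM_TL_RET_ATOM_CTX(kbdev, katom, kctx);
 */
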
/**
 * KBASE_TLSTREAM_TL_NEW_CTX -
 *   object ctx is created
 *
 * @kbdev: Kbase device
 * @ctx: Name of the context object
 * @ctx_nr: Kernel context number
 * @tgid: Thread Group Id
 */
#define KBASE_TLSTREAM_TL_NEW_CTX( \
	kbdev, \
	ctx, \
	ctx_nr, \
	tgid \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_new_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				ctx, ctx_nr, tgid); \
	} while (0)

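/* Example call site (a sketch; the kctx variable and the tgid source
 * are assumptions, not mandated by this header):
 *
 *   KBASE_TLSTREAM_TL_NEW_CTX(kbdev, kctx, kctx->id,
 *                             (u32)task_tgid_nr(current));
 *
 * When TLSTREAM_ENABLED is clear, only the atomic flag read happens.
 */
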
/**
 * KBASE_TLSTREAM_TL_NEW_GPU -
 *   object gpu is created
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 * @gpu_id: ID of the GPU object
 * @core_count: Number of cores this GPU hosts
 */
#define KBASE_TLSTREAM_TL_NEW_GPU( \
	kbdev, \
	gpu, \
	gpu_id, \
	core_count \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_new_gpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				gpu, gpu_id, core_count); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_NEW_LPU -
 *   object lpu is created
 *
 * @kbdev: Kbase device
 * @lpu: Name of the Logical Processing Unit object
 * @lpu_nr: Sequential number assigned to the newly created LPU
 * @lpu_fn: Property describing functional abilities of this LPU
 */
#define KBASE_TLSTREAM_TL_NEW_LPU( \
	kbdev, \
	lpu, \
	lpu_nr, \
	lpu_fn \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_new_lpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				lpu, lpu_nr, lpu_fn); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_NEW_ATOM -
 *   object atom is created
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @atom_nr: Sequential number of an atom
 */
#define KBASE_TLSTREAM_TL_NEW_ATOM( \
	kbdev, \
	atom, \
	atom_nr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_new_atom( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, atom_nr); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_NEW_AS -
 *   address space object is created
 *
 * @kbdev: Kbase device
 * @address_space: Name of the address space object
 * @as_nr: Address space number
 */
#define KBASE_TLSTREAM_TL_NEW_AS( \
	kbdev, \
	address_space, \
	as_nr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_new_as( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				address_space, as_nr); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_DEL_CTX -
 *   context is destroyed
 *
 * @kbdev: Kbase device
 * @ctx: Name of the context object
 */
#define KBASE_TLSTREAM_TL_DEL_CTX( \
	kbdev, \
	ctx \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_del_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				ctx); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_DEL_ATOM -
 *   atom is destroyed
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_DEL_ATOM( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_del_atom( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_LIFELINK_LPU_GPU -
 *   lpu is deleted with gpu
 *
 * @kbdev: Kbase device
 * @lpu: Name of the Logical Processing Unit object
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_TL_LIFELINK_LPU_GPU( \
	kbdev, \
	lpu, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_lifelink_lpu_gpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				lpu, gpu); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_LIFELINK_AS_GPU -
 *   address space is deleted with gpu
 *
 * @kbdev: Kbase device
 * @address_space: Name of the address space object
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_TL_LIFELINK_AS_GPU( \
	kbdev, \
	address_space, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_lifelink_as_gpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				address_space, gpu); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_RET_CTX_LPU -
 *   context is retained by lpu
 *
 * @kbdev: Kbase device
 * @ctx: Name of the context object
 * @lpu: Name of the Logical Processing Unit object
 */
#define KBASE_TLSTREAM_TL_RET_CTX_LPU( \
	kbdev, \
	ctx, \
	lpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_ret_ctx_lpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				ctx, lpu); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_RET_ATOM_CTX -
 *   atom is retained by context
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @ctx: Name of the context object
 */
#define KBASE_TLSTREAM_TL_RET_ATOM_CTX( \
	kbdev, \
	atom, \
	ctx \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_ret_atom_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, ctx); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_RET_ATOM_LPU -
 *   atom is retained by lpu
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @lpu: Name of the Logical Processing Unit object
 * @attrib_match_list: List containing match operator attributes
 */
#define KBASE_TLSTREAM_TL_RET_ATOM_LPU( \
	kbdev, \
	atom, \
	lpu, \
	attrib_match_list \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_ret_atom_lpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, lpu, attrib_match_list); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_NRET_CTX_LPU -
 *   context is released by lpu
 *
 * @kbdev: Kbase device
 * @ctx: Name of the context object
 * @lpu: Name of the Logical Processing Unit object
 */
#define KBASE_TLSTREAM_TL_NRET_CTX_LPU( \
	kbdev, \
	ctx, \
	lpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_nret_ctx_lpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				ctx, lpu); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_NRET_ATOM_CTX -
 *   atom is released by context
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @ctx: Name of the context object
 */
#define KBASE_TLSTREAM_TL_NRET_ATOM_CTX( \
	kbdev, \
	atom, \
	ctx \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_nret_atom_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, ctx); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_NRET_ATOM_LPU -
 *   atom is released by lpu
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @lpu: Name of the Logical Processing Unit object
 */
#define KBASE_TLSTREAM_TL_NRET_ATOM_LPU( \
	kbdev, \
	atom, \
	lpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_nret_atom_lpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, lpu); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_RET_AS_CTX -
 *   address space is retained by context
 *
 * @kbdev: Kbase device
 * @address_space: Name of the address space object
 * @ctx: Name of the context object
 */
#define KBASE_TLSTREAM_TL_RET_AS_CTX( \
	kbdev, \
	address_space, \
	ctx \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_ret_as_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				address_space, ctx); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_NRET_AS_CTX -
 *   address space is released by context
 *
 * @kbdev: Kbase device
 * @address_space: Name of the address space object
 * @ctx: Name of the context object
 */
#define KBASE_TLSTREAM_TL_NRET_AS_CTX( \
	kbdev, \
	address_space, \
	ctx \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_nret_as_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				address_space, ctx); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_RET_ATOM_AS -
 *   atom is retained by address space
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @address_space: Name of the address space object
 */
#define KBASE_TLSTREAM_TL_RET_ATOM_AS( \
	kbdev, \
	atom, \
	address_space \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_ret_atom_as( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, address_space); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_NRET_ATOM_AS -
 *   atom is released by address space
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @address_space: Name of the address space object
 */
#define KBASE_TLSTREAM_TL_NRET_ATOM_AS( \
	kbdev, \
	atom, \
	address_space \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_nret_atom_as( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, address_space); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG -
 *   atom job slot attributes
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @descriptor: Job descriptor address
 * @affinity: Job affinity
 * @config: Job config
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG( \
	kbdev, \
	atom, \
	descriptor, \
	affinity, \
	config \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_attrib_atom_config( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, descriptor, affinity, config); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY -
 *   atom priority
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @prio: Atom priority
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY( \
	kbdev, \
	atom, \
	prio \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
			__kbase_tlstream_tl_attrib_atom_priority( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, prio); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE -
 *   atom state
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @state: Atom state
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE( \
	kbdev, \
	atom, \
	state \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
			__kbase_tlstream_tl_attrib_atom_state( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, state); \
	} while (0)

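/* The state argument takes one of the TL_ATOM_STATE_* values defined at
 * the top of this file, e.g. (illustrative only, katom is hypothetical):
 *
 *   KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(kbdev, katom, TL_ATOM_STATE_READY);
 */
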
/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED -
 *   atom caused priority change
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
			__kbase_tlstream_tl_attrib_atom_prioritized( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT -
 *   jit done for atom
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @edit_addr: Address edited by jit
 * @new_addr: Address placed into the edited location
 * @jit_flags: Flags specifying the special requirements for
 *             the JIT allocation.
 * @mem_flags: Flags defining the properties of a memory region
 * @j_id: Unique ID provided by the caller, this is used
 *        to pair allocation and free requests.
 * @com_pgs: The minimum number of physical pages which
 *           should back the allocation.
 * @extent: Granularity of physical pages to grow the
 *          allocation by during a fault.
 * @va_pgs: The minimum number of virtual pages required
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT( \
	kbdev, \
	atom, \
	edit_addr, \
	new_addr, \
	jit_flags, \
	mem_flags, \
	j_id, \
	com_pgs, \
	extent, \
	va_pgs \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_JOB_DUMPING_ENABLED) \
			__kbase_tlstream_tl_attrib_atom_jit( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, edit_addr, new_addr, jit_flags, mem_flags, j_id, com_pgs, extent, va_pgs); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JIT_USEDPAGES -
 *   used pages for jit
 *
 * @kbdev: Kbase device
 * @used_pages: Number of pages used for jit
 * @j_id: Unique ID provided by the caller, this is used
 *        to pair allocation and free requests.
 */
#define KBASE_TLSTREAM_TL_JIT_USEDPAGES( \
	kbdev, \
	used_pages, \
	j_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jit_usedpages( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				used_pages, j_id); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO -
 *   Information about JIT allocations
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @va_pgs: The minimum number of virtual pages required
 * @com_pgs: The minimum number of physical pages which
 *           should back the allocation.
 * @extent: Granularity of physical pages to grow the
 *          allocation by during a fault.
 * @j_id: Unique ID provided by the caller, this is used
 *        to pair allocation and free requests.
 * @bin_id: The JIT allocation bin, used in conjunction with
 *          max_allocations to limit the number of each
 *          type of JIT allocation.
 * @max_allocs: Maximum allocations allowed in this bin.
 * @jit_flags: Flags specifying the special requirements for
 *             the JIT allocation.
 * @usg_id: A hint about which allocation should be reused.
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO( \
	kbdev, \
	atom, \
	va_pgs, \
	com_pgs, \
	extent, \
	j_id, \
	bin_id, \
	max_allocs, \
	jit_flags, \
	usg_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_attrib_atom_jitallocinfo( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, va_pgs, com_pgs, extent, j_id, bin_id, max_allocs, jit_flags, usg_id); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO -
 *   Information about JIT frees
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @j_id: Unique ID provided by the caller, this is used
 *        to pair allocation and free requests.
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO( \
	kbdev, \
	atom, \
	j_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_attrib_atom_jitfreeinfo( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, j_id); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG -
 *   address space attributes
 *
 * @kbdev: Kbase device
 * @address_space: Name of the address space object
 * @transtab: Configuration of the TRANSTAB register
 * @memattr: Configuration of the MEMATTR register
 * @transcfg: Configuration of the TRANSCFG register (or zero if not present)
 */
#define KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG( \
	kbdev, \
	address_space, \
	transtab, \
	memattr, \
	transcfg \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_attrib_as_config( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				address_space, transtab, memattr, transcfg); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP -
 *   softstop event on given lpu
 *
 * @kbdev: Kbase device
 * @lpu: Name of the Logical Processing Unit object
 */
#define KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP( \
	kbdev, \
	lpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_event_lpu_softstop( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				lpu); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX -
 *   atom softstopped
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_event_atom_softstop_ex( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE -
 *   atom softstop issued
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_event_atom_softstop_issue( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START -
 *   atom soft job has started
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_event_atom_softjob_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END -
 *   atom soft job has completed
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_event_atom_softjob_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ARBITER_GRANTED -
 *   Arbiter has granted gpu access
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_TL_ARBITER_GRANTED( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_arbiter_granted( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				gpu); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ARBITER_STARTED -
 *   Driver is running again and able to process jobs
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_TL_ARBITER_STARTED( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_arbiter_started( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				gpu); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ARBITER_STOP_REQUESTED -
 *   Arbiter has requested driver to stop using gpu
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_TL_ARBITER_STOP_REQUESTED( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_arbiter_stop_requested( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				gpu); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ARBITER_STOPPED -
 *   Driver has stopped using gpu
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_TL_ARBITER_STOPPED( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_arbiter_stopped( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				gpu); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_ARBITER_REQUESTED -
 *   Driver has requested the arbiter for gpu access
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_TL_ARBITER_REQUESTED( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = \
			atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_arbiter_requested( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				gpu); \
	} while (0)

/**
 * KBASE_TLSTREAM_JD_GPU_SOFT_RESET -
 *   gpu soft reset
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_JD_GPU_SOFT_RESET( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_jd_gpu_soft_reset( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				gpu); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_PM_STATE -
 *   PM state
 *
 * @kbdev: Kbase device
 * @core_type: Core type (shader, tiler, l2 cache, l3 cache)
 * @core_state_bitset: 64-bit bitmask reporting power state of the cores
 *                     (1-ON, 0-OFF)
 */
#define KBASE_TLSTREAM_AUX_PM_STATE( \
	kbdev, \
	core_type, \
	core_state_bitset \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_pm_state( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				core_type, core_state_bitset); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_PAGEFAULT -
 *   Page fault
 *
 * @kbdev: Kbase device
 * @ctx_nr: Kernel context number
 * @as_nr: Address space number
 * @page_cnt_change: Number of pages to be added
 */
#define KBASE_TLSTREAM_AUX_PAGEFAULT( \
	kbdev, \
	ctx_nr, \
	as_nr, \
	page_cnt_change \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_pagefault( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				ctx_nr, as_nr, page_cnt_change); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_PAGESALLOC -
 *   Total alloc pages change
 *
 * @kbdev: Kbase device
 * @ctx_nr: Kernel context number
 * @page_cnt: Number of pages used by the context
 */
#define KBASE_TLSTREAM_AUX_PAGESALLOC( \
	kbdev, \
	ctx_nr, \
	page_cnt \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_pagesalloc( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				ctx_nr, page_cnt); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_DEVFREQ_TARGET -
 *   New device frequency target
 *
 * @kbdev: Kbase device
 * @target_freq: New target frequency
 */
#define KBASE_TLSTREAM_AUX_DEVFREQ_TARGET( \
	kbdev, \
	target_freq \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_devfreq_target( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				target_freq); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START -
 *   enter protected mode start
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
			__kbase_tlstream_aux_protected_enter_start( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				gpu); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END -
 *   enter protected mode end
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
			__kbase_tlstream_aux_protected_enter_end( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				gpu); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START -
 *   leave protected mode start
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
			__kbase_tlstream_aux_protected_leave_start( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				gpu); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END -
 *   leave protected mode end
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 */
#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END( \
	kbdev, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
			__kbase_tlstream_aux_protected_leave_end( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				gpu); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_JIT_STATS -
 *   per-bin JIT statistics
 *
 * @kbdev: Kbase device
 * @ctx_nr: Kernel context number
 * @bid: JIT bin id
 * @max_allocs: Maximum allocations allowed in this bin.
 * @allocs: Number of active allocations in this bin
 * @va_pages: Number of virtual pages allocated in this bin
 * @ph_pages: Number of physical pages allocated in this bin
 */
#define KBASE_TLSTREAM_AUX_JIT_STATS( \
	kbdev, \
	ctx_nr, \
	bid, \
	max_allocs, \
	allocs, \
	va_pages, \
	ph_pages \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_jit_stats( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				ctx_nr, bid, max_allocs, allocs, va_pages, ph_pages); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_TILER_HEAP_STATS -
 *   Tiler Heap statistics
 *
 * @kbdev: Kbase device
 * @ctx_nr: Kernel context number
 * @heap_id: Unique id used to represent a heap under a context
 * @va_pages: Number of virtual pages allocated in this bin
 * @ph_pages: Number of physical pages allocated in this bin
 * @max_chunks: The maximum number of chunks that the heap should be allowed to use
 * @chunk_size: Size of each chunk in tiler heap, in bytes
 * @chunk_count: The number of chunks currently allocated in the tiler heap
 * @target_in_flight: Number of render-passes that the driver should attempt
 *                    to keep in flight for which allocation of new chunks is allowed
 * @nr_in_flight: Number of render-passes that are in flight
 */
#define KBASE_TLSTREAM_AUX_TILER_HEAP_STATS( \
	kbdev, \
	ctx_nr, \
	heap_id, \
	va_pages, \
	ph_pages, \
	max_chunks, \
	chunk_size, \
	chunk_count, \
	target_in_flight, \
	nr_in_flight \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_tiler_heap_stats( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				ctx_nr, heap_id, va_pages, ph_pages, max_chunks, chunk_size, chunk_count, target_in_flight, nr_in_flight); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT -
 *   event on a given job slot
 *
 * @kbdev: Kbase device
 * @ctx: Name of the context object
 * @slot_nr: Job slot number
 * @atom_nr: Sequential number of an atom
 * @event: Event type. One of TL_JS_EVENT values
 */
#define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT( \
	kbdev, \
	ctx, \
	slot_nr, \
	atom_nr, \
	event \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_event_job_slot( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				ctx, slot_nr, atom_nr, event); \
	} while (0)

/**
 * KBASE_TLSTREAM_AUX_MMU_COMMAND -
 *   mmu commands with synchronicity info
 *
 * @kbdev: Kbase device
 * @kernel_ctx_id: Unique ID for the KBase Context
 * @mmu_cmd_id: MMU Command ID (e.g. AS_COMMAND_UPDATE)
 * @mmu_synchronicity: Indicates whether the command is related to the current
 *                     running job that needs to be resolved to make it
 *                     progress (synchronous, e.g. grow on page fault, JIT)
 *                     or not (asynchronous, e.g. IOCTL calls from user-space).
 *                     This param will be 0 if it is an asynchronous operation.
 * @mmu_lock_addr: start address of regions to be locked/unlocked/invalidated
 * @mmu_lock_page_num: number of pages to be locked/unlocked/invalidated
 */
#define KBASE_TLSTREAM_AUX_MMU_COMMAND( \
	kbdev, \
	kernel_ctx_id, \
	mmu_cmd_id, \
	mmu_synchronicity, \
	mmu_lock_addr, \
	mmu_lock_page_num \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_mmu_command( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				kernel_ctx_id, mmu_cmd_id, mmu_synchronicity, mmu_lock_addr, mmu_lock_page_num); \
	} while (0)

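/* Sketch of a caller (all values are hypothetical): report a synchronous
 * lock command over 16 pages for kernel context 7. AS_COMMAND_LOCK is an
 * assumed command ID here; the doc above only cites AS_COMMAND_UPDATE.
 *
 *   KBASE_TLSTREAM_AUX_MMU_COMMAND(kbdev, 7, AS_COMMAND_LOCK, 1,
 *                                  lock_addr, 16);
 */
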
/**
 * KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE -
 *   New KBase Device
 *
 * @kbdev: Kbase device
 * @kbase_device_id: The ID of the physical hardware
 * @kbase_device_gpu_core_count: The number of gpu cores in the physical hardware
 * @kbase_device_max_num_csgs: The max number of CSGs the physical hardware supports
 * @kbase_device_as_count: The number of address spaces the physical hardware has available
 * @kbase_device_sb_entry_count: The number of entries each scoreboard set in the
 *                               physical hardware has available
 * @kbase_device_has_cross_stream_sync: Whether cross-stream synchronization is supported
 * @kbase_device_supports_gpu_sleep: Whether GPU sleep is supported
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE( \
	kbdev, \
	kbase_device_id, \
	kbase_device_gpu_core_count, \
	kbase_device_max_num_csgs, \
	kbase_device_as_count, \
	kbase_device_sb_entry_count, \
	kbase_device_has_cross_stream_sync, \
	kbase_device_supports_gpu_sleep \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_new_device( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kbase_device_id, kbase_device_gpu_core_count, kbase_device_max_num_csgs, kbase_device_as_count, kbase_device_sb_entry_count, kbase_device_has_cross_stream_sync, kbase_device_supports_gpu_sleep); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE( \
	kbdev, \
	kbase_device_id, \
	kbase_device_gpu_core_count, \
	kbase_device_max_num_csgs, \
	kbase_device_as_count, \
	kbase_device_sb_entry_count, \
	kbase_device_has_cross_stream_sync, \
	kbase_device_supports_gpu_sleep \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

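/* Note on the MALI_USE_CSF pattern used by the CSF tracepoints: when the
 * driver is built without CSF support, each macro is defined as an empty
 * do { } while (0), so call sites need no #if guards of their own. E.g.
 * this (with hypothetical argument names) is always safe to write:
 *
 *   KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE(kbdev, dev_id, core_count, max_csgs,
 *                                      as_count, sb_entries, has_xstream,
 *                                      can_sleep);
 */
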
/**
 * KBASE_TLSTREAM_TL_KBASE_DEVICE_PROGRAM_CSG -
 *   CSG is programmed to a slot
 *
 * @kbdev: Kbase device
 * @kbase_device_id: The ID of the physical hardware
 * @kernel_ctx_id: Unique ID for the KBase Context
 * @gpu_cmdq_grp_handle: GPU Command Queue Group handle which will match userspace
 * @kbase_device_csg_slot_index: The index of the slot in the scheduler being programmed
 * @kbase_device_csg_slot_resumed: Whether the csg is being resumed
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_PROGRAM_CSG( \
	kbdev, \
	kbase_device_id, \
	kernel_ctx_id, \
	gpu_cmdq_grp_handle, \
	kbase_device_csg_slot_index, \
	kbase_device_csg_slot_resumed \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_device_program_csg( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kbase_device_id, kernel_ctx_id, gpu_cmdq_grp_handle, kbase_device_csg_slot_index, kbase_device_csg_slot_resumed); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_PROGRAM_CSG( \
	kbdev, \
	kbase_device_id, \
	kernel_ctx_id, \
	gpu_cmdq_grp_handle, \
	kbase_device_csg_slot_index, \
	kbase_device_csg_slot_resumed \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_DEVICE_DEPROGRAM_CSG -
 *   CSG is deprogrammed from a slot
 *
 * @kbdev: Kbase device
 * @kbase_device_id: The ID of the physical hardware
 * @kbase_device_csg_slot_index: The index of the slot in the scheduler being programmed
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_DEPROGRAM_CSG( \
	kbdev, \
	kbase_device_id, \
	kbase_device_csg_slot_index \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_device_deprogram_csg( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kbase_device_id, kbase_device_csg_slot_index); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_DEPROGRAM_CSG( \
	kbdev, \
	kbase_device_id, \
	kbase_device_csg_slot_index \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_DEVICE_HALT_CSG -
 *   CSG is halted
 *
 * @kbdev: Kbase device
 * @kbase_device_id: The ID of the physical hardware
 * @kbase_device_csg_slot_index: The index of the slot in the scheduler being programmed
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_HALT_CSG( \
	kbdev, \
	kbase_device_id, \
	kbase_device_csg_slot_index \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_device_halt_csg( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kbase_device_id, kbase_device_csg_slot_index); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_HALT_CSG( \
	kbdev, \
	kbase_device_id, \
	kbase_device_csg_slot_index \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_NEW_CTX -
 *   New KBase Context
 *
 * @kbdev: Kbase device
 * @kernel_ctx_id: Unique ID for the KBase Context
 * @kbase_device_id: The ID of the physical hardware
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_NEW_CTX( \
	kbdev, \
	kernel_ctx_id, \
	kbase_device_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_new_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kernel_ctx_id, kbase_device_id); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_NEW_CTX( \
	kbdev, \
	kernel_ctx_id, \
	kbase_device_id \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_DEL_CTX -
 *   Delete KBase Context
 *
 * @kbdev: Kbase device
 * @kernel_ctx_id: Unique ID for the KBase Context
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEL_CTX( \
	kbdev, \
	kernel_ctx_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_del_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kernel_ctx_id); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEL_CTX( \
	kbdev, \
	kernel_ctx_id \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_CTX_ASSIGN_AS -
 *   Address Space is assigned to a KBase context
 *
 * @kbdev: Kbase device
 * @kernel_ctx_id: Unique ID for the KBase Context
 * @kbase_device_as_index: The index of the device address space being assigned
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CTX_ASSIGN_AS( \
	kbdev, \
	kernel_ctx_id, \
	kbase_device_as_index \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_ctx_assign_as( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kernel_ctx_id, kbase_device_as_index); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CTX_ASSIGN_AS( \
	kbdev, \
	kernel_ctx_id, \
	kbase_device_as_index \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS -
 *   Address Space is unassigned from a KBase context
 *
 * @kbdev: Kbase device
 * @kernel_ctx_id: Unique ID for the KBase Context
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS( \
	kbdev, \
	kernel_ctx_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_ctx_unassign_as( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kernel_ctx_id); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS( \
	kbdev, \
	kernel_ctx_id \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE -
 * New KCPU Queue
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @kernel_ctx_id: Unique ID for the KBase Context
 * @kcpuq_num_pending_cmds: Number of commands already enqueued
 * in the KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE( \
	kbdev, \
	kcpu_queue, \
	kernel_ctx_id, \
	kcpuq_num_pending_cmds \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_new_kcpuqueue( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, kernel_ctx_id, kcpuq_num_pending_cmds); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE( \
	kbdev, \
	kcpu_queue, \
	kernel_ctx_id, \
	kcpuq_num_pending_cmds \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_DEL_KCPUQUEUE -
 * Delete KCPU Queue
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEL_KCPUQUEUE( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_del_kcpuqueue( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEL_KCPUQUEUE( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL -
 * KCPU Queue enqueues Signal on Fence
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @fence: Fence object handle
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL( \
	kbdev, \
	kcpu_queue, \
	fence \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_signal( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, fence); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL( \
	kbdev, \
	kcpu_queue, \
	fence \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT -
 * KCPU Queue enqueues Wait on Fence
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @fence: Fence object handle
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT( \
	kbdev, \
	kcpu_queue, \
	fence \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_wait( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, fence); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT( \
	kbdev, \
	kcpu_queue, \
	fence \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */
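/* Usage sketch (illustrative; `queue` and `fence` are hypothetical, as are
 * the fields dereferenced below): a new KCPU queue is announced once, and
 * each enqueued command then gets its own tracepoint. The queue pointer is
 * the correlation key for all later events on that queue.
 *
 *	KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE(kbdev, queue,
 *		queue->kctx->id, queue->num_pending_cmds);
 *	KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL(kbdev,
 *		queue, fence);
 */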
/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT -
 * KCPU Queue enqueues Wait on Cross Queue Sync Object
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @cqs_obj_gpu_addr: CQS Object GPU pointer
 * @cqs_obj_compare_value: Semaphore value that should be exceeded
 * for the WAIT to pass
 * @cqs_obj_inherit_error: Flag which indicates if the CQS object error
 * state should be inherited by the queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
	kbdev, \
	kcpu_queue, \
	cqs_obj_gpu_addr, \
	cqs_obj_compare_value, \
	cqs_obj_inherit_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_wait( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, cqs_obj_gpu_addr, cqs_obj_compare_value, cqs_obj_inherit_error); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
	kbdev, \
	kcpu_queue, \
	cqs_obj_gpu_addr, \
	cqs_obj_compare_value, \
	cqs_obj_inherit_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET -
 * KCPU Queue enqueues Set on Cross Queue Sync Object
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @cqs_obj_gpu_addr: CQS Object GPU pointer
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET( \
	kbdev, \
	kcpu_queue, \
	cqs_obj_gpu_addr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_set( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, cqs_obj_gpu_addr); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET( \
	kbdev, \
	kcpu_queue, \
	cqs_obj_gpu_addr \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT -
 * KCPU Queue enqueues Map Import
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @map_import_buf_gpu_addr: Map import buffer GPU pointer
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT( \
	kbdev, \
	kcpu_queue, \
	map_import_buf_gpu_addr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_map_import( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, map_import_buf_gpu_addr); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT( \
	kbdev, \
	kcpu_queue, \
	map_import_buf_gpu_addr \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT -
 * KCPU Queue enqueues Unmap Import
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @map_import_buf_gpu_addr: Map import buffer GPU pointer
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT( \
	kbdev, \
	kcpu_queue, \
	map_import_buf_gpu_addr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, map_import_buf_gpu_addr); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT( \
	kbdev, \
	kcpu_queue, \
	map_import_buf_gpu_addr \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */
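/* Usage sketch (illustrative; `consumer_q`, `producer_q`, `cqs_addr`,
 * `compare_val` and `inherit_err` are hypothetical): CQS WAIT and SET
 * operate on the same GPU address, so a producer/consumer pair is
 * typically traced as one WAIT (with its compare value and
 * error-inheritance flag) and one matching SET.
 *
 *	KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT(kbdev,
 *		consumer_q, cqs_addr, compare_val, inherit_err);
 *	KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET(kbdev,
 *		producer_q, cqs_addr);
 */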
/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE -
 * KCPU Queue enqueues Unmap Import ignoring reference count
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @map_import_buf_gpu_addr: Map import buffer GPU pointer
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE( \
	kbdev, \
	kcpu_queue, \
	map_import_buf_gpu_addr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import_force( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, map_import_buf_gpu_addr); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE( \
	kbdev, \
	kcpu_queue, \
	map_import_buf_gpu_addr \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER -
 * KCPU Queue enqueues Error Barrier
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_error_barrier( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND -
 * KCPU Queue enqueues Group Suspend
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @group_suspend_buf: Pointer to the suspend buffer structure
 * @gpu_cmdq_grp_handle: GPU Command Queue Group handle which will match userspace
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND( \
	kbdev, \
	kcpu_queue, \
	group_suspend_buf, \
	gpu_cmdq_grp_handle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_group_suspend( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, group_suspend_buf, gpu_cmdq_grp_handle); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND( \
	kbdev, \
	kcpu_queue, \
	group_suspend_buf, \
	gpu_cmdq_grp_handle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */
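/* Usage sketch (illustrative; `queue`, `sus_buf` and `group` are
 * hypothetical): the group-suspend enqueue records both the suspend buffer
 * and the group handle, so the event can later be matched against the
 * userspace-visible queue group.
 *
 *	KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND(kbdev,
 *		queue, sus_buf, group->handle);
 */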
/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
 * Begin array of KCPU Queue enqueues JIT Alloc
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_alloc( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
 * Array item of KCPU Queue enqueues JIT Alloc
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @jit_alloc_gpu_alloc_addr_dest: The GPU virtual address to write
 * the JIT allocated GPU virtual address to
 * @jit_alloc_va_pages: The minimum number of virtual pages required
 * @jit_alloc_commit_pages: The minimum number of physical pages which
 * should back the allocation
 * @jit_alloc_extent: Granularity of physical pages to grow the allocation
 * by during a fault
 * @jit_alloc_jit_id: Unique ID provided by the caller; it is used
 * to pair allocation and free requests. Zero is not a valid value
 * @jit_alloc_bin_id: The JIT allocation bin, used in conjunction with
 * max_allocations to limit the number of each type of JIT allocation
 * @jit_alloc_max_allocations: The maximum number of allocations
 * allowed within the bin specified by bin_id. Should be the same for all
 * JIT allocations within the same bin.
 * @jit_alloc_flags: Flags specifying the special requirements for the
 * JIT allocation
 * @jit_alloc_usage_id: A hint about which allocation should be
 * reused. The kernel should attempt to use a previous allocation with the same
 * usage_id
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
	kbdev, \
	kcpu_queue, \
	jit_alloc_gpu_alloc_addr_dest, \
	jit_alloc_va_pages, \
	jit_alloc_commit_pages, \
	jit_alloc_extent, \
	jit_alloc_jit_id, \
	jit_alloc_bin_id, \
	jit_alloc_max_allocations, \
	jit_alloc_flags, \
	jit_alloc_usage_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_alloc( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, jit_alloc_gpu_alloc_addr_dest, jit_alloc_va_pages, jit_alloc_commit_pages, jit_alloc_extent, jit_alloc_jit_id, jit_alloc_bin_id, jit_alloc_max_allocations, jit_alloc_flags, jit_alloc_usage_id); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
	kbdev, \
	kcpu_queue, \
	jit_alloc_gpu_alloc_addr_dest, \
	jit_alloc_va_pages, \
	jit_alloc_commit_pages, \
	jit_alloc_extent, \
	jit_alloc_jit_id, \
	jit_alloc_bin_id, \
	jit_alloc_max_allocations, \
	jit_alloc_flags, \
	jit_alloc_usage_id \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
 * End array of KCPU Queue enqueues JIT Alloc
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_alloc( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */
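/* Usage sketch (illustrative; `queue`, `count` and the `info[]` fields are
 * hypothetical): multi-valued commands are traced as an
 * ARRAY_BEGIN / ARRAY_ITEM... / ARRAY_END bracket, one ITEM per element.
 *
 *	KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC(
 *		kbdev, queue);
 *	for (i = 0; i < count; i++)
 *		KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC(
 *			kbdev, queue, info[i].gpu_alloc_addr, info[i].va_pages,
 *			info[i].commit_pages, info[i].extent, info[i].id,
 *			info[i].bin_id, info[i].max_allocations, info[i].flags,
 *			info[i].usage_id);
 *	KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC(
 *		kbdev, queue);
 */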
/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE -
 * Begin array of KCPU Queue enqueues JIT Free
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_free( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE -
 * Array item of KCPU Queue enqueues JIT Free
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @jit_alloc_jit_id: Unique ID provided by the caller; it is used
 * to pair allocation and free requests. Zero is not a valid value
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE( \
	kbdev, \
	kcpu_queue, \
	jit_alloc_jit_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_free( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, jit_alloc_jit_id); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE( \
	kbdev, \
	kcpu_queue, \
	jit_alloc_jit_id \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE -
 * End array of KCPU Queue enqueues JIT Free
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_free( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START -
 * KCPU Queue starts a Signal on Fence
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */
/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END -
 * KCPU Queue ends a Signal on Fence
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, execute_error); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START -
 * KCPU Queue starts a Wait on Fence
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END -
 * KCPU Queue ends a Wait on Fence
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, execute_error); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START -
 * KCPU Queue starts a Wait on an array of Cross Queue Sync Objects
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */
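/* Usage sketch (illustrative; `queue`, `err` and wait_for_fence() are
 * hypothetical): execution-side events come in START/END pairs around the
 * blocking work, and only the END event carries the completion status,
 * with zero meaning success per the @execute_error convention.
 *
 *	KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START(kbdev,
 *		queue);
 *	err = wait_for_fence(queue);
 *	KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END(kbdev,
 *		queue, err);
 */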
/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END -
 * KCPU Queue ends a Wait on an array of Cross Queue Sync Objects
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, execute_error); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET -
 * KCPU Queue executes a Set on an array of Cross Queue Sync Objects
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, execute_error); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START -
 * KCPU Queue starts a Map Import
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END -
 * KCPU Queue ends a Map Import
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, execute_error); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */
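/* Usage sketch (illustrative; `queue` and `err` are hypothetical): unlike
 * the WAIT variants above, a CQS SET executes immediately, so it is traced
 * as a single event that carries the completion status directly rather
 * than as a START/END pair.
 *
 *	KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET(kbdev, queue, err);
 */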
/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START -
 * KCPU Queue starts an Unmap Import
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END -
 * KCPU Queue ends an Unmap Import
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, execute_error); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START -
 * KCPU Queue starts an Unmap Import ignoring reference count
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END -
 * KCPU Queue ends an Unmap Import ignoring reference count
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, execute_error); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */
/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START -
 * KCPU Queue starts an array of JIT Allocs
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_alloc_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
 * Begin array of KCPU Queue ends an array of JIT Allocs
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_alloc_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
 * Array item of KCPU Queue ends an array of JIT Allocs
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 * @jit_alloc_gpu_alloc_addr: The JIT allocated GPU virtual address
 * @jit_alloc_mmu_flags: The MMU flags for the JIT allocation
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
	kbdev, \
	kcpu_queue, \
	execute_error, \
	jit_alloc_gpu_alloc_addr, \
	jit_alloc_mmu_flags \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_alloc_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, execute_error, jit_alloc_gpu_alloc_addr, jit_alloc_mmu_flags); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
	kbdev, \
	kcpu_queue, \
	execute_error, \
	jit_alloc_gpu_alloc_addr, \
	jit_alloc_mmu_flags \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */
/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
 * End array of KCPU Queue ends an array of JIT Allocs
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_alloc_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START -
 * KCPU Queue starts an array of JIT Frees
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_free_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END -
 * Begin array of KCPU Queue ends an array of JIT Frees
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_free_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END -
 * Array item of KCPU Queue ends an array of JIT Frees
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 * @jit_free_pages_used: The actual number of pages used by the JIT
 * allocation
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
	kbdev, \
	kcpu_queue, \
	execute_error, \
	jit_free_pages_used \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_free_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, execute_error, jit_free_pages_used); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
	kbdev, \
	kcpu_queue, \
	execute_error, \
	jit_free_pages_used \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END -
 * End array of KCPU Queue ends an array of JIT Frees
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_free_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_ERROR_BARRIER -
 * KCPU Queue executes an Error Barrier
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_ERROR_BARRIER( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_error_barrier( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_ERROR_BARRIER( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_START -
 * KCPU Queue starts a group suspend
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_START( \
	kbdev, \
	kcpu_queue \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_END -
 * KCPU Queue ends a group suspend
 *
 * @kbdev: Kbase device
 * @kcpu_queue: KCPU queue
 * @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				kcpu_queue, execute_error); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_END( \
	kbdev, \
	kcpu_queue, \
	execute_error \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_RELOADING -
 * CSF FW is being reloaded
 *
 * @kbdev: Kbase device
 * @csffw_cycle: Cycle number of a CSFFW event
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_RELOADING( \
	kbdev, \
	csffw_cycle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_csffw_fw_reloading( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				csffw_cycle); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_RELOADING( \
	kbdev, \
	csffw_cycle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */
/**
 * KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_ENABLING -
 * CSF FW is being enabled
 *
 * @kbdev: Kbase device
 * @csffw_cycle: Cycle number of a CSFFW event
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_ENABLING( \
	kbdev, \
	csffw_cycle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_csffw_fw_enabling( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				csffw_cycle); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_ENABLING( \
	kbdev, \
	csffw_cycle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_SLEEP -
 * CSF FW sleep is requested
 *
 * @kbdev: Kbase device
 * @csffw_cycle: Cycle number of a CSFFW event
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_SLEEP( \
	kbdev, \
	csffw_cycle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_csffw_fw_request_sleep( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				csffw_cycle); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_SLEEP( \
	kbdev, \
	csffw_cycle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP -
 * CSF FW wake up is requested
 *
 * @kbdev: Kbase device
 * @csffw_cycle: Cycle number of a CSFFW event
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP( \
	kbdev, \
	csffw_cycle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_csffw_fw_request_wakeup( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				csffw_cycle); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP( \
	kbdev, \
	csffw_cycle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_HALT -
 * CSF FW halt is requested
 *
 * @kbdev: Kbase device
 * @csffw_cycle: Cycle number of a CSFFW event
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_HALT( \
	kbdev, \
	csffw_cycle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_csffw_fw_request_halt( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				csffw_cycle); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_HALT( \
	kbdev, \
	csffw_cycle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */
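/* Usage sketch (illustrative; `fw_cycle` is a hypothetical local holding
 * the FW cycle counter): the FW power tracepoints describe a simple state
 * machine, e.g. a runtime-suspend path requests sleep and the matching
 * resume path requests wakeup, each stamped with the cycle count.
 *
 *	KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_SLEEP(kbdev, fw_cycle);
 *	...
 *	KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP(kbdev, fw_cycle);
 */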
/**
 * KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_DISABLING -
 * CSF FW is being disabled
 *
 * @kbdev: Kbase device
 * @csffw_cycle: Cycle number of a CSFFW event
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_DISABLING( \
	kbdev, \
	csffw_cycle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_csffw_fw_disabling( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				csffw_cycle); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_DISABLING( \
	kbdev, \
	csffw_cycle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_OFF -
 * CSF FW is off
 *
 * @kbdev: Kbase device
 * @csffw_cycle: Cycle number of a CSFFW event
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_OFF( \
	kbdev, \
	csffw_cycle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_csffw_fw_off( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				csffw_cycle); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_OFF( \
	kbdev, \
	csffw_cycle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_KBASE_CSFFW_TLSTREAM_OVERFLOW -
 * An overflow has happened with the CSFFW Timeline stream
 *
 * @kbdev: Kbase device
 * @csffw_timestamp: Timestamp of a CSFFW event
 * @csffw_cycle: Cycle number of a CSFFW event
 */
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_TLSTREAM_OVERFLOW( \
	kbdev, \
	csffw_timestamp, \
	csffw_cycle \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
			__kbase_tlstream_tl_kbase_csffw_tlstream_overflow( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				csffw_timestamp, csffw_cycle); \
	} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_TLSTREAM_OVERFLOW( \
	kbdev, \
	csffw_timestamp, \
	csffw_cycle \
	) \
	do { } while (0)
#endif /* MALI_USE_CSF */

/**
 * KBASE_TLSTREAM_TL_JS_SCHED_START -
 * Scheduling starts
 *
 * @kbdev: Kbase device
 * @dummy: dummy argument
 */
#define KBASE_TLSTREAM_TL_JS_SCHED_START( \
	kbdev, \
	dummy \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_js_sched_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				dummy); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JS_SCHED_END -
 * Scheduling ends
 *
 * @kbdev: Kbase device
 * @dummy: dummy argument
 */
#define KBASE_TLSTREAM_TL_JS_SCHED_END( \
	kbdev, \
	dummy \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_js_sched_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				dummy); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_START -
 * Submitting an atom starts
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_START( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jd_submit_atom_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom); \
	} while (0)
/**
 * KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_END -
 * Submitting an atom ends
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_END( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jd_submit_atom_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JD_DONE_NO_LOCK_START -
 * Start of function jd_done_nolock
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_JD_DONE_NO_LOCK_START( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jd_done_no_lock_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JD_DONE_NO_LOCK_END -
 * End of function jd_done_nolock
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_JD_DONE_NO_LOCK_END( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jd_done_no_lock_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JD_DONE_START -
 * Start of kbase_jd_done
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_JD_DONE_START( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jd_done_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JD_DONE_END -
 * End of kbase_jd_done
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_JD_DONE_END( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jd_done_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_JD_ATOM_COMPLETE -
 * Atom marked complete
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 */
#define KBASE_TLSTREAM_TL_JD_ATOM_COMPLETE( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jd_atom_complete( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_RUN_ATOM_START -
 * Running of atom starts
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @atom_nr: Sequential number of an atom
 */
#define KBASE_TLSTREAM_TL_RUN_ATOM_START( \
	kbdev, \
	atom, \
	atom_nr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_run_atom_start( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, atom_nr); \
	} while (0)

/**
 * KBASE_TLSTREAM_TL_RUN_ATOM_END -
 * Running of atom ends
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @atom_nr: Sequential number of an atom
 */
#define KBASE_TLSTREAM_TL_RUN_ATOM_END( \
	kbdev, \
	atom, \
	atom_nr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_run_atom_end( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, atom_nr); \
	} while (0)
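/* Usage sketch (illustrative; `katom` and the `work_id` field are
 * hypothetical): the job-dispatch tracepoints bracket an atom's lifetime,
 * so a trace for one atom typically reads as follows.
 *
 *	KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_START(kbdev, katom);
 *	KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_END(kbdev, katom);
 *	KBASE_TLSTREAM_TL_RUN_ATOM_START(kbdev, katom, katom->work_id);
 *	KBASE_TLSTREAM_TL_RUN_ATOM_END(kbdev, katom, katom->work_id);
 *	KBASE_TLSTREAM_TL_JD_DONE_START(kbdev, katom);
 *	KBASE_TLSTREAM_TL_JD_ATOM_COMPLETE(kbdev, katom);
 *	KBASE_TLSTREAM_TL_JD_DONE_END(kbdev, katom);
 */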
/* Gator tracepoints are hooked into the TLSTREAM interface.
 * When one of the following tracepoints is called, the corresponding
 * Gator tracepoint is called as well.
 */

#if defined(CONFIG_MALI_BIFROST_GATOR_SUPPORT)
/* `event` is one of TL_JS_EVENT values here.
 * The values of TL_JS_EVENT are guaranteed to match
 * the corresponding GATOR_JOB_SLOT values.
 */
#undef KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT
#define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(kbdev, \
	context, slot_nr, atom_nr, event) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		kbase_trace_mali_job_slots_event(kbdev->id, \
			GATOR_MAKE_EVENT(event, slot_nr), \
			context, (u8) atom_nr); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_event_job_slot( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				context, slot_nr, atom_nr, event); \
	} while (0)

#undef KBASE_TLSTREAM_AUX_PM_STATE
#define KBASE_TLSTREAM_AUX_PM_STATE(kbdev, core_type, state) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		kbase_trace_mali_pm_status(kbdev->id, \
			core_type, state); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_pm_state( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				core_type, state); \
	} while (0)

#undef KBASE_TLSTREAM_AUX_PAGEFAULT
#define KBASE_TLSTREAM_AUX_PAGEFAULT(kbdev, \
	ctx_nr, as_nr, page_cnt_change) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		kbase_trace_mali_page_fault_insert_pages(kbdev->id, \
			as_nr, \
			page_cnt_change); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_pagefault( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				ctx_nr, as_nr, page_cnt_change); \
	} while (0)

/* kbase_trace_mali_total_alloc_pages_change is handled differently here.
 * We stream the total number of pages allocated for `kbdev` rather
 * than `page_count`, which is per-context.
 */
#undef KBASE_TLSTREAM_AUX_PAGESALLOC
#define KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, ctx_nr, page_cnt) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_flags); \
		u32 global_pages_count = \
			atomic_read(&kbdev->memdev.used_pages); \
		\
		kbase_trace_mali_total_alloc_pages_change(kbdev->id, \
			global_pages_count); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_pagesalloc( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				ctx_nr, page_cnt); \
	} while (0)
#endif /* CONFIG_MALI_BIFROST_GATOR_SUPPORT */

/* clang-format on */
#endif /* _KBASE_TRACEPOINTS_H */