/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 *
 * Copyright 2016-2020 HabanaLabs, Ltd.
 * All Rights Reserved.
 *
 */

#ifndef HABANALABS_H_
#define HABANALABS_H_

#include <linux/types.h>
#include <linux/ioctl.h>

/*
 * Defines that are asic-specific but constitute an ABI between the kernel
 * driver and userspace
 */
#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START		0x8000	/* 32KB */
#define GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START	0x80	/* 128 bytes */

#define GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT		48
#define GAUDI_FIRST_AVAILABLE_W_S_MONITOR		24
/*
 * Goya queue numbering
 *
 * The external queues (PCI DMA channels) MUST come before the internal
 * queues, and each group (PCI DMA channels and internal queues) must be
 * contiguous within itself, but there can be a gap between the two groups
 * (although this is not recommended).
 */

enum goya_queue_id {
	GOYA_QUEUE_ID_DMA_0 = 0,
	GOYA_QUEUE_ID_DMA_1 = 1,
	GOYA_QUEUE_ID_DMA_2 = 2,
	GOYA_QUEUE_ID_DMA_3 = 3,
	GOYA_QUEUE_ID_DMA_4 = 4,
	GOYA_QUEUE_ID_CPU_PQ = 5,
	GOYA_QUEUE_ID_MME = 6,	/* Internal queues start here */
	GOYA_QUEUE_ID_TPC0 = 7,
	GOYA_QUEUE_ID_TPC1 = 8,
	GOYA_QUEUE_ID_TPC2 = 9,
	GOYA_QUEUE_ID_TPC3 = 10,
	GOYA_QUEUE_ID_TPC4 = 11,
	GOYA_QUEUE_ID_TPC5 = 12,
	GOYA_QUEUE_ID_TPC6 = 13,
	GOYA_QUEUE_ID_TPC7 = 14,
	GOYA_QUEUE_ID_SIZE
};

/*
 * Gaudi queue numbering
 * The external queues (PCI DMA channels) are DMA_0_*, DMA_1_* and DMA_5_*.
 * Except for one CPU queue, all the rest are internal queues.
 */

enum gaudi_queue_id {
	GAUDI_QUEUE_ID_DMA_0_0 = 0,	/* external */
	GAUDI_QUEUE_ID_DMA_0_1 = 1,	/* external */
	GAUDI_QUEUE_ID_DMA_0_2 = 2,	/* external */
	GAUDI_QUEUE_ID_DMA_0_3 = 3,	/* external */
	GAUDI_QUEUE_ID_DMA_1_0 = 4,	/* external */
	GAUDI_QUEUE_ID_DMA_1_1 = 5,	/* external */
	GAUDI_QUEUE_ID_DMA_1_2 = 6,	/* external */
	GAUDI_QUEUE_ID_DMA_1_3 = 7,	/* external */
	GAUDI_QUEUE_ID_CPU_PQ = 8,	/* CPU */
	GAUDI_QUEUE_ID_DMA_2_0 = 9,	/* internal */
	GAUDI_QUEUE_ID_DMA_2_1 = 10,	/* internal */
	GAUDI_QUEUE_ID_DMA_2_2 = 11,	/* internal */
	GAUDI_QUEUE_ID_DMA_2_3 = 12,	/* internal */
	GAUDI_QUEUE_ID_DMA_3_0 = 13,	/* internal */
	GAUDI_QUEUE_ID_DMA_3_1 = 14,	/* internal */
	GAUDI_QUEUE_ID_DMA_3_2 = 15,	/* internal */
	GAUDI_QUEUE_ID_DMA_3_3 = 16,	/* internal */
	GAUDI_QUEUE_ID_DMA_4_0 = 17,	/* internal */
	GAUDI_QUEUE_ID_DMA_4_1 = 18,	/* internal */
	GAUDI_QUEUE_ID_DMA_4_2 = 19,	/* internal */
	GAUDI_QUEUE_ID_DMA_4_3 = 20,	/* internal */
	GAUDI_QUEUE_ID_DMA_5_0 = 21,	/* external */
	GAUDI_QUEUE_ID_DMA_5_1 = 22,	/* external */
	GAUDI_QUEUE_ID_DMA_5_2 = 23,	/* external */
	GAUDI_QUEUE_ID_DMA_5_3 = 24,	/* external */
	GAUDI_QUEUE_ID_DMA_6_0 = 25,	/* internal */
	GAUDI_QUEUE_ID_DMA_6_1 = 26,	/* internal */
	GAUDI_QUEUE_ID_DMA_6_2 = 27,	/* internal */
	GAUDI_QUEUE_ID_DMA_6_3 = 28,	/* internal */
	GAUDI_QUEUE_ID_DMA_7_0 = 29,	/* internal */
	GAUDI_QUEUE_ID_DMA_7_1 = 30,	/* internal */
	GAUDI_QUEUE_ID_DMA_7_2 = 31,	/* internal */
	GAUDI_QUEUE_ID_DMA_7_3 = 32,	/* internal */
	GAUDI_QUEUE_ID_MME_0_0 = 33,	/* internal */
	GAUDI_QUEUE_ID_MME_0_1 = 34,	/* internal */
	GAUDI_QUEUE_ID_MME_0_2 = 35,	/* internal */
	GAUDI_QUEUE_ID_MME_0_3 = 36,	/* internal */
	GAUDI_QUEUE_ID_MME_1_0 = 37,	/* internal */
	GAUDI_QUEUE_ID_MME_1_1 = 38,	/* internal */
	GAUDI_QUEUE_ID_MME_1_2 = 39,	/* internal */
	GAUDI_QUEUE_ID_MME_1_3 = 40,	/* internal */
	GAUDI_QUEUE_ID_TPC_0_0 = 41,	/* internal */
	GAUDI_QUEUE_ID_TPC_0_1 = 42,	/* internal */
	GAUDI_QUEUE_ID_TPC_0_2 = 43,	/* internal */
	GAUDI_QUEUE_ID_TPC_0_3 = 44,	/* internal */
	GAUDI_QUEUE_ID_TPC_1_0 = 45,	/* internal */
	GAUDI_QUEUE_ID_TPC_1_1 = 46,	/* internal */
	GAUDI_QUEUE_ID_TPC_1_2 = 47,	/* internal */
	GAUDI_QUEUE_ID_TPC_1_3 = 48,	/* internal */
	GAUDI_QUEUE_ID_TPC_2_0 = 49,	/* internal */
	GAUDI_QUEUE_ID_TPC_2_1 = 50,	/* internal */
	GAUDI_QUEUE_ID_TPC_2_2 = 51,	/* internal */
	GAUDI_QUEUE_ID_TPC_2_3 = 52,	/* internal */
	GAUDI_QUEUE_ID_TPC_3_0 = 53,	/* internal */
	GAUDI_QUEUE_ID_TPC_3_1 = 54,	/* internal */
	GAUDI_QUEUE_ID_TPC_3_2 = 55,	/* internal */
	GAUDI_QUEUE_ID_TPC_3_3 = 56,	/* internal */
	GAUDI_QUEUE_ID_TPC_4_0 = 57,	/* internal */
	GAUDI_QUEUE_ID_TPC_4_1 = 58,	/* internal */
	GAUDI_QUEUE_ID_TPC_4_2 = 59,	/* internal */
	GAUDI_QUEUE_ID_TPC_4_3 = 60,	/* internal */
	GAUDI_QUEUE_ID_TPC_5_0 = 61,	/* internal */
	GAUDI_QUEUE_ID_TPC_5_1 = 62,	/* internal */
	GAUDI_QUEUE_ID_TPC_5_2 = 63,	/* internal */
	GAUDI_QUEUE_ID_TPC_5_3 = 64,	/* internal */
	GAUDI_QUEUE_ID_TPC_6_0 = 65,	/* internal */
	GAUDI_QUEUE_ID_TPC_6_1 = 66,	/* internal */
	GAUDI_QUEUE_ID_TPC_6_2 = 67,	/* internal */
	GAUDI_QUEUE_ID_TPC_6_3 = 68,	/* internal */
	GAUDI_QUEUE_ID_TPC_7_0 = 69,	/* internal */
	GAUDI_QUEUE_ID_TPC_7_1 = 70,	/* internal */
	GAUDI_QUEUE_ID_TPC_7_2 = 71,	/* internal */
	GAUDI_QUEUE_ID_TPC_7_3 = 72,	/* internal */
	GAUDI_QUEUE_ID_NIC_0_0 = 73,	/* internal */
	GAUDI_QUEUE_ID_NIC_0_1 = 74,	/* internal */
	GAUDI_QUEUE_ID_NIC_0_2 = 75,	/* internal */
	GAUDI_QUEUE_ID_NIC_0_3 = 76,	/* internal */
	GAUDI_QUEUE_ID_NIC_1_0 = 77,	/* internal */
	GAUDI_QUEUE_ID_NIC_1_1 = 78,	/* internal */
	GAUDI_QUEUE_ID_NIC_1_2 = 79,	/* internal */
	GAUDI_QUEUE_ID_NIC_1_3 = 80,	/* internal */
	GAUDI_QUEUE_ID_NIC_2_0 = 81,	/* internal */
	GAUDI_QUEUE_ID_NIC_2_1 = 82,	/* internal */
	GAUDI_QUEUE_ID_NIC_2_2 = 83,	/* internal */
	GAUDI_QUEUE_ID_NIC_2_3 = 84,	/* internal */
	GAUDI_QUEUE_ID_NIC_3_0 = 85,	/* internal */
	GAUDI_QUEUE_ID_NIC_3_1 = 86,	/* internal */
	GAUDI_QUEUE_ID_NIC_3_2 = 87,	/* internal */
	GAUDI_QUEUE_ID_NIC_3_3 = 88,	/* internal */
	GAUDI_QUEUE_ID_NIC_4_0 = 89,	/* internal */
	GAUDI_QUEUE_ID_NIC_4_1 = 90,	/* internal */
	GAUDI_QUEUE_ID_NIC_4_2 = 91,	/* internal */
	GAUDI_QUEUE_ID_NIC_4_3 = 92,	/* internal */
	GAUDI_QUEUE_ID_NIC_5_0 = 93,	/* internal */
	GAUDI_QUEUE_ID_NIC_5_1 = 94,	/* internal */
	GAUDI_QUEUE_ID_NIC_5_2 = 95,	/* internal */
	GAUDI_QUEUE_ID_NIC_5_3 = 96,	/* internal */
	GAUDI_QUEUE_ID_NIC_6_0 = 97,	/* internal */
	GAUDI_QUEUE_ID_NIC_6_1 = 98,	/* internal */
	GAUDI_QUEUE_ID_NIC_6_2 = 99,	/* internal */
	GAUDI_QUEUE_ID_NIC_6_3 = 100,	/* internal */
	GAUDI_QUEUE_ID_NIC_7_0 = 101,	/* internal */
	GAUDI_QUEUE_ID_NIC_7_1 = 102,	/* internal */
	GAUDI_QUEUE_ID_NIC_7_2 = 103,	/* internal */
	GAUDI_QUEUE_ID_NIC_7_3 = 104,	/* internal */
	GAUDI_QUEUE_ID_NIC_8_0 = 105,	/* internal */
	GAUDI_QUEUE_ID_NIC_8_1 = 106,	/* internal */
	GAUDI_QUEUE_ID_NIC_8_2 = 107,	/* internal */
	GAUDI_QUEUE_ID_NIC_8_3 = 108,	/* internal */
	GAUDI_QUEUE_ID_NIC_9_0 = 109,	/* internal */
	GAUDI_QUEUE_ID_NIC_9_1 = 110,	/* internal */
	GAUDI_QUEUE_ID_NIC_9_2 = 111,	/* internal */
	GAUDI_QUEUE_ID_NIC_9_3 = 112,	/* internal */
	GAUDI_QUEUE_ID_SIZE
};

/*
 * Engine Numbering
 *
 * Used in the "busy_engines_mask" field in `struct hl_info_hw_idle'
 */

enum goya_engine_id {
	GOYA_ENGINE_ID_DMA_0 = 0,
	GOYA_ENGINE_ID_DMA_1,
	GOYA_ENGINE_ID_DMA_2,
	GOYA_ENGINE_ID_DMA_3,
	GOYA_ENGINE_ID_DMA_4,
	GOYA_ENGINE_ID_MME_0,
	GOYA_ENGINE_ID_TPC_0,
	GOYA_ENGINE_ID_TPC_1,
	GOYA_ENGINE_ID_TPC_2,
	GOYA_ENGINE_ID_TPC_3,
	GOYA_ENGINE_ID_TPC_4,
	GOYA_ENGINE_ID_TPC_5,
	GOYA_ENGINE_ID_TPC_6,
	GOYA_ENGINE_ID_TPC_7,
	GOYA_ENGINE_ID_SIZE
};

enum gaudi_engine_id {
	GAUDI_ENGINE_ID_DMA_0 = 0,
	GAUDI_ENGINE_ID_DMA_1,
	GAUDI_ENGINE_ID_DMA_2,
	GAUDI_ENGINE_ID_DMA_3,
	GAUDI_ENGINE_ID_DMA_4,
	GAUDI_ENGINE_ID_DMA_5,
	GAUDI_ENGINE_ID_DMA_6,
	GAUDI_ENGINE_ID_DMA_7,
	GAUDI_ENGINE_ID_MME_0,
	GAUDI_ENGINE_ID_MME_1,
	GAUDI_ENGINE_ID_MME_2,
	GAUDI_ENGINE_ID_MME_3,
	GAUDI_ENGINE_ID_TPC_0,
	GAUDI_ENGINE_ID_TPC_1,
	GAUDI_ENGINE_ID_TPC_2,
	GAUDI_ENGINE_ID_TPC_3,
	GAUDI_ENGINE_ID_TPC_4,
	GAUDI_ENGINE_ID_TPC_5,
	GAUDI_ENGINE_ID_TPC_6,
	GAUDI_ENGINE_ID_TPC_7,
	GAUDI_ENGINE_ID_NIC_0,
	GAUDI_ENGINE_ID_NIC_1,
	GAUDI_ENGINE_ID_NIC_2,
	GAUDI_ENGINE_ID_NIC_3,
	GAUDI_ENGINE_ID_NIC_4,
	GAUDI_ENGINE_ID_NIC_5,
	GAUDI_ENGINE_ID_NIC_6,
	GAUDI_ENGINE_ID_NIC_7,
	GAUDI_ENGINE_ID_NIC_8,
	GAUDI_ENGINE_ID_NIC_9,
	GAUDI_ENGINE_ID_SIZE
};

enum hl_device_status {
	HL_DEVICE_STATUS_OPERATIONAL,
	HL_DEVICE_STATUS_IN_RESET,
	HL_DEVICE_STATUS_MALFUNCTION
};

/* Opcodes for the management ioctl
 *
 * HL_INFO_HW_IP_INFO - Receive information about different IP blocks in the
 *                      device.
 * HL_INFO_HW_EVENTS - Receive an array describing how many times each event
 *                     occurred since the last hard reset.
 * HL_INFO_DRAM_USAGE - Retrieve the dram usage inside the device and of the
 *                      specific context. This is relevant only for devices
 *                      where the dram is managed by the kernel driver.
 * HL_INFO_HW_IDLE - Retrieve information about the idle status of each
 *                   internal engine.
 * HL_INFO_DEVICE_STATUS - Retrieve the device's status. This opcode doesn't
 *                         require an open context.
 * HL_INFO_DEVICE_UTILIZATION - Retrieve the total utilization of the device
 *                              over the last period specified by the user.
 *                              The period can be between 100ms and 1s, at a
 *                              resolution of 100ms. The return value is a
 *                              percentage of the utilization rate.
 * HL_INFO_HW_EVENTS_AGGREGATE - Receive an array describing how many times
 *                               each event occurred since the driver was
 *                               loaded.
 * HL_INFO_CLK_RATE - Retrieve the current and maximum clock rate
 *                    of the device in MHz. The maximum clock rate is
 *                    configurable via a sysfs parameter.
 * HL_INFO_RESET_COUNT - Retrieve the counts of the soft and hard reset
 *                       operations performed on the device since the last
 *                       time the driver was loaded.
 * HL_INFO_TIME_SYNC - Retrieve the device's time alongside the host's time
 *                     for synchronization.
 * HL_INFO_CS_COUNTERS - Retrieve command submission counters
 * HL_INFO_PCI_COUNTERS - Retrieve PCI counters
 * HL_INFO_CLK_THROTTLE_REASON - Retrieve clock throttling reason
 * HL_INFO_SYNC_MANAGER - Retrieve sync manager info per dcore
 * HL_INFO_TOTAL_ENERGY - Retrieve total energy consumption
 */
#define HL_INFO_HW_IP_INFO		0
#define HL_INFO_HW_EVENTS		1
#define HL_INFO_DRAM_USAGE		2
#define HL_INFO_HW_IDLE			3
#define HL_INFO_DEVICE_STATUS		4
#define HL_INFO_DEVICE_UTILIZATION	6
#define HL_INFO_HW_EVENTS_AGGREGATE	7
#define HL_INFO_CLK_RATE		8
#define HL_INFO_RESET_COUNT		9
#define HL_INFO_TIME_SYNC		10
#define HL_INFO_CS_COUNTERS		11
#define HL_INFO_PCI_COUNTERS		12
#define HL_INFO_CLK_THROTTLE_REASON	13
#define HL_INFO_SYNC_MANAGER		14
#define HL_INFO_TOTAL_ENERGY		15

#define HL_INFO_VERSION_MAX_LEN		128
#define HL_INFO_CARD_NAME_MAX_LEN	16

struct hl_info_hw_ip_info {
	__u64 sram_base_address;
	__u64 dram_base_address;
	__u64 dram_size;
	__u32 sram_size;
	__u32 num_of_events;
	__u32 device_id; /* PCI Device ID */
	__u32 module_id; /* For mezzanine cards in servers (from OCP spec.) */
	__u32 reserved[2];
	__u32 cpld_version;
	__u32 psoc_pci_pll_nr;
	__u32 psoc_pci_pll_nf;
	__u32 psoc_pci_pll_od;
	__u32 psoc_pci_pll_div_factor;
	__u8 tpc_enabled_mask;
	__u8 dram_enabled;
	__u8 pad[2];
	__u8 cpucp_version[HL_INFO_VERSION_MAX_LEN];
	__u8 card_name[HL_INFO_CARD_NAME_MAX_LEN];
};

struct hl_info_dram_usage {
	__u64 dram_free_mem;
	__u64 ctx_dram_mem;
};

struct hl_info_hw_idle {
	__u32 is_idle;
	/*
	 * Bitmask of busy engines.
	 * Bits definition is according to `enum <chip>_engine_id'.
	 */
	__u32 busy_engines_mask;

	/*
	 * Extended bitmask of busy engines.
	 * Bits definition is according to `enum <chip>_engine_id'.
	 */
	__u64 busy_engines_mask_ext;
};
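
/*
 * Example (an illustrative sketch, not part of the ABI): after querying the
 * HL_INFO_HW_IDLE opcode through HL_IOCTL_INFO (defined at the bottom of this
 * file), userspace might decode the busy-engines bitmask against the engine
 * enums above, e.g. for a Gaudi device. Variable names are hypothetical.
 *
 *	struct hl_info_hw_idle idle;
 *	int engine;
 *
 *	(...idle is filled by the HL_INFO_HW_IDLE opcode of HL_IOCTL_INFO...)
 *
 *	for (engine = 0 ; engine < GAUDI_ENGINE_ID_SIZE ; engine++)
 *		if (idle.busy_engines_mask_ext & (1ULL << engine))
 *			printf("engine %d is busy\n", engine);
 */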

struct hl_info_device_status {
	__u32 status;
	__u32 pad;
};

struct hl_info_device_utilization {
	__u32 utilization;
	__u32 pad;
};

struct hl_info_clk_rate {
	__u32 cur_clk_rate_mhz;
	__u32 max_clk_rate_mhz;
};

struct hl_info_reset_count {
	__u32 hard_reset_cnt;
	__u32 soft_reset_cnt;
};

struct hl_info_time_sync {
	__u64 device_time;
	__u64 host_time;
};

/**
 * struct hl_info_pci_counters - pci counters
 * @rx_throughput: PCI rx throughput in KBps
 * @tx_throughput: PCI tx throughput in KBps
 * @replay_cnt: PCI replay counter
 */
struct hl_info_pci_counters {
	__u64 rx_throughput;
	__u64 tx_throughput;
	__u64 replay_cnt;
};

#define HL_CLK_THROTTLE_POWER	0x1
#define HL_CLK_THROTTLE_THERMAL	0x2

/**
 * struct hl_info_clk_throttle - clock throttling reason
 * @clk_throttling_reason: each bit represents a clk throttling reason
 */
struct hl_info_clk_throttle {
	__u32 clk_throttling_reason;
};

/**
 * struct hl_info_energy - device energy information
 * @total_energy_consumption: total device energy consumption
 */
struct hl_info_energy {
	__u64 total_energy_consumption;
};

/**
 * struct hl_info_sync_manager - sync manager information
 * @first_available_sync_object: first available sob
 * @first_available_monitor: first available monitor
 */
struct hl_info_sync_manager {
	__u32 first_available_sync_object;
	__u32 first_available_monitor;
};

/**
 * struct hl_cs_counters - command submission counters
 * @out_of_mem_drop_cnt: dropped due to memory allocation issue
 * @parsing_drop_cnt: dropped due to error in packet parsing
 * @queue_full_drop_cnt: dropped due to queue full
 * @device_in_reset_drop_cnt: dropped due to device in reset
 * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
 */
struct hl_cs_counters {
	__u64 out_of_mem_drop_cnt;
	__u64 parsing_drop_cnt;
	__u64 queue_full_drop_cnt;
	__u64 device_in_reset_drop_cnt;
	__u64 max_cs_in_flight_drop_cnt;
};

struct hl_info_cs_counters {
	struct hl_cs_counters cs_counters;
	struct hl_cs_counters ctx_cs_counters;
};

enum gaudi_dcores {
	HL_GAUDI_WS_DCORE,
	HL_GAUDI_WN_DCORE,
	HL_GAUDI_EN_DCORE,
	HL_GAUDI_ES_DCORE
};

struct hl_info_args {
	/* Location of relevant struct in userspace */
	__u64 return_pointer;
	/*
	 * The size of the return value. Just like "size" in "snprintf", it
	 * limits how many bytes the kernel can write.
	 *
	 * For the hw_events array, the size should be
	 * hl_info_hw_ip_info.num_of_events * sizeof(__u32)
	 */
	__u32 return_size;

	/* HL_INFO_* */
	__u32 op;

	union {
		/* Dcore id for which the information is relevant.
		 * For Gaudi refer to 'enum gaudi_dcores'
		 */
		__u32 dcore_id;
		/* Context ID - Currently not in use */
		__u32 ctx_id;
		/* Period value for utilization rate (100ms - 1000ms, in 100ms
		 * resolution)
		 */
		__u32 period_ms;
	};

	__u32 pad;
};
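
/*
 * Example (an illustrative sketch, not part of the ABI): preparing an
 * HL_INFO_DEVICE_UTILIZATION query. HL_IOCTL_INFO itself is defined at the
 * bottom of this file and is called here through ioctl(2) on an open device
 * file descriptor; variable names are hypothetical.
 *
 *	struct hl_info_device_utilization util = {0};
 *	struct hl_info_args args = {0};
 *
 *	args.return_pointer = (__u64) (uintptr_t) &util;
 *	args.return_size = sizeof(util);
 *	args.op = HL_INFO_DEVICE_UTILIZATION;
 *	args.period_ms = 500;
 *
 *	if (!ioctl(fd, HL_IOCTL_INFO, &args))
 *		printf("utilization: %u%%\n", util.utilization);
 */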

/* Opcode to create a new command buffer */
#define HL_CB_OP_CREATE		0
/* Opcode to destroy previously created command buffer */
#define HL_CB_OP_DESTROY	1

/* 2MB minus 32 bytes for 2xMSG_PROT */
#define HL_MAX_CB_SIZE		(0x200000 - 32)

/* Indicates whether the command buffer should be mapped to the device's MMU */
#define HL_CB_FLAGS_MAP		0x1

struct hl_cb_in {
	/* Handle of CB or 0 if we want to create one */
	__u64 cb_handle;
	/* HL_CB_OP_* */
	__u32 op;
	/* Size of CB. Maximum size is HL_MAX_CB_SIZE. The minimum size that
	 * will be allocated, regardless of this parameter's value, is
	 * PAGE_SIZE
	 */
	__u32 cb_size;
	/* Context ID - Currently not in use */
	__u32 ctx_id;
	/* HL_CB_FLAGS_* */
	__u32 flags;
};

struct hl_cb_out {
	/* Handle of CB */
	__u64 cb_handle;
};

union hl_cb_args {
	struct hl_cb_in in;
	struct hl_cb_out out;
};

/*
 * This structure's size must always be fixed to 64 bytes for backward
 * compatibility.
 */
struct hl_cs_chunk {
	union {
		/* For an external queue, this represents the handle of a CB
		 * on the host.
		 * For an internal queue in Goya, this represents an SRAM or
		 * a DRAM address of the internal CB. In Gaudi, this might
		 * also represent a mapped host address of the CB.
		 *
		 * A mapped host address is in the device address space, after
		 * a host address was mapped by the device MMU.
		 */
		__u64 cb_handle;

		/* Relevant only when HL_CS_FLAGS_WAIT is set.
		 * This holds the address of an array of u64 values that
		 * contain signal CS sequence numbers. The wait described by
		 * this job will listen on all those signals (wait event per
		 * signal).
		 */
		__u64 signal_seq_arr;
	};

	/* Index of queue to put the CB on */
	__u32 queue_index;

	union {
		/*
		 * Size of command buffer with valid packets
		 * Can be smaller than the actual CB size
		 */
		__u32 cb_size;

		/* Relevant only when HL_CS_FLAGS_WAIT is set.
		 * Number of entries in signal_seq_arr
		 */
		__u32 num_signal_seq_arr;
	};

	/* HL_CS_CHUNK_FLAGS_* */
	__u32 cs_chunk_flags;

	/* Align structure to 64 bytes */
	__u32 pad[11];
};

/* SIGNAL and WAIT flags are mutually exclusive */
#define HL_CS_FLAGS_FORCE_RESTORE	0x1
#define HL_CS_FLAGS_SIGNAL		0x2
#define HL_CS_FLAGS_WAIT		0x4

#define HL_CS_STATUS_SUCCESS		0

#define HL_MAX_JOBS_PER_CS		512

struct hl_cs_in {

	/* this holds the address of an array of hl_cs_chunk for the restore
	 * phase
	 */
	__u64 chunks_restore;

	/* this holds the address of an array of hl_cs_chunk for the execution
	 * phase
	 */
	__u64 chunks_execute;

	/* this holds the address of an array of hl_cs_chunk for the store
	 * phase - Currently not in use
	 */
	__u64 chunks_store;

	/* Number of chunks in the restore phase array. Maximum number is
	 * HL_MAX_JOBS_PER_CS
	 */
	__u32 num_chunks_restore;

	/* Number of chunks in the execution array. Maximum number is
	 * HL_MAX_JOBS_PER_CS
	 */
	__u32 num_chunks_execute;

	/* Number of chunks in the store phase array - Currently not in use */
	__u32 num_chunks_store;

	/* HL_CS_FLAGS_* */
	__u32 cs_flags;

	/* Context ID - Currently not in use */
	__u32 ctx_id;
};

struct hl_cs_out {
	/*
	 * seq holds the sequence number of the CS to pass to the wait ioctl.
	 * All values are valid except for 0 and ULLONG_MAX
	 */
	__u64 seq;
	/* HL_CS_STATUS_* */
	__u32 status;
	__u32 pad;
};

union hl_cs_args {
	struct hl_cs_in in;
	struct hl_cs_out out;
};
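
/*
 * Example (an illustrative sketch, not part of the ABI): submitting a single
 * job on an external queue through HL_IOCTL_CS (defined at the bottom of this
 * file). Assumes cb_handle was returned earlier by the CB IOCTL; variable
 * names are hypothetical.
 *
 *	struct hl_cs_chunk chunk = {0};
 *	union hl_cs_args cs = {0};
 *
 *	chunk.cb_handle = cb_handle;
 *	chunk.queue_index = GAUDI_QUEUE_ID_DMA_0_0;
 *	chunk.cb_size = valid_packets_size;
 *
 *	cs.in.chunks_execute = (__u64) (uintptr_t) &chunk;
 *	cs.in.num_chunks_execute = 1;
 *
 *	if (!ioctl(fd, HL_IOCTL_CS, &cs) &&
 *			cs.out.status == HL_CS_STATUS_SUCCESS)
 *		seq = cs.out.seq;
 */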

struct hl_wait_cs_in {
	/* Command submission sequence number */
	__u64 seq;
	/* Absolute timeout to wait in microseconds */
	__u64 timeout_us;
	/* Context ID - Currently not in use */
	__u32 ctx_id;
	__u32 pad;
};

#define HL_WAIT_CS_STATUS_COMPLETED	0
#define HL_WAIT_CS_STATUS_BUSY		1
#define HL_WAIT_CS_STATUS_TIMEDOUT	2
#define HL_WAIT_CS_STATUS_ABORTED	3
#define HL_WAIT_CS_STATUS_INTERRUPTED	4

struct hl_wait_cs_out {
	/* HL_WAIT_CS_STATUS_* */
	__u32 status;
	__u32 pad;
};

union hl_wait_cs_args {
	struct hl_wait_cs_in in;
	struct hl_wait_cs_out out;
};

/* Opcode to allocate device memory */
#define HL_MEM_OP_ALLOC		0
/* Opcode to free previously allocated device memory */
#define HL_MEM_OP_FREE		1
/* Opcode to map host and device memory */
#define HL_MEM_OP_MAP		2
/* Opcode to unmap previously mapped host and device memory */
#define HL_MEM_OP_UNMAP		3

/* Memory flags */
#define HL_MEM_CONTIGUOUS	0x1
#define HL_MEM_SHARED		0x2
#define HL_MEM_USERPTR		0x4

struct hl_mem_in {
	union {
		/* HL_MEM_OP_ALLOC - allocate device memory */
		struct {
			/* Size to alloc */
			__u64 mem_size;
		} alloc;

		/* HL_MEM_OP_FREE - free device memory */
		struct {
			/* Handle returned from HL_MEM_OP_ALLOC */
			__u64 handle;
		} free;

		/* HL_MEM_OP_MAP - map device memory */
		struct {
			/*
			 * Requested virtual address of mapped memory.
			 * The driver will try to map the requested region to
			 * this hint address, as long as the address is valid
			 * and not already mapped. The user should check the
			 * returned address of the IOCTL to make sure it
			 * matches the hint address. Passing 0 here means that
			 * the driver will choose the address itself.
			 */
			__u64 hint_addr;
			/* Handle returned from HL_MEM_OP_ALLOC */
			__u64 handle;
		} map_device;

		/* HL_MEM_OP_MAP - map host memory */
		struct {
			/* Address of allocated host memory */
			__u64 host_virt_addr;
			/*
			 * Requested virtual address of mapped memory.
			 * The driver will try to map the requested region to
			 * this hint address, as long as the address is valid
			 * and not already mapped. The user should check the
			 * returned address of the IOCTL to make sure it
			 * matches the hint address. Passing 0 here means that
			 * the driver will choose the address itself.
			 */
			__u64 hint_addr;
			/* Size of allocated host memory */
			__u64 mem_size;
		} map_host;

		/* HL_MEM_OP_UNMAP - unmap host memory */
		struct {
			/* Virtual address returned from HL_MEM_OP_MAP */
			__u64 device_virt_addr;
		} unmap;
	};

	/* HL_MEM_OP_* */
	__u32 op;
	/* HL_MEM_* flags */
	__u32 flags;
	/* Context ID - Currently not in use */
	__u32 ctx_id;
	__u32 pad;
};

struct hl_mem_out {
	union {
		/*
		 * Used for HL_MEM_OP_MAP as the virtual address that was
		 * assigned in the device VA space.
		 * A value of 0 means the requested operation failed.
		 */
		__u64 device_virt_addr;

		/*
		 * Used for HL_MEM_OP_ALLOC. This is the assigned
		 * handle for the allocated memory
		 */
		__u64 handle;
	};
};

union hl_mem_args {
	struct hl_mem_in in;
	struct hl_mem_out out;
};
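
/*
 * Example (an illustrative sketch, not part of the ABI): mapping a host
 * buffer into the device VA space through HL_IOCTL_MEMORY (defined at the
 * bottom of this file). The HL_MEM_USERPTR flag marks the operation as a
 * host-memory mapping; variable names are hypothetical.
 *
 *	union hl_mem_args mem = {0};
 *
 *	mem.in.map_host.host_virt_addr = (__u64) (uintptr_t) host_buf;
 *	mem.in.map_host.mem_size = buf_size;
 *	mem.in.op = HL_MEM_OP_MAP;
 *	mem.in.flags = HL_MEM_USERPTR;
 *
 *	if (!ioctl(fd, HL_IOCTL_MEMORY, &mem))
 *		device_va = mem.out.device_virt_addr;
 */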

#define HL_DEBUG_MAX_AUX_VALUES		10

struct hl_debug_params_etr {
	/* Address in memory to allocate buffer */
	__u64 buffer_address;

	/* Size of buffer to allocate */
	__u64 buffer_size;

	/* Sink operation mode: SW fifo, HW fifo, Circular buffer */
	__u32 sink_mode;
	__u32 pad;
};

struct hl_debug_params_etf {
	/* Address in memory to allocate buffer */
	__u64 buffer_address;

	/* Size of buffer to allocate */
	__u64 buffer_size;

	/* Sink operation mode: SW fifo, HW fifo, Circular buffer */
	__u32 sink_mode;
	__u32 pad;
};

struct hl_debug_params_stm {
	/* Two bit masks for HW event and Stimulus Port */
	__u64 he_mask;
	__u64 sp_mask;

	/* Trace source ID */
	__u32 id;

	/* Frequency for the timestamp register */
	__u32 frequency;
};

struct hl_debug_params_bmon {
	/* Two address ranges that the user can request to filter */
	__u64 start_addr0;
	__u64 addr_mask0;

	__u64 start_addr1;
	__u64 addr_mask1;

	/* Capture window configuration */
	__u32 bw_win;
	__u32 win_capture;

	/* Trace source ID */
	__u32 id;
	__u32 pad;
};

struct hl_debug_params_spmu {
	/* Event types selection */
	__u64 event_types[HL_DEBUG_MAX_AUX_VALUES];

	/* Number of event types selection */
	__u32 event_types_num;
	__u32 pad;
};

/* Opcode for ETR component */
#define HL_DEBUG_OP_ETR		0
/* Opcode for ETF component */
#define HL_DEBUG_OP_ETF		1
/* Opcode for STM component */
#define HL_DEBUG_OP_STM		2
/* Opcode for FUNNEL component */
#define HL_DEBUG_OP_FUNNEL	3
/* Opcode for BMON component */
#define HL_DEBUG_OP_BMON	4
/* Opcode for SPMU component */
#define HL_DEBUG_OP_SPMU	5
/* Opcode for timestamp (deprecated) */
#define HL_DEBUG_OP_TIMESTAMP	6
/* Opcode for setting the device into or out of debug mode. The enable
 * variable should be 1 for enabling debug mode and 0 for disabling it
 */
#define HL_DEBUG_OP_SET_MODE	7

struct hl_debug_args {
	/*
	 * Pointer to user input structure.
	 * This field is relevant to specific opcodes.
	 */
	__u64 input_ptr;
	/* Pointer to user output structure */
	__u64 output_ptr;
	/* Size of user input structure */
	__u32 input_size;
	/* Size of user output structure */
	__u32 output_size;
	/* HL_DEBUG_OP_* */
	__u32 op;
	/*
	 * Register index in the component, taken from the debug_regs_index
	 * enum in the various ASIC header files
	 */
	__u32 reg_idx;
	/* Enable/disable */
	__u32 enable;
	/* Context ID - Currently not in use */
	__u32 ctx_id;
};

/*
 * Various information operations, such as:
 * - H/W IP information
 * - Current dram usage
 *
 * The user calls this IOCTL with an opcode that describes the required
 * information. The user should supply a pointer to a user-allocated memory
 * chunk, which will be filled by the driver with the requested information.
 *
 * The user supplies the maximum size to copy into the user's memory, in
 * order to prevent data corruption in case of differences between the
 * definitions of structures in kernel and userspace, e.g. in case of an old
 * userspace and a new kernel driver.
 */
#define HL_IOCTL_INFO	\
		_IOWR('H', 0x01, struct hl_info_args)
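
/*
 * Example (an illustrative sketch, not part of the ABI): sizing the
 * hw_events buffer from a prior HL_INFO_HW_IP_INFO query, as described in
 * the return_size note of struct hl_info_args above. Variable names are
 * hypothetical.
 *
 *	struct hl_info_hw_ip_info hw_ip = {0};
 *	struct hl_info_args args = {0};
 *	__u32 *events;
 *
 *	args.return_pointer = (__u64) (uintptr_t) &hw_ip;
 *	args.return_size = sizeof(hw_ip);
 *	args.op = HL_INFO_HW_IP_INFO;
 *	if (ioctl(fd, HL_IOCTL_INFO, &args))
 *		return -1;
 *
 *	events = calloc(hw_ip.num_of_events, sizeof(*events));
 *	args.return_pointer = (__u64) (uintptr_t) events;
 *	args.return_size = hw_ip.num_of_events * sizeof(*events);
 *	args.op = HL_INFO_HW_EVENTS;
 *	if (ioctl(fd, HL_IOCTL_INFO, &args))
 *		return -1;
 */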

/*
 * Command Buffer
 * - Request a Command Buffer
 * - Destroy a Command Buffer
 *
 * The command buffers are memory blocks that reside in DMA-able address
 * space and are physically contiguous so they can be accessed by the device
 * directly. They are allocated using the coherent DMA API.
 *
 * When creating a new CB, the IOCTL returns a handle of it, and the
 * user-space process needs to use that handle to mmap the buffer so it can
 * access it.
 *
 * In some instances, the device must access the command buffer through the
 * device's MMU, and thus its memory should be mapped. In these cases, the
 * user can indicate to the driver that such a mapping is required.
 * The resulting device virtual address will be used internally by the
 * driver, and won't be returned to the user.
 *
 */
#define HL_IOCTL_CB	\
		_IOWR('H', 0x02, union hl_cb_args)
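
/*
 * Example (an illustrative sketch, not part of the ABI): creating a CB and
 * mmap'ing it into the process address space. This sketch assumes the
 * returned handle is passed as the mmap offset, and the variable names are
 * hypothetical.
 *
 *	union hl_cb_args cb = {0};
 *	void *cb_ptr;
 *
 *	cb.in.op = HL_CB_OP_CREATE;
 *	cb.in.cb_size = 0x1000;
 *	if (ioctl(fd, HL_IOCTL_CB, &cb))
 *		return -1;
 *
 *	cb_ptr = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			fd, cb.out.cb_handle);
 */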

/*
 * Command Submission
 *
 * To submit work to the device, the user needs to call this IOCTL with a
 * set of JOBS. That set of JOBS constitutes a CS object.
 * Each JOB will be enqueued on a specific queue, according to the user's
 * input. There can be more than one JOB per queue.
 *
 * The CS IOCTL will receive three sets of JOBS. One set is for the "restore"
 * phase, a second set is for the "execution" phase and a third set is for
 * the "store" phase.
 * The JOBS of the "restore" phase are enqueued only after a context-switch
 * (or if it is the first CS for this context). The user can also order the
 * driver to run the "restore" phase explicitly.
 *
 * There are two types of queues - external and internal. External queues
 * are DMA queues which transfer data from/to the Host. All other queues are
 * internal. The driver will get completion notifications from the device
 * only on JOBS which are enqueued in the external queues.
 *
 * For jobs on external queues, the user needs to create command buffers
 * through the CB ioctl and give the CB's handle to the CS ioctl. For jobs on
 * internal queues, the user needs to prepare a "command buffer" with packets
 * on either the device SRAM/DRAM or the host, and give the device address of
 * that buffer to the CS ioctl.
 *
 * This IOCTL is asynchronous in regard to the actual execution of the CS.
 * This means it returns immediately after ALL the JOBS were enqueued on
 * their relevant queues. Therefore, the user mustn't assume the CS has been
 * completed or has even started to execute.
 *
 * Upon successful enqueue, the IOCTL returns a sequence number which the
 * user can use with the "Wait for CS" IOCTL to check whether the handle's CS
 * external JOBS have been completed. Note that if the CS has internal JOBS
 * which can execute AFTER the external JOBS have finished, the driver might
 * report that the CS has finished executing BEFORE the internal JOBS have
 * actually finished executing.
 *
 * Even though the sequence number increments per CS, the user can NOT
 * automatically assume that if CS with sequence number N finished, then CS
 * with sequence number N-1 also finished. The user can make this assumption
 * if and only if CS N and CS N-1 are exactly the same (same CBs for the same
 * queues).
 */
#define HL_IOCTL_CS	\
		_IOWR('H', 0x03, union hl_cs_args)

/*
 * Wait for Command Submission
 *
 * The user can call this IOCTL with a handle it received from the CS IOCTL
 * to wait until the handle's CS has finished executing. The user will wait
 * inside the kernel until the CS has finished or until the user-requested
 * timeout has expired.
 *
 * If the timeout value is 0, the driver won't sleep at all. It will check
 * the status of the CS and return immediately.
 *
 * The return value of the IOCTL is a standard Linux error code. The possible
 * values are:
 *
 * EINTR     - Kernel waiting has been interrupted, e.g. due to an OS signal
 *             that the user process received
 * ETIMEDOUT - The CS has caused a timeout on the device
 * EIO       - The CS was aborted (usually because the device was reset)
 * ENODEV    - The device wants to do a hard-reset (so the user needs to
 *             close the FD)
 *
 * The driver also returns a custom define inside the IOCTL which can be:
 *
 * HL_WAIT_CS_STATUS_COMPLETED   - The CS has been completed successfully (0)
 * HL_WAIT_CS_STATUS_BUSY        - The CS is still executing (0)
 * HL_WAIT_CS_STATUS_TIMEDOUT    - The CS has caused a timeout on the device
 *                                 (ETIMEDOUT)
 * HL_WAIT_CS_STATUS_ABORTED     - The CS was aborted, usually because the
 *                                 device was reset (EIO)
 * HL_WAIT_CS_STATUS_INTERRUPTED - Waiting for the CS was interrupted (EINTR)
 *
 */

#define HL_IOCTL_WAIT_CS	\
		_IOWR('H', 0x04, union hl_wait_cs_args)

/*
 * Memory
 * - Map host memory to device MMU
 * - Unmap host memory from device MMU
 *
 * This IOCTL allows the user to map host memory to the device MMU.
 *
 * For host memory, the IOCTL doesn't allocate memory. The user is supposed
 * to allocate the memory in user-space (malloc/new). The driver pins the
 * physical pages (up to the limit allowed by the OS), assigns a virtual
 * address in the device VA space and initializes the device MMU.
 *
 * There is an option for the user to specify the requested virtual address.
 *
 */
#define HL_IOCTL_MEMORY	\
		_IOWR('H', 0x05, union hl_mem_args)

/*
 * Debug
 * - Enable/disable the ETR/ETF/FUNNEL/STM/BMON/SPMU debug traces
 *
 * This IOCTL allows the user to get debug traces from the chip.
 *
 * Before the user can send configuration requests for the various
 * debug/profile engines, it needs to set the device into debug mode.
 * This is because the debug/profile infrastructure is a shared component in
 * the device, and we can't allow multiple users to access it at the same
 * time.
 *
 * Once a user sets the device into debug mode, the driver won't allow other
 * users to "work" with the device, i.e. open an FD. If there are multiple
 * FDs already open on the device, the driver won't allow any user to debug
 * the device.
 *
 * For each configuration request, the user needs to provide the register
 * index and essential data such as buffer address and size.
 *
 * Once the user has finished using the debug/profile engines, it should set
 * the device back into non-debug mode, i.e. disable debug mode.
 *
 * The driver can decide to "kick out" a user that abuses this interface.
 *
 */
#define HL_IOCTL_DEBUG	\
		_IOWR('H', 0x06, struct hl_debug_args)

#define HL_COMMAND_START	0x01
#define HL_COMMAND_END		0x07

#endif /* HABANALABS_H_ */