/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 *
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 *
 */

#ifndef HABANALABS_H_
#define HABANALABS_H_

#include <linux/types.h>
#include <linux/ioctl.h>

/*
 * Defines that are asic-specific but constitute an ABI between the kernel
 * driver and userspace
 */
#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START	0x8000	/* 32KB */

/*
 * Queue Numbering
 *
 * The external queues (PCI DMA channels) MUST be before the internal queues
 * and each group (PCI DMA channels and internal) must be contiguous inside
 * itself but there can be a gap between the two groups (although not
 * recommended)
 */

enum goya_queue_id {
	GOYA_QUEUE_ID_DMA_0 = 0,
	GOYA_QUEUE_ID_DMA_1 = 1,
	GOYA_QUEUE_ID_DMA_2 = 2,
	GOYA_QUEUE_ID_DMA_3 = 3,
	GOYA_QUEUE_ID_DMA_4 = 4,
	GOYA_QUEUE_ID_CPU_PQ = 5,
	GOYA_QUEUE_ID_MME = 6,	/* Internal queues start here */
	GOYA_QUEUE_ID_TPC0 = 7,
	GOYA_QUEUE_ID_TPC1 = 8,
	GOYA_QUEUE_ID_TPC2 = 9,
	GOYA_QUEUE_ID_TPC3 = 10,
	GOYA_QUEUE_ID_TPC4 = 11,
	GOYA_QUEUE_ID_TPC5 = 12,
	GOYA_QUEUE_ID_TPC6 = 13,
	GOYA_QUEUE_ID_TPC7 = 14,
	GOYA_QUEUE_ID_SIZE
};

/*
 * Engine Numbering
 *
 * Used in the "busy_engines_mask" field in `struct hl_info_hw_idle'
 */

enum goya_engine_id {
	GOYA_ENGINE_ID_DMA_0 = 0,
	GOYA_ENGINE_ID_DMA_1,
	GOYA_ENGINE_ID_DMA_2,
	GOYA_ENGINE_ID_DMA_3,
	GOYA_ENGINE_ID_DMA_4,
	GOYA_ENGINE_ID_MME_0,
	GOYA_ENGINE_ID_TPC_0,
	GOYA_ENGINE_ID_TPC_1,
	GOYA_ENGINE_ID_TPC_2,
	GOYA_ENGINE_ID_TPC_3,
	GOYA_ENGINE_ID_TPC_4,
	GOYA_ENGINE_ID_TPC_5,
	GOYA_ENGINE_ID_TPC_6,
	GOYA_ENGINE_ID_TPC_7,
	GOYA_ENGINE_ID_SIZE
};

/* Device state as reported by the HL_INFO_DEVICE_STATUS info opcode */
enum hl_device_status {
	HL_DEVICE_STATUS_OPERATIONAL,
	HL_DEVICE_STATUS_IN_RESET,
	HL_DEVICE_STATUS_MALFUNCTION
};

/* Opcode for management ioctl
 *
 * HL_INFO_HW_IP_INFO - Receive information about different IP blocks in the
 *                      device.
 * HL_INFO_HW_EVENTS - Receive an array describing how many times each event
 *                     occurred since the last hard reset.
 * HL_INFO_DRAM_USAGE - Retrieve the dram usage inside the device and of the
 *                      specific context. This is relevant only for devices
 *                      where the dram is managed by the kernel driver
 * HL_INFO_HW_IDLE - Retrieve information about the idle status of each
 *                   internal engine.
 * HL_INFO_DEVICE_STATUS - Retrieve the device's status. This opcode doesn't
 *                         require an open context.
 * HL_INFO_DEVICE_UTILIZATION - Retrieve the total utilization of the device
 *                              over the last period specified by the user.
 *                              The period can be between 100ms to 1s, in
 *                              resolution of 100ms. The return value is a
 *                              percentage of the utilization rate.
 * HL_INFO_HW_EVENTS_AGGREGATE - Receive an array describing how many times each
 *                               event occurred since the driver was loaded.
 * HL_INFO_CLK_RATE - Retrieve the current and maximum clock rate
 *                    of the device in MHz. The maximum clock rate is
 *                    configurable via sysfs parameter
 * HL_INFO_RESET_COUNT - Retrieve the counts of the soft and hard reset
 *                       operations performed on the device since the last
 *                       time the driver was loaded.
 */
#define HL_INFO_HW_IP_INFO		0
#define HL_INFO_HW_EVENTS		1
#define HL_INFO_DRAM_USAGE		2
#define HL_INFO_HW_IDLE			3
#define HL_INFO_DEVICE_STATUS		4
/* NOTE(review): opcode 5 is not defined in this ABI version; the gap is kept
 * so the remaining opcode values stay stable
 */
#define HL_INFO_DEVICE_UTILIZATION	6
#define HL_INFO_HW_EVENTS_AGGREGATE	7
#define HL_INFO_CLK_RATE		8
#define HL_INFO_RESET_COUNT		9

#define HL_INFO_VERSION_MAX_LEN		128
#define HL_INFO_CARD_NAME_MAX_LEN	16

/* Info returned for the HL_INFO_HW_IP_INFO opcode */
struct hl_info_hw_ip_info {
	__u64 sram_base_address;
	__u64 dram_base_address;
	__u64 dram_size;
	__u32 sram_size;
	__u32 num_of_events;
	__u32 device_id; /* PCI Device ID */
	__u32 reserved[3];
	__u32 armcp_cpld_version;
	__u32 psoc_pci_pll_nr;
	__u32 psoc_pci_pll_nf;
	__u32 psoc_pci_pll_od;
	__u32 psoc_pci_pll_div_factor;
	__u8 tpc_enabled_mask;
	__u8 dram_enabled;
	__u8 pad[2];
	__u8 armcp_version[HL_INFO_VERSION_MAX_LEN];
	__u8 card_name[HL_INFO_CARD_NAME_MAX_LEN];
};

/* Info returned for the HL_INFO_DRAM_USAGE opcode */
struct hl_info_dram_usage {
	__u64 dram_free_mem;
	__u64 ctx_dram_mem;
};

/* Info returned for the HL_INFO_HW_IDLE opcode */
struct hl_info_hw_idle {
	__u32 is_idle;
	/*
	 * Bitmask of busy engines.
	 * Bits definition is according to `enum <chip>_engine_id'.
	 */
	__u32 busy_engines_mask;
};

/* Info returned for the HL_INFO_DEVICE_STATUS opcode */
struct hl_info_device_status {
	__u32 status; /* enum hl_device_status */
	__u32 pad;
};

/* Info returned for the HL_INFO_DEVICE_UTILIZATION opcode */
struct hl_info_device_utilization {
	__u32 utilization;
	__u32 pad;
};

/* Info returned for the HL_INFO_CLK_RATE opcode */
struct hl_info_clk_rate {
	__u32 cur_clk_rate_mhz;
	__u32 max_clk_rate_mhz;
};

/* Info returned for the HL_INFO_RESET_COUNT opcode */
struct hl_info_reset_count {
	__u32 hard_reset_cnt;
	__u32 soft_reset_cnt;
};

/* Main argument structure of the INFO ioctl */
struct hl_info_args {
	/* Location of relevant struct in userspace */
	__u64 return_pointer;
	/*
	 * The size of the return value. Just like "size" in "snprintf",
	 * it limits how many bytes the kernel can write
	 *
	 * For hw_events array, the size should be
	 * hl_info_hw_ip_info.num_of_events * sizeof(__u32)
	 */
	__u32 return_size;

	/* HL_INFO_* */
	__u32 op;

	union {
		/* Context ID - Currently not in use */
		__u32 ctx_id;
		/* Period value for utilization rate (100ms - 1000ms), in 100ms
		 * resolution.
		 */
		__u32 period_ms;
	};

	__u32 pad;
};

/* Opcode to create a new command buffer */
#define HL_CB_OP_CREATE		0
/* Opcode to destroy previously created command buffer */
#define HL_CB_OP_DESTROY	1

#define HL_MAX_CB_SIZE		0x200000	/* 2MB */

/* Input of the Command Buffer (CB) ioctl */
struct hl_cb_in {
	/* Handle of CB or 0 if we want to create one */
	__u64 cb_handle;
	/* HL_CB_OP_* */
	__u32 op;
	/* Size of CB. Maximum size is HL_MAX_CB_SIZE. The minimum size that
	 * will be allocated, regardless of this parameter's value, is PAGE_SIZE
	 */
	__u32 cb_size;
	/* Context ID - Currently not in use */
	__u32 ctx_id;
	__u32 pad;
};

/* Output of the Command Buffer (CB) ioctl */
struct hl_cb_out {
	/* Handle of CB */
	__u64 cb_handle;
};

union hl_cb_args {
	struct hl_cb_in in;
	struct hl_cb_out out;
};

/*
 * This structure size must always be fixed to 64-bytes for backward
 * compatibility
 */
struct hl_cs_chunk {
	/*
	 * For external queue, this represents a Handle of CB on the Host
	 * For internal queue, this represents an SRAM or DRAM address of the
	 * internal CB
	 */
	__u64 cb_handle;
	/* Index of queue to put the CB on */
	__u32 queue_index;
	/*
	 * Size of command buffer with valid packets
	 * Can be smaller than actual CB size
	 */
	__u32 cb_size;
	/* HL_CS_CHUNK_FLAGS_* */
	__u32 cs_chunk_flags;
	/* Align structure to 64 bytes */
	__u32 pad[11];
};

#define HL_CS_FLAGS_FORCE_RESTORE	0x1

#define HL_CS_STATUS_SUCCESS		0
#define HL_MAX_JOBS_PER_CS 512 259 260 struct hl_cs_in { 261 /* this holds address of array of hl_cs_chunk for restore phase */ 262 __u64 chunks_restore; 263 /* this holds address of array of hl_cs_chunk for execution phase */ 264 __u64 chunks_execute; 265 /* this holds address of array of hl_cs_chunk for store phase - 266 * Currently not in use 267 */ 268 __u64 chunks_store; 269 /* Number of chunks in restore phase array. Maximum number is 270 * HL_MAX_JOBS_PER_CS 271 */ 272 __u32 num_chunks_restore; 273 /* Number of chunks in execution array. Maximum number is 274 * HL_MAX_JOBS_PER_CS 275 */ 276 __u32 num_chunks_execute; 277 /* Number of chunks in restore phase array - Currently not in use */ 278 __u32 num_chunks_store; 279 /* HL_CS_FLAGS_* */ 280 __u32 cs_flags; 281 /* Context ID - Currently not in use */ 282 __u32 ctx_id; 283 }; 284 285 struct hl_cs_out { 286 /* 287 * seq holds the sequence number of the CS to pass to wait ioctl. All 288 * values are valid except for 0 and ULLONG_MAX 289 */ 290 __u64 seq; 291 /* HL_CS_STATUS_* */ 292 __u32 status; 293 __u32 pad; 294 }; 295 296 union hl_cs_args { 297 struct hl_cs_in in; 298 struct hl_cs_out out; 299 }; 300 301 struct hl_wait_cs_in { 302 /* Command submission sequence number */ 303 __u64 seq; 304 /* Absolute timeout to wait in microseconds */ 305 __u64 timeout_us; 306 /* Context ID - Currently not in use */ 307 __u32 ctx_id; 308 __u32 pad; 309 }; 310 311 #define HL_WAIT_CS_STATUS_COMPLETED 0 312 #define HL_WAIT_CS_STATUS_BUSY 1 313 #define HL_WAIT_CS_STATUS_TIMEDOUT 2 314 #define HL_WAIT_CS_STATUS_ABORTED 3 315 #define HL_WAIT_CS_STATUS_INTERRUPTED 4 316 317 struct hl_wait_cs_out { 318 /* HL_WAIT_CS_STATUS_* */ 319 __u32 status; 320 __u32 pad; 321 }; 322 323 union hl_wait_cs_args { 324 struct hl_wait_cs_in in; 325 struct hl_wait_cs_out out; 326 }; 327 328 /* Opcode to alloc device memory */ 329 #define HL_MEM_OP_ALLOC 0 330 /* Opcode to free previously allocated device memory */ 331 #define HL_MEM_OP_FREE 1 332 
/* Opcode to map host memory */
#define HL_MEM_OP_MAP		2
/* Opcode to unmap previously mapped host memory */
#define HL_MEM_OP_UNMAP		3

/* Memory flags */
#define HL_MEM_CONTIGUOUS	0x1
#define HL_MEM_SHARED		0x2
#define HL_MEM_USERPTR		0x4

/* Input of the Memory ioctl; the active union member is selected by "op" */
struct hl_mem_in {
	union {
		/* HL_MEM_OP_ALLOC - allocate device memory */
		struct {
			/* Size to alloc */
			__u64 mem_size;
		} alloc;

		/* HL_MEM_OP_FREE - free device memory */
		struct {
			/* Handle returned from HL_MEM_OP_ALLOC */
			__u64 handle;
		} free;

		/* HL_MEM_OP_MAP - map device memory */
		struct {
			/*
			 * Requested virtual address of mapped memory.
			 * The driver will try to map the requested region to
			 * this hint address, as long as the address is valid
			 * and not already mapped. The user should check the
			 * returned address of the IOCTL to make sure he got
			 * the hint address. Passing 0 here means that the
			 * driver will choose the address itself.
			 */
			__u64 hint_addr;
			/* Handle returned from HL_MEM_OP_ALLOC */
			__u64 handle;
		} map_device;

		/* HL_MEM_OP_MAP - map host memory */
		struct {
			/* Address of allocated host memory */
			__u64 host_virt_addr;
			/*
			 * Requested virtual address of mapped memory.
			 * The driver will try to map the requested region to
			 * this hint address, as long as the address is valid
			 * and not already mapped. The user should check the
			 * returned address of the IOCTL to make sure he got
			 * the hint address. Passing 0 here means that the
			 * driver will choose the address itself.
			 */
			__u64 hint_addr;
			/* Size of allocated host memory */
			__u64 mem_size;
		} map_host;

		/* HL_MEM_OP_UNMAP - unmap host memory */
		struct {
			/* Virtual address returned from HL_MEM_OP_MAP */
			__u64 device_virt_addr;
		} unmap;
	};

	/* HL_MEM_OP_* */
	__u32 op;
	/* HL_MEM_* flags */
	__u32 flags;
	/* Context ID - Currently not in use */
	__u32 ctx_id;
	__u32 pad;
};

/* Output of the Memory ioctl */
struct hl_mem_out {
	union {
		/*
		 * Used for HL_MEM_OP_MAP as the virtual address that was
		 * assigned in the device VA space.
		 * A value of 0 means the requested operation failed.
		 */
		__u64 device_virt_addr;

		/*
		 * Used for HL_MEM_OP_ALLOC. This is the assigned
		 * handle for the allocated memory
		 */
		__u64 handle;
	};
};

union hl_mem_args {
	struct hl_mem_in in;
	struct hl_mem_out out;
};

#define HL_DEBUG_MAX_AUX_VALUES		10

/* Input parameters for the HL_DEBUG_OP_ETR opcode */
struct hl_debug_params_etr {
	/* Address in memory to allocate buffer */
	__u64 buffer_address;

	/* Size of buffer to allocate */
	__u64 buffer_size;

	/* Sink operation mode: SW fifo, HW fifo, Circular buffer */
	__u32 sink_mode;
	__u32 pad;
};

/* Input parameters for the HL_DEBUG_OP_ETF opcode */
struct hl_debug_params_etf {
	/* Address in memory to allocate buffer */
	__u64 buffer_address;

	/* Size of buffer to allocate */
	__u64 buffer_size;

	/* Sink operation mode: SW fifo, HW fifo, Circular buffer */
	__u32 sink_mode;
	__u32 pad;
};

/* Input parameters for the HL_DEBUG_OP_STM opcode */
struct hl_debug_params_stm {
	/* Two bit masks for HW event and Stimulus Port */
	__u64 he_mask;
	__u64 sp_mask;

	/* Trace source ID */
	__u32 id;

	/* Frequency for the timestamp register */
	__u32 frequency;
};

/* Input parameters for the HL_DEBUG_OP_BMON opcode */
struct hl_debug_params_bmon {
	/* Two address ranges that the user can request to filter */
	__u64 start_addr0;
	__u64 addr_mask0;

	__u64 start_addr1;
	__u64 addr_mask1;

	/* Capture window configuration */
	__u32 bw_win;
	__u32 win_capture;

	/* Trace source ID */
	__u32 id;
	__u32 pad;
};

/* Input parameters for the HL_DEBUG_OP_SPMU opcode */
struct hl_debug_params_spmu {
	/* Event types selection */
	__u64 event_types[HL_DEBUG_MAX_AUX_VALUES];

	/* Number of event types selection */
	__u32 event_types_num;
	__u32 pad;
};

/* Opcode for ETR component */
#define HL_DEBUG_OP_ETR		0
/* Opcode for ETF component */
#define HL_DEBUG_OP_ETF		1
/* Opcode for STM component */
#define HL_DEBUG_OP_STM		2
/* Opcode for FUNNEL component */
#define HL_DEBUG_OP_FUNNEL	3
/* Opcode for BMON component */
#define HL_DEBUG_OP_BMON	4
/* Opcode for SPMU component */
#define HL_DEBUG_OP_SPMU	5
/* Opcode for timestamp (deprecated) */
#define HL_DEBUG_OP_TIMESTAMP	6
/* Opcode for setting the device into or out of debug mode. The enable
 * variable should be 1 for enabling debug mode and 0 for disabling it
 */
#define HL_DEBUG_OP_SET_MODE	7

/* Argument structure of the Debug ioctl */
struct hl_debug_args {
	/*
	 * Pointer to user input structure.
	 * This field is relevant to specific opcodes.
	 */
	__u64 input_ptr;
	/* Pointer to user output structure */
	__u64 output_ptr;
	/* Size of user input structure */
	__u32 input_size;
	/* Size of user output structure */
	__u32 output_size;
	/* HL_DEBUG_OP_* */
	__u32 op;
	/*
	 * Register index in the component, taken from the debug_regs_index enum
	 * in the various ASIC header files
	 */
	__u32 reg_idx;
	/* Enable/disable */
	__u32 enable;
	/* Context ID - Currently not in use */
	__u32 ctx_id;
};

/*
 * Various information operations such as:
 * - H/W IP information
 * - Current dram usage
 *
 * The user calls this IOCTL with an opcode that describes the required
 * information. The user should supply a pointer to a user-allocated memory
 * chunk, which will be filled by the driver with the requested information.
 *
 * The user supplies the maximum amount of size to copy into the user's memory,
 * in order to prevent data corruption in case of differences between the
 * definitions of structures in kernel and userspace, e.g. in case of old
 * userspace and new kernel driver
 */
#define HL_IOCTL_INFO	\
		_IOWR('H', 0x01, struct hl_info_args)

/*
 * Command Buffer
 * - Request a Command Buffer
 * - Destroy a Command Buffer
 *
 * The command buffers are memory blocks that reside in DMA-able address
 * space and are physically contiguous so they can be accessed by the device
 * directly. They are allocated using the coherent DMA API.
 *
 * When creating a new CB, the IOCTL returns a handle of it, and the user-space
 * process needs to use that handle to mmap the buffer so it can access them.
 *
 */
#define HL_IOCTL_CB		\
		_IOWR('H', 0x02, union hl_cb_args)

/*
 * Command Submission
 *
 * To submit work to the device, the user needs to call this IOCTL with a set
 * of JOBS. That set of JOBS constitutes a CS object.
 * Each JOB will be enqueued on a specific queue, according to the user's input.
 * There can be more than one JOB per queue.
 *
 * The CS IOCTL will receive three sets of JOBS. One set is for "restore" phase,
 * a second set is for "execution" phase and a third set is for "store" phase.
 * The JOBS on the "restore" phase are enqueued only after context-switch
 * (or if it's the first CS for this context). The user can also order the
 * driver to run the "restore" phase explicitly
 *
 * There are two types of queues - external and internal. External queues
 * are DMA queues which transfer data from/to the Host. All other queues are
 * internal. The driver will get completion notifications from the device only
 * on JOBS which are enqueued in the external queues.
 *
 * For jobs on external queues, the user needs to create command buffers
 * through the CB ioctl and give the CB's handle to the CS ioctl. For jobs on
 * internal queues, the user needs to prepare a "command buffer" with packets
 * on either the SRAM or DRAM, and give the device address of that buffer to
 * the CS ioctl.
 *
 * This IOCTL is asynchronous in regard to the actual execution of the CS. This
 * means it returns immediately after ALL the JOBS were enqueued on their
 * relevant queues. Therefore, the user mustn't assume the CS has been completed
 * or has even started to execute.
 *
 * Upon successful enqueue, the IOCTL returns a sequence number which the user
 * can use with the "Wait for CS" IOCTL to check whether the handle's CS
 * external JOBS have been completed. Note that if the CS has internal JOBS
 * which can execute AFTER the external JOBS have finished, the driver might
 * report that the CS has finished executing BEFORE the internal JOBS have
 * actually finished executing.
 *
 * Even though the sequence number increments per CS, the user can NOT
 * automatically assume that if CS with sequence number N finished, then CS
 * with sequence number N-1 also finished. The user can make this assumption if
 * and only if CS N and CS N-1 are exactly the same (same CBs for the same
 * queues).
 */
#define HL_IOCTL_CS		\
		_IOWR('H', 0x03, union hl_cs_args)

/*
 * Wait for Command Submission
 *
 * The user can call this IOCTL with a handle it received from the CS IOCTL
 * to wait until the handle's CS has finished executing. The user will wait
 * inside the kernel until the CS has finished or until the user-requested
 * timeout has expired.
 *
 * The return value of the IOCTL is a standard Linux error code. The possible
 * values are:
 *
 * EINTR     - Kernel waiting has been interrupted, e.g. due to OS signal
 *             that the user process received
 * ETIMEDOUT - The CS has caused a timeout on the device
 * EIO       - The CS was aborted (usually because the device was reset)
 * ENODEV    - The device wants to do hard-reset (so user need to close FD)
 *
 * The driver also returns a custom define inside the IOCTL which can be:
 *
 * HL_WAIT_CS_STATUS_COMPLETED   - The CS has been completed successfully (0)
 * HL_WAIT_CS_STATUS_BUSY        - The CS is still executing (0)
 * HL_WAIT_CS_STATUS_TIMEDOUT    - The CS has caused a timeout on the device
 *                                 (ETIMEDOUT)
 * HL_WAIT_CS_STATUS_ABORTED     - The CS was aborted, usually because the
 *                                 device was reset (EIO)
 * HL_WAIT_CS_STATUS_INTERRUPTED - Waiting for the CS was interrupted (EINTR)
 *
 */

#define HL_IOCTL_WAIT_CS	\
		_IOWR('H', 0x04, union hl_wait_cs_args)

/*
 * Memory
 * - Map host memory to device MMU
 * - Unmap host memory from device MMU
 *
 * This IOCTL allows the user to map host memory to the device MMU
 *
 * For host memory, the IOCTL doesn't allocate memory. The user is supposed
 * to allocate the memory in user-space (malloc/new). The driver pins the
 * physical pages (up to the allowed limit by the OS), assigns a virtual
 * address in the device VA space and initializes the device MMU.
 *
 * There is an option for the user to specify the requested virtual address.
 *
 */
#define HL_IOCTL_MEMORY		\
		_IOWR('H', 0x05, union hl_mem_args)

/*
 * Debug
 * - Enable/disable the ETR/ETF/FUNNEL/STM/BMON/SPMU debug traces
 *
 * This IOCTL allows the user to get debug traces from the chip.
 *
 * Before the user can send configuration requests of the various
 * debug/profile engines, it needs to set the device into debug mode.
 * This is because the debug/profile infrastructure is a shared component in
 * the device and we can't allow multiple users to access it at the same time.
 *
 * Once a user sets the device into debug mode, the driver won't allow other
 * users to "work" with the device, i.e. open a FD. If there are multiple users
 * opened on the device, the driver won't allow any user to debug the device.
 *
 * For each configuration request, the user needs to provide the register index
 * and essential data such as buffer address and size.
 *
 * Once the user has finished using the debug/profile engines, he should
 * set the device into non-debug mode, i.e. disable debug mode.
 *
 * The driver can decide to "kick out" the user if he abuses this interface.
 *
 */
#define HL_IOCTL_DEBUG		\
		_IOWR('H', 0x06, struct hl_debug_args)

/* First and one-past-last ioctl command numbers used above (0x01 - 0x06) */
#define HL_COMMAND_START	0x01
#define HL_COMMAND_END		0x07

#endif /* HABANALABS_H_ */