/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED

#include <drm/drm.h>
#include <linux/ioctl.h>

/*
 * - 1.1 - initial version
 * - 1.3 - Add SMI events support
 * - 1.4 - Indicate new SRAM EDC bit in device properties
 * - 1.5 - Add SVM API
 * - 1.6 - Query clear flags in SVM get_attr API
 * - 1.7 - Checkpoint Restore (CRIU) API
 * - 1.8 - CRIU - Support for SDMA transfers with GTT BOs
 * - 1.9 - Add available memory ioctl
 * - 1.10 - Add SMI profiler event log
 * - 1.11 - Add unified memory for ctx save/restore area
 * - 1.12 - Add DMA buf export ioctl
 * - 1.13 - Add debugger API
 * - 1.14 - Update kfd_event_data
 * - 1.15 - Enable managing mappings in compute VMs with GEM_VA ioctl
 */
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 15

struct kfd_ioctl_get_version_args {
	__u32 major_version;	/* from KFD */
	__u32 minor_version;	/* from KFD */
};
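/*
 * Example usage (an illustrative sketch, not part of the ABI): querying the
 * interface version. "kfd_fd" and "have_svm_api" are assumptions of this
 * sketch; kfd_fd is an open descriptor for /dev/kfd, and
 * AMDKFD_IOC_GET_VERSION is defined at the end of this header. Per the
 * version history above, a minor version >= 5 implies the SVM API.
 *
 *	struct kfd_ioctl_get_version_args ver = {0};
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_GET_VERSION, &ver) == -1)
 *		perror("AMDKFD_IOC_GET_VERSION");
 *	else
 *		have_svm_api = ver.minor_version >= 5;
 */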
/* For kfd_ioctl_create_queue_args.queue_type. */
#define KFD_IOC_QUEUE_TYPE_COMPUTE		0x0
#define KFD_IOC_QUEUE_TYPE_SDMA			0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL		0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI		0x3

#define KFD_MAX_QUEUE_PERCENTAGE	100
#define KFD_MAX_QUEUE_PRIORITY		15

struct kfd_ioctl_create_queue_args {
	__u64 ring_base_address;	/* to KFD */
	__u64 write_pointer_address;	/* from KFD */
	__u64 read_pointer_address;	/* from KFD */
	__u64 doorbell_offset;		/* from KFD */

	__u32 ring_size;		/* to KFD */
	__u32 gpu_id;			/* to KFD */
	__u32 queue_type;		/* to KFD */
	__u32 queue_percentage;		/* to KFD */
	__u32 queue_priority;		/* to KFD */
	__u32 queue_id;			/* from KFD */

	__u64 eop_buffer_address;	/* to KFD */
	__u64 eop_buffer_size;		/* to KFD */
	__u64 ctx_save_restore_address;	/* to KFD */
	__u32 ctx_save_restore_size;	/* to KFD */
	__u32 ctl_stack_size;		/* to KFD */
};
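/*
 * Example usage (an illustrative sketch, not part of the ABI): creating a
 * compute queue. "kfd_fd", "gpu_id", "ring_buf", "ring_bytes", "queue_id"
 * and "doorbell_offset" are assumptions of this sketch; the ring buffer must
 * already be GPU-accessible, and depending on the ASIC an EOP buffer and
 * context save/restore area may also be required. AMDKFD_IOC_CREATE_QUEUE is
 * defined at the end of this header.
 *
 *	struct kfd_ioctl_create_queue_args args = {0};
 *
 *	args.gpu_id = gpu_id;
 *	args.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE;
 *	args.ring_base_address = (__u64)(uintptr_t)ring_buf;
 *	args.ring_size = ring_bytes;
 *	args.queue_percentage = KFD_MAX_QUEUE_PERCENTAGE;
 *	args.queue_priority = KFD_MAX_QUEUE_PRIORITY / 2;
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &args) == 0) {
 *		queue_id = args.queue_id;
 *		doorbell_offset = args.doorbell_offset;
 *	}
 */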
struct kfd_ioctl_destroy_queue_args {
	__u32 queue_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_update_queue_args {
	__u64 ring_base_address;	/* to KFD */

	__u32 queue_id;			/* to KFD */
	__u32 ring_size;		/* to KFD */
	__u32 queue_percentage;		/* to KFD */
	__u32 queue_priority;		/* to KFD */
};

struct kfd_ioctl_set_cu_mask_args {
	__u32 queue_id;		/* to KFD */
	__u32 num_cu_mask;	/* to KFD */
	__u64 cu_mask_ptr;	/* to KFD */
};

struct kfd_ioctl_get_queue_wave_state_args {
	__u64 ctl_stack_address;	/* to KFD */
	__u32 ctl_stack_used_size;	/* from KFD */
	__u32 save_area_used_size;	/* from KFD */
	__u32 queue_id;			/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_get_available_memory_args {
	__u64 available;	/* from KFD */
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_dbg_device_info_entry {
	__u64 exception_status;
	__u64 lds_base;
	__u64 lds_limit;
	__u64 scratch_base;
	__u64 scratch_limit;
	__u64 gpuvm_base;
	__u64 gpuvm_limit;
	__u32 gpu_id;
	__u32 location_id;
	__u32 vendor_id;
	__u32 device_id;
	__u32 revision_id;
	__u32 subsystem_vendor_id;
	__u32 subsystem_device_id;
	__u32 fw_version;
	__u32 gfx_target_version;
	__u32 simd_count;
	__u32 max_waves_per_simd;
	__u32 array_count;
	__u32 simd_arrays_per_engine;
	__u32 num_xcc;
	__u32 capability;
	__u32 debug_prop;
};

/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1

struct kfd_ioctl_set_memory_policy_args {
	__u64 alternate_aperture_base;	/* to KFD */
	__u64 alternate_aperture_size;	/* to KFD */

	__u32 gpu_id;			/* to KFD */
	__u32 default_policy;		/* to KFD */
	__u32 alternate_policy;		/* to KFD */
	__u32 pad;
};

/*
 * All counters are monotonic. They are used for profiling of compute jobs.
 * The profiling is done by userspace.
 *
 * In case of GPU reset, the counter should not be affected.
 */

struct kfd_ioctl_get_clock_counters_args {
	__u64 gpu_clock_counter;	/* from KFD */
	__u64 cpu_clock_counter;	/* from KFD */
	__u64 system_clock_counter;	/* from KFD */
	__u64 system_clock_freq;	/* from KFD */

	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_process_device_apertures {
	__u64 lds_base;		/* from KFD */
	__u64 lds_limit;	/* from KFD */
	__u64 scratch_base;	/* from KFD */
	__u64 scratch_limit;	/* from KFD */
	__u64 gpuvm_base;	/* from KFD */
	__u64 gpuvm_limit;	/* from KFD */
	__u32 gpu_id;		/* from KFD */
	__u32 pad;
};

/*
 * AMDKFD_IOC_GET_PROCESS_APERTURES is deprecated. Use
 * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW instead, which supports an
 * unlimited number of GPUs.
 */
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
	struct kfd_process_device_apertures
			process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */

	/* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
	__u32 num_of_nodes;
	__u32 pad;
};

struct kfd_ioctl_get_process_apertures_new_args {
	/* User allocated. Pointer to struct kfd_process_device_apertures
	 * filled in by Kernel
	 */
	__u64 kfd_process_device_apertures_ptr;
	/* to KFD - indicates amount of memory present in
	 *  kfd_process_device_apertures_ptr
	 * from KFD - Number of entries filled by KFD.
	 */
	__u32 num_of_nodes;
	__u32 pad;
};
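/*
 * Example usage (an illustrative sketch, not part of the ABI): the two-call
 * pattern of AMDKFD_IOC_GET_PROCESS_APERTURES_NEW above. @num_of_nodes holds
 * the caller's array size on input and the number of filled entries on
 * output. "kfd_fd", "num_gpus" and "nodes_filled" are assumptions of this
 * sketch.
 *
 *	struct kfd_process_device_apertures *aps;
 *	struct kfd_ioctl_get_process_apertures_new_args args = {0};
 *
 *	aps = calloc(num_gpus, sizeof(*aps));
 *	args.kfd_process_device_apertures_ptr = (__u64)(uintptr_t)aps;
 *	args.num_of_nodes = num_gpus;
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args) == 0)
 *		nodes_filled = args.num_of_nodes;
 */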
#define MAX_ALLOWED_NUM_POINTS    100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE  128

struct kfd_ioctl_dbg_register_args {
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_dbg_unregister_args {
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_dbg_address_watch_args {
	__u64 content_ptr;		/* a pointer to the actual content */
	__u32 gpu_id;			/* to KFD */
	__u32 buf_size_in_bytes;	/* including gpu_id and buf_size */
};

struct kfd_ioctl_dbg_wave_control_args {
	__u64 content_ptr;		/* a pointer to the actual content */
	__u32 gpu_id;			/* to KFD */
	__u32 buf_size_in_bytes;	/* including gpu_id and buf_size */
};

#define KFD_INVALID_FD 0xffffffff

/* Matching HSA_EVENTTYPE */
#define KFD_IOC_EVENT_SIGNAL			0
#define KFD_IOC_EVENT_NODECHANGE		1
#define KFD_IOC_EVENT_DEVICESTATECHANGE		2
#define KFD_IOC_EVENT_HW_EXCEPTION		3
#define KFD_IOC_EVENT_SYSTEM_EVENT		4
#define KFD_IOC_EVENT_DEBUG_EVENT		5
#define KFD_IOC_EVENT_PROFILE_EVENT		6
#define KFD_IOC_EVENT_QUEUE_EVENT		7
#define KFD_IOC_EVENT_MEMORY			8

#define KFD_IOC_WAIT_RESULT_COMPLETE		0
#define KFD_IOC_WAIT_RESULT_TIMEOUT		1
#define KFD_IOC_WAIT_RESULT_FAIL		2

#define KFD_SIGNAL_EVENT_LIMIT			4096

/* For kfd_event_data.hw_exception_data.reset_type. */
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET	0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET	1

/* For kfd_event_data.hw_exception_data.reset_cause. */
#define KFD_HW_EXCEPTION_GPU_HANG	0
#define KFD_HW_EXCEPTION_ECC		1

/* For kfd_hsa_memory_exception_data.ErrorType */
#define KFD_MEM_ERR_NO_RAS		0
#define KFD_MEM_ERR_SRAM_ECC		1
#define KFD_MEM_ERR_POISON_CONSUMED	2
#define KFD_MEM_ERR_GPU_HANG		3

struct kfd_ioctl_create_event_args {
	__u64 event_page_offset;	/* from KFD */
	__u32 event_trigger_data;	/* from KFD - signal events only */
	__u32 event_type;		/* to KFD */
	__u32 auto_reset;		/* to KFD */
	__u32 node_id;			/* to KFD - only valid for certain
					   event types */
	__u32 event_id;			/* from KFD */
	__u32 event_slot_index;		/* from KFD */
};

struct kfd_ioctl_destroy_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_set_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_reset_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_memory_exception_failure {
	__u32 NotPresent;	/* Page not present or supervisor privilege */
	__u32 ReadOnly;		/* Write access to a read-only page */
	__u32 NoExecute;	/* Execute access to a page marked NX */
	__u32 imprecise;	/* Can't determine the exact fault address */
};

/* memory exception data */
struct kfd_hsa_memory_exception_data {
	struct kfd_memory_exception_failure failure;
	__u64 va;
	__u32 gpu_id;
	__u32 ErrorType; /* 0 = no RAS error,
			  * 1 = ECC_SRAM,
			  * 2 = Link_SYNFLOOD (poison),
			  * 3 = GPU hang (not attributable to a specific cause),
			  * other values reserved
			  */
};

/* hw exception data */
struct kfd_hsa_hw_exception_data {
	__u32 reset_type;
	__u32 reset_cause;
	__u32 memory_lost;
	__u32 gpu_id;
};

/* hsa signal event data */
struct kfd_hsa_signal_event_data {
	__u64 last_event_age;	/* to and from KFD */
};

/* Event data */
struct kfd_event_data {
	union {
		/* From KFD */
		struct kfd_hsa_memory_exception_data memory_exception_data;
		struct kfd_hsa_hw_exception_data hw_exception_data;
		/* To and From KFD */
		struct kfd_hsa_signal_event_data signal_event_data;
	};
	__u64 kfd_event_data_ext;	/* pointer to an extension structure
					   for future exception types */
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_wait_events_args {
	__u64 events_ptr;	/* pointer to struct
				   kfd_event_data array, to KFD */
	__u32 num_events;	/* to KFD */
	__u32 wait_for_all;	/* to KFD */
	__u32 timeout;		/* to KFD */
	__u32 wait_result;	/* from KFD */
};
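/*
 * Example usage (an illustrative sketch, not part of the ABI): creating an
 * auto-reset signal event and blocking on it for up to one second. "kfd_fd"
 * and "signaled" are assumptions of this sketch, and the timeout value is
 * arbitrary.
 *
 *	struct kfd_ioctl_create_event_args cev = {0};
 *	struct kfd_event_data evd = {0};
 *	struct kfd_ioctl_wait_events_args wev = {0};
 *
 *	cev.event_type = KFD_IOC_EVENT_SIGNAL;
 *	cev.auto_reset = 1;
 *	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &cev))
 *		return -1;
 *
 *	evd.event_id = cev.event_id;
 *	wev.events_ptr = (__u64)(uintptr_t)&evd;
 *	wev.num_events = 1;
 *	wev.wait_for_all = 1;
 *	wev.timeout = 1000;
 *	if (ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wev) == 0 &&
 *	    wev.wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
 *		signaled = 1;
 */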
struct kfd_ioctl_set_scratch_backing_va_args {
	__u64 va_addr;	/* to KFD */
	__u32 gpu_id;	/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_get_tile_config_args {
	/* to KFD: pointer to tile array */
	__u64 tile_config_ptr;
	/* to KFD: pointer to macro tile array */
	__u64 macro_tile_config_ptr;
	/* to KFD: array size allocated by user mode
	 * from KFD: array size filled by kernel
	 */
	__u32 num_tile_configs;
	/* to KFD: array size allocated by user mode
	 * from KFD: array size filled by kernel
	 */
	__u32 num_macro_tile_configs;

	__u32 gpu_id;		/* to KFD */
	__u32 gb_addr_config;	/* from KFD */
	__u32 num_banks;	/* from KFD */
	__u32 num_ranks;	/* from KFD */
	/* struct size can be extended later if needed
	 * without breaking ABI compatibility
	 */
};

struct kfd_ioctl_set_trap_handler_args {
	__u64 tba_addr;		/* to KFD */
	__u64 tma_addr;		/* to KFD */
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_acquire_vm_args {
	__u32 drm_fd;	/* to KFD */
	__u32 gpu_id;	/* to KFD */
};

/* Allocation flags: memory types */
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM		(1 << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT		(1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR		(1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL	(1 << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP	(1 << 4)
/* Allocation flags: attributes/access options */
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE	(1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE	(1 << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC		(1 << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE	(1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM	(1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT	(1 << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED	(1 << 25)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT	(1 << 24)

/* Allocate memory for later SVM (shared virtual memory) mapping.
 *
 * @va_addr:     virtual address of the memory to be allocated
 *               all later mappings on all GPUs will use this address
 * @size:        size in bytes
 * @handle:      buffer handle returned to user mode, used to refer to
 *               this allocation for mapping, unmapping and freeing
 * @mmap_offset: for CPU-mapping the allocation by mmapping a render node
 *               for userptrs this is overloaded to specify the CPU address
 * @gpu_id:      device identifier
 * @flags:       memory type and attributes. See KFD_IOC_ALLOC_MEM_FLAGS above
 */
struct kfd_ioctl_alloc_memory_of_gpu_args {
	__u64 va_addr;		/* to KFD */
	__u64 size;		/* to KFD */
	__u64 handle;		/* from KFD */
	__u64 mmap_offset;	/* to KFD (userptr), from KFD (mmap offset) */
	__u32 gpu_id;		/* to KFD */
	__u32 flags;
};

/* Free memory allocated with kfd_ioctl_alloc_memory_of_gpu
 *
 * @handle: memory handle returned by alloc
 */
struct kfd_ioctl_free_memory_of_gpu_args {
	__u64 handle;		/* to KFD */
};

/* Map memory to one or more GPUs
 *
 * @handle:               memory handle returned by alloc
 * @device_ids_array_ptr: array of gpu_ids (__u32 per device)
 * @n_devices:            number of devices in the array
 * @n_success:            number of devices mapped successfully
 *
 * @n_success returns information to the caller how many devices from
 * the start of the array have mapped the buffer successfully. It can
 * be passed into a subsequent retry call to skip those devices. For
 * the first call the caller should initialize it to 0.
 *
 * If the ioctl completes with return code 0 (success), n_success ==
 * n_devices.
 */
struct kfd_ioctl_map_memory_to_gpu_args {
	__u64 handle;			/* to KFD */
	__u64 device_ids_array_ptr;	/* to KFD */
	__u32 n_devices;		/* to KFD */
	__u32 n_success;		/* to/from KFD */
};
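/*
 * Example usage (an illustrative sketch, not part of the ABI): allocating
 * VRAM and mapping it to one GPU, initializing @n_success to 0 on the first
 * call as described above. "kfd_fd", "gpu_id", "va" and "bytes" are
 * assumptions of this sketch, as is the retry_wanted() placeholder that
 * stands in for a caller-defined retry policy. On retry, devices already
 * counted in @n_success are skipped, so a partial failure does not remap the
 * devices that already succeeded.
 *
 *	struct kfd_ioctl_alloc_memory_of_gpu_args alloc = {0};
 *	struct kfd_ioctl_map_memory_to_gpu_args map = {0};
 *	__u32 gpus[1] = { gpu_id };
 *
 *	alloc.va_addr = va;
 *	alloc.size = bytes;
 *	alloc.gpu_id = gpu_id;
 *	alloc.flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
 *		      KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE;
 *	if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &alloc))
 *		return -1;
 *
 *	map.handle = alloc.handle;
 *	map.device_ids_array_ptr = (__u64)(uintptr_t)gpus;
 *	map.n_devices = 1;
 *	map.n_success = 0;
 *	while (ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &map) != 0) {
 *		if (!retry_wanted(errno))
 *			return -1;
 *	}
 */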
/* Unmap memory from one or more GPUs
 *
 * same arguments as for mapping
 */
struct kfd_ioctl_unmap_memory_from_gpu_args {
	__u64 handle;			/* to KFD */
	__u64 device_ids_array_ptr;	/* to KFD */
	__u32 n_devices;		/* to KFD */
	__u32 n_success;		/* to/from KFD */
};

/* Allocate GWS for specific queue
 *
 * @queue_id:  queue's id that GWS is allocated for
 * @num_gws:   how many GWS to allocate
 * @first_gws: index of the first GWS allocated.
 *             only contiguous GWS allocation is supported
 */
struct kfd_ioctl_alloc_queue_gws_args {
	__u32 queue_id;		/* to KFD */
	__u32 num_gws;		/* to KFD */
	__u32 first_gws;	/* from KFD */
	__u32 pad;
};

struct kfd_ioctl_get_dmabuf_info_args {
	__u64 size;		/* from KFD */
	__u64 metadata_ptr;	/* to KFD */
	__u32 metadata_size;	/* to KFD (space allocated by user)
				 * from KFD (actual metadata size)
				 */
	__u32 gpu_id;		/* from KFD */
	__u32 flags;		/* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
	__u32 dmabuf_fd;	/* to KFD */
};

struct kfd_ioctl_import_dmabuf_args {
	__u64 va_addr;		/* to KFD */
	__u64 handle;		/* from KFD */
	__u32 gpu_id;		/* to KFD */
	__u32 dmabuf_fd;	/* to KFD */
};

struct kfd_ioctl_export_dmabuf_args {
	__u64 handle;		/* to KFD */
	__u32 flags;		/* to KFD */
	__u32 dmabuf_fd;	/* from KFD */
};

/*
 * KFD SMI(System Management Interface) events
 */
enum kfd_smi_event {
	KFD_SMI_EVENT_NONE = 0, /* not used */
	KFD_SMI_EVENT_VMFAULT = 1, /* event start counting at 1 */
	KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
	KFD_SMI_EVENT_GPU_PRE_RESET = 3,
	KFD_SMI_EVENT_GPU_POST_RESET = 4,
	KFD_SMI_EVENT_MIGRATE_START = 5,
	KFD_SMI_EVENT_MIGRATE_END = 6,
	KFD_SMI_EVENT_PAGE_FAULT_START = 7,
	KFD_SMI_EVENT_PAGE_FAULT_END = 8,
	KFD_SMI_EVENT_QUEUE_EVICTION = 9,
	KFD_SMI_EVENT_QUEUE_RESTORE = 10,
	KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,

	/*
	 * Max event number. Used as a flag bit to request events from all
	 * processes; this requires super user permission. Without this flag,
	 * only events from the calling process are received.
	 */
	KFD_SMI_EVENT_ALL_PROCESS = 64
};

enum KFD_MIGRATE_TRIGGERS {
	KFD_MIGRATE_TRIGGER_PREFETCH,
	KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
	KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
	KFD_MIGRATE_TRIGGER_TTM_EVICTION
};

enum KFD_QUEUE_EVICTION_TRIGGERS {
	KFD_QUEUE_EVICTION_TRIGGER_SVM,
	KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
	KFD_QUEUE_EVICTION_TRIGGER_TTM,
	KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
	KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
	KFD_QUEUE_EVICTION_CRIU_RESTORE
};

enum KFD_SVM_UNMAP_TRIGGERS {
	KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
	KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
	KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
};

#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
#define KFD_SMI_EVENT_MSG_SIZE	96

struct kfd_ioctl_smi_events_args {
	__u32 gpuid;	/* to KFD */
	__u32 anon_fd;	/* from KFD */
};
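/*
 * Example usage (an illustrative sketch, not part of the ABI): subscribing
 * to SMI events. The ioctl returns an anonymous file descriptor; in current
 * kernels the event mask, built with KFD_SMI_EVENT_MASK_FROM_INDEX, is then
 * written to that descriptor as a binary __u64, and text records of up to
 * KFD_SMI_EVENT_MSG_SIZE bytes are read back. "kfd_fd", "gpu_id" and the
 * handle_smi_line() placeholder are assumptions of this sketch.
 *
 *	struct kfd_ioctl_smi_events_args args = {0};
 *	__u64 mask = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_VMFAULT) |
 *		     KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_THERMAL_THROTTLE);
 *	char msg[KFD_SMI_EVENT_MSG_SIZE];
 *
 *	args.gpuid = gpu_id;
 *	if (ioctl(kfd_fd, AMDKFD_IOC_SMI_EVENTS, &args))
 *		return -1;
 *	write(args.anon_fd, &mask, sizeof(mask));
 *	while (read(args.anon_fd, msg, sizeof(msg)) > 0)
 *		handle_smi_line(msg);
 */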
/**************************************************************************************************
 * CRIU IOCTLs (Checkpoint Restore In Userspace)
 *
 * When checkpointing a process, the userspace application will perform:
 * 1. PROCESS_INFO op to determine current process information. This pauses execution and evicts
 *    all the queues.
 * 2. CHECKPOINT op to checkpoint process contents (BOs, queues, events, svm-ranges)
 * 3. UNPAUSE op to un-evict all the queues
 *
 * When restoring a process, the CRIU userspace application will perform:
 *
 * 1. RESTORE op to restore process contents
 * 2. RESUME op to start the process
 *
 * Note: Queues are forced into an evicted state after a successful PROCESS_INFO. User
 * application needs to perform an UNPAUSE operation after calling PROCESS_INFO.
 */

enum kfd_criu_op {
	KFD_CRIU_OP_PROCESS_INFO,
	KFD_CRIU_OP_CHECKPOINT,
	KFD_CRIU_OP_UNPAUSE,
	KFD_CRIU_OP_RESTORE,
	KFD_CRIU_OP_RESUME,
};

/**
 * kfd_ioctl_criu_args - Arguments to perform a CRIU operation
 * @devices:	    [in/out] User pointer to memory location for devices information.
 *		    This is an array of type kfd_criu_device_bucket.
 * @bos:	    [in/out] User pointer to memory location for BOs information
 *		    This is an array of type kfd_criu_bo_bucket.
 * @priv_data:	    [in/out] User pointer to memory location for private data
 * @priv_data_size: [in/out] Size of priv_data in bytes
 * @num_devices:    [in/out] Number of GPUs used by process. Size of @devices array.
 * @num_bos:	    [in/out] Number of BOs used by process. Size of @bos array.
 * @num_objects:    [in/out] Number of objects used by process. Objects are opaque to
 *		    user application.
 * @pid:	    [in/out] PID of the process being checkpointed
 * @op:		    [in] Type of operation (kfd_criu_op)
 *
 * Return: 0 on success, -errno on failure
 */
struct kfd_ioctl_criu_args {
	__u64 devices;		/* Used during ops: CHECKPOINT, RESTORE */
	__u64 bos;		/* Used during ops: CHECKPOINT, RESTORE */
	__u64 priv_data;	/* Used during ops: CHECKPOINT, RESTORE */
	__u64 priv_data_size;	/* Used during ops: PROCESS_INFO, RESTORE */
	__u32 num_devices;	/* Used during ops: PROCESS_INFO, RESTORE */
	__u32 num_bos;		/* Used during ops: PROCESS_INFO, RESTORE */
	__u32 num_objects;	/* Used during ops: PROCESS_INFO, RESTORE */
	__u32 pid;		/* Used during ops: PROCESS_INFO, RESUME */
	__u32 op;
};

struct kfd_criu_device_bucket {
	__u32 user_gpu_id;
	__u32 actual_gpu_id;
	__u32 drm_fd;
	__u32 pad;
};

struct kfd_criu_bo_bucket {
	__u64 addr;
	__u64 size;
	__u64 offset;
	__u64 restored_offset;	/* During restore, updated offset for BO */
	__u32 gpu_id;		/* This is the user_gpu_id */
	__u32 alloc_flags;
	__u32 dmabuf_fd;
	__u32 pad;
};
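/*
 * Example usage (an illustrative sketch, not part of the ABI): the
 * checkpoint side of the sequence described above. Sizing of the @devices,
 * @bos and @priv_data buffers from the counts returned by PROCESS_INFO is
 * elided; "kfd_fd" and "target_pid" are assumptions of this sketch.
 *
 *	struct kfd_ioctl_criu_args args = {0};
 *
 *	args.op = KFD_CRIU_OP_PROCESS_INFO;
 *	args.pid = target_pid;
 *	if (ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args))
 *		return -1;
 *
 *	(allocate buffers from args.num_devices, args.num_bos and
 *	 args.priv_data_size, store their pointers in args, then:)
 *
 *	args.op = KFD_CRIU_OP_CHECKPOINT;
 *	if (ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args))
 *		return -1;
 *
 *	args.op = KFD_CRIU_OP_UNPAUSE;
 *	ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
 */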
/* CRIU IOCTLs - END */
/**************************************************************************************************/

/* Register offset inside the remapped mmio page
 */
enum kfd_mmio_remap {
	KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
	KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};

/* Guarantee host access to memory */
#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
/* Fine grained coherency between all devices with access */
#define KFD_IOCTL_SVM_FLAG_COHERENT 0x00000002
/* Use any GPU in same hive as preferred device */
#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004
/* GPUs only read, allows replication */
#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008
/* Allow execution on GPU */
#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010
/* GPUs mostly read, may allow similar optimizations as RO, but writes fault */
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
/* Keep GPU memory mapping always valid as if XNACK is disabled */
#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040
/* Fine grained coherency between all devices using device-scope atomics */
#define KFD_IOCTL_SVM_FLAG_EXT_COHERENT 0x00000080

/**
 * kfd_ioctl_svm_op - SVM ioctl operations
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes
 * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes
 */
enum kfd_ioctl_svm_op {
	KFD_IOCTL_SVM_OP_SET_ATTR,
	KFD_IOCTL_SVM_OP_GET_ATTR
};

/**
 * kfd_ioctl_svm_location - Enum for preferred and prefetch locations
 *
 * GPU IDs are used to specify GPUs as preferred and prefetch locations.
 * Below definitions are used for system memory or for leaving the preferred
 * location unspecified.
 */
enum kfd_ioctl_svm_location {
	KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
	KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
};

/**
 * kfd_ioctl_svm_attr_type - SVM attribute types
 *
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: gpuid of the preferred location, 0 for
 *                                    system memory
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: gpuid of the prefetch location, 0 for
 *                                   system memory. Setting this triggers an
 *                                   immediate prefetch (migration).
 * @KFD_IOCTL_SVM_ATTR_ACCESS:
 * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
 * @KFD_IOCTL_SVM_ATTR_NO_ACCESS: specify memory access for the gpuid given
 *                                by the attribute value
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS: bitmask of flags to set (see
 *                                KFD_IOCTL_SVM_FLAG_...)
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS: bitmask of flags to clear
 * @KFD_IOCTL_SVM_ATTR_GRANULARITY: migration granularity
 *                                  (log2 num pages)
 */
enum kfd_ioctl_svm_attr_type {
	KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
	KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
	KFD_IOCTL_SVM_ATTR_ACCESS,
	KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
	KFD_IOCTL_SVM_ATTR_NO_ACCESS,
	KFD_IOCTL_SVM_ATTR_SET_FLAGS,
	KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
	KFD_IOCTL_SVM_ATTR_GRANULARITY
};

/**
 * kfd_ioctl_svm_attribute - Attributes as pairs of type and value
 *
 * The meaning of the @value depends on the attribute type.
 *
 * @type: attribute type (see enum @kfd_ioctl_svm_attr_type)
 * @value: attribute value
 */
struct kfd_ioctl_svm_attribute {
	__u32 type;
	__u32 value;
};
/**
 * kfd_ioctl_svm_args - Arguments for SVM ioctl
 *
 * @op specifies the operation to perform (see enum
 * @kfd_ioctl_svm_op). @start_addr and @size are common for all
 * operations.
 *
 * A variable number of attributes can be given in @attrs.
 * @nattr specifies the number of attributes. New attributes can be
 * added in the future without breaking the ABI. If unknown attributes
 * are given, the function returns -EINVAL.
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR sets attributes for a virtual address
 * range. It may overlap existing virtual address ranges. If it does,
 * the existing ranges will be split such that the attribute changes
 * only apply to the specified address range.
 *
 * @KFD_IOCTL_SVM_OP_GET_ATTR returns the intersection of attributes
 * over all memory in the given range and returns the result as the
 * attribute value. If different pages have different preferred or
 * prefetch locations, 0xffffffff will be returned for
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC or
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC respectively. For
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS, flags of all pages will be
 * aggregated by bitwise AND. That means, a flag will be set in the
 * output, if that flag is set for all pages in the range. For
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS, flags of all pages will be
 * aggregated by bitwise NOR. That means, a flag will be set in the
 * output, if that flag is clear for all pages in the range.
 * The minimum migration granularity throughout the range will be
 * returned for @KFD_IOCTL_SVM_ATTR_GRANULARITY.
 *
 * Querying of accessibility attributes works by initializing the
 * attribute type to @KFD_IOCTL_SVM_ATTR_ACCESS and the value to the
 * GPUID being queried. Multiple attributes can be given to allow
 * querying multiple GPUIDs. The ioctl function overwrites the
 * attribute type to indicate the access for the specified GPU.
 */
struct kfd_ioctl_svm_args {
	__u64 start_addr;
	__u64 size;
	__u32 op;
	__u32 nattr;
	/* Variable length array of attributes */
	struct kfd_ioctl_svm_attribute attrs[];
};
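/*
 * Example usage (an illustrative sketch, not part of the ABI): prefetching
 * an SVM range to one GPU and making it accessible there. Because @attrs is
 * a flexible array member, the args struct is heap-allocated with room for
 * the attribute pairs. "kfd_fd", "gpu_id", "buf" and "bytes" are assumptions
 * of this sketch.
 *
 *	struct kfd_ioctl_svm_args *args;
 *	size_t sz = sizeof(*args) + 2 * sizeof(struct kfd_ioctl_svm_attribute);
 *
 *	args = calloc(1, sz);
 *	args->start_addr = (__u64)(uintptr_t)buf;
 *	args->size = bytes;
 *	args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *	args->nattr = 2;
 *	args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
 *	args->attrs[0].value = gpu_id;
 *	args->attrs[1].type = KFD_IOCTL_SVM_ATTR_ACCESS;
 *	args->attrs[1].value = gpu_id;
 *	if (ioctl(kfd_fd, AMDKFD_IOC_SVM, args))
 *		perror("AMDKFD_IOC_SVM");
 *	free(args);
 */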
/**
 * kfd_ioctl_set_xnack_mode_args - Arguments for set_xnack_mode
 *
 * @xnack_enabled: [in/out] Whether to enable XNACK mode for this process
 *
 * @xnack_enabled indicates whether recoverable page faults should be
 * enabled for the current process. 0 means disabled, positive means
 * enabled, negative means leave unchanged. If enabled, virtual address
 * translations on GFXv9 and later AMD GPUs can return XNACK and retry
 * the access until a valid PTE is available. This is used to implement
 * device page faults.
 *
 * On output, @xnack_enabled returns the (new) current mode (0 or
 * positive). Therefore, a negative input value can be used to query
 * the current mode without changing it.
 *
 * The XNACK mode fundamentally changes the way SVM managed memory works
 * in the driver, with subtle effects on application performance and
 * functionality.
 *
 * Enabling XNACK mode requires shader programs to be compiled
 * differently. Furthermore, not all GPUs support changing the mode
 * per-process. Therefore changing the mode is only allowed while no
 * user mode queues exist in the process. This ensures that no shader
 * code is running that may be compiled for the wrong mode. GPUs that
 * cannot change to the requested mode will prevent the XNACK mode
 * switch. All GPUs used by the process must be in the same XNACK mode.
 *
 * GFXv8 or older GPUs do not support 48 bit virtual addresses or SVM.
 * Therefore those GPUs are not considered for the XNACK mode switch.
 *
 * Return: 0 on success, -errno on failure
 */
struct kfd_ioctl_set_xnack_mode_args {
	__s32 xnack_enabled;
};
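/*
 * Example usage (an illustrative sketch, not part of the ABI): using the
 * negative-input convention documented above to query the current XNACK
 * mode without changing it. "kfd_fd" and "xnack_on" are assumptions of this
 * sketch.
 *
 *	struct kfd_ioctl_set_xnack_mode_args args = { .xnack_enabled = -1 };
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_SET_XNACK_MODE, &args) == 0)
 *		xnack_on = args.xnack_enabled > 0;
 */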
/* Wave launch override modes */
enum kfd_dbg_trap_override_mode {
	KFD_DBG_TRAP_OVERRIDE_OR = 0,
	KFD_DBG_TRAP_OVERRIDE_REPLACE = 1
};

/* Wave launch overrides */
enum kfd_dbg_trap_mask {
	KFD_DBG_TRAP_MASK_FP_INVALID = 1,
	KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2,
	KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4,
	KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8,
	KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16,
	KFD_DBG_TRAP_MASK_FP_INEXACT = 32,
	KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64,
	KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128,
	KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256,
	KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30),
	KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31)
};

/* Wave launch modes */
enum kfd_dbg_trap_wave_launch_mode {
	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0,
	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1,
	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3
};

/* Address watch modes */
enum kfd_dbg_trap_address_watch_mode {
	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0,
	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1,
	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2,
	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3
};

/* Additional wave settings */
enum kfd_dbg_trap_flags {
	KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
};

/* Trap exceptions */
enum kfd_dbg_trap_exception_code {
	EC_NONE = 0,
	/* per queue */
	EC_QUEUE_WAVE_ABORT = 1,
	EC_QUEUE_WAVE_TRAP = 2,
	EC_QUEUE_WAVE_MATH_ERROR = 3,
	EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4,
	EC_QUEUE_WAVE_MEMORY_VIOLATION = 5,
	EC_QUEUE_WAVE_APERTURE_VIOLATION = 6,
	EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16,
	EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17,
	EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18,
	EC_QUEUE_PACKET_RESERVED = 19,
	EC_QUEUE_PACKET_UNSUPPORTED = 20,
	EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21,
	EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22,
	EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23,
	EC_QUEUE_PREEMPTION_ERROR = 30,
	EC_QUEUE_NEW = 31,
	/* per device */
	EC_DEVICE_QUEUE_DELETE = 32,
	EC_DEVICE_MEMORY_VIOLATION = 33,
	EC_DEVICE_RAS_ERROR = 34,
	EC_DEVICE_FATAL_HALT = 35,
	EC_DEVICE_NEW = 36,
	/* per process */
	EC_PROCESS_RUNTIME = 48,
	EC_PROCESS_DEVICE_REMOVE = 49,
	EC_MAX
};

/* Mask generated by ecode in kfd_dbg_trap_exception_code */
#define KFD_EC_MASK(ecode)	(1ULL << (ecode - 1))

/* Masks for exception code type checks below */
#define KFD_EC_MASK_QUEUE	(KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) |	\
				 KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) |	\
				 KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) |	\
				 KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) |	\
				 KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) |	\
				 KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED) |	\
				 KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR) |	\
				 KFD_EC_MASK(EC_QUEUE_NEW))
#define KFD_EC_MASK_DEVICE	(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) |	\
				 KFD_EC_MASK(EC_DEVICE_RAS_ERROR) |	\
				 KFD_EC_MASK(EC_DEVICE_FATAL_HALT) |	\
				 KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) |	\
				 KFD_EC_MASK(EC_DEVICE_NEW))
#define KFD_EC_MASK_PROCESS	(KFD_EC_MASK(EC_PROCESS_RUNTIME) |	\
				 KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
#define KFD_EC_MASK_PACKET	(KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED))

/* Checks for exception code types for KFD search */
#define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX)
#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode)					\
		(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode)				\
		(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode)				\
		(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
#define KFD_DBG_EC_TYPE_IS_PACKET(ecode)				\
		(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET))


/* Runtime enable states */
enum kfd_dbg_runtime_state {
	DEBUG_RUNTIME_STATE_DISABLED = 0,
	DEBUG_RUNTIME_STATE_ENABLED = 1,
	DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
	DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
};

/* Runtime enable status */
struct kfd_runtime_info {
	__u64 r_debug;
	__u32 runtime_state;
	__u32 ttmp_setup;
};

/* Enable modes for runtime enable */
#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK	1
#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK	2

/**
 * kfd_ioctl_runtime_enable_args - Arguments for runtime enable
 *
 * Coordinates debug exception signalling and debug device enablement with runtime.
 *
 * @r_debug - pointer to user struct for sharing information between ROCr and the debugger
 * @mode_mask - mask to set mode
 *	KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK - enable runtime for debugging, otherwise disable
 *	KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK - enable trap temporary setup (ignore on disable)
 * @capabilities_mask - mask to notify runtime on what KFD supports
 *
 * Return - 0 on SUCCESS.
 *	  - EBUSY if runtime enable call already pending.
 *	  - EEXIST if user queues already active prior to call.
 *	    If process is debug enabled, runtime enable will enable debug devices and
 *	    wait for debugger process to send runtime exception EC_PROCESS_RUNTIME
 *	    to unblock - see kfd_ioctl_dbg_trap_args.
 */
struct kfd_ioctl_runtime_enable_args {
	__u64 r_debug;
	__u32 mode_mask;
	__u32 capabilities_mask;
};
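/*
 * Example usage (an illustrative sketch, not part of the ABI): how a runtime
 * might enable itself for debugging with TTMP setup requested. "kfd_fd" and
 * "runtime_debug_info" (the user struct shared with the debugger) are
 * assumptions of this sketch. If the process is debug enabled, the call
 * blocks until the debugger sends EC_PROCESS_RUNTIME, as documented above.
 *
 *	struct kfd_ioctl_runtime_enable_args args = {0};
 *
 *	args.r_debug = (__u64)(uintptr_t)&runtime_debug_info;
 *	args.mode_mask = KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK |
 *			 KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK;
 *	if (ioctl(kfd_fd, AMDKFD_IOC_RUNTIME_ENABLE, &args))
 *		return -1;
 */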
/* Queue information */
struct kfd_queue_snapshot_entry {
	__u64 exception_status;
	__u64 ring_base_address;
	__u64 write_pointer_address;
	__u64 read_pointer_address;
	__u64 ctx_save_restore_address;
	__u32 queue_id;
	__u32 gpu_id;
	__u32 ring_size;
	__u32 queue_type;
	__u32 ctx_save_restore_area_size;
	__u32 reserved;
};

/* Queue status return for suspend/resume */
#define KFD_DBG_QUEUE_ERROR_BIT		30
#define KFD_DBG_QUEUE_INVALID_BIT	31
#define KFD_DBG_QUEUE_ERROR_MASK	(1 << KFD_DBG_QUEUE_ERROR_BIT)
#define KFD_DBG_QUEUE_INVALID_MASK	(1 << KFD_DBG_QUEUE_INVALID_BIT)

/* Context save area header information */
struct kfd_context_save_area_header {
	struct {
		__u32 control_stack_offset;
		__u32 control_stack_size;
		__u32 wave_state_offset;
		__u32 wave_state_size;
	} wave_state;
	__u32 debug_offset;
	__u32 debug_size;
	__u64 err_payload_addr;
	__u32 err_event_id;
	__u32 reserved1;
};

/*
 * Debug operations
 *
 * For specifics on usage and return values, see documentation per operation
 * below. Otherwise, generic error returns apply:
 * - ESRCH if the process to debug does not exist.
 *
 * - EINVAL (with KFD_IOC_DBG_TRAP_ENABLE exempt) if operation
 *	    KFD_IOC_DBG_TRAP_ENABLE has not succeeded prior.
 *	    Also returns this error if GPU hardware scheduling is not supported.
 *
 * - EPERM (with KFD_IOC_DBG_TRAP_DISABLE exempt) if target process is not
 *	   PTRACE_ATTACHED. KFD_IOC_DBG_TRAP_DISABLE is exempt to allow
 *	   clean up of debug mode as long as process is debug enabled.
 *
 * - EACCES if any DBG_HW_OP (debug hardware operation) is requested when
 *	    AMDKFD_IOC_RUNTIME_ENABLE has not succeeded prior.
 *
 * - ENODEV if any GPU does not support debugging on a DBG_HW_OP call.
 *
 * - Other errors may be returned when a DBG_HW_OP occurs while the GPU
 *   is in a fatal state.
 *
 */
enum kfd_dbg_trap_operations {
	KFD_IOC_DBG_TRAP_ENABLE = 0,
	KFD_IOC_DBG_TRAP_DISABLE = 1,
	KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
	KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
	KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4,	/* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5,	/* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6,		/* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7,		/* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8,	/* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9,	/* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
	KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
	KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
	KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
	KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
};

/**
 * kfd_ioctl_dbg_trap_enable_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_ENABLE.
 *
 * Enables debug session for target process. Call @op KFD_IOC_DBG_TRAP_DISABLE in
 * kfd_ioctl_dbg_trap_args to disable debug session.
 *
 * @exception_mask (IN)	- exceptions to raise to the debugger
 * @rinfo_ptr  (IN)	- pointer to runtime info buffer (see kfd_runtime_info)
 * @rinfo_size (IN/OUT)	- size of runtime info buffer in bytes
 * @dbg_fd (IN)		- fd the KFD will notify the debugger with of raised
 *			  exceptions set in exception_mask.
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *	    Copies KFD saved kfd_runtime_info to @rinfo_ptr on enable.
 *	    Size of kfd_runtime saved by the KFD returned to @rinfo_size.
 *	  - EBADF if KFD cannot get a reference to dbg_fd.
 *	  - EFAULT if KFD cannot copy runtime info to rinfo_ptr.
 *	  - EINVAL if target process is already debug enabled.
 *
 */
struct kfd_ioctl_dbg_trap_enable_args {
	__u64 exception_mask;
	__u64 rinfo_ptr;
	__u32 rinfo_size;
	__u32 dbg_fd;
};

/**
 * kfd_ioctl_dbg_trap_send_runtime_event_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT.
 * Raises exceptions to runtime.
 *
 * @exception_mask (IN) - exceptions to raise to runtime
 * @gpu_id (IN)		- target device id
 * @queue_id (IN)	- target queue id
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *	  - ENODEV if gpu_id not found.
 *	    If exception_mask contains EC_PROCESS_RUNTIME, unblocks pending
 *	    AMDKFD_IOC_RUNTIME_ENABLE call - see kfd_ioctl_runtime_enable_args.
 *	    All other exceptions are raised to runtime through err_payload_addr.
 *	    See kfd_context_save_area_header.
 */
struct kfd_ioctl_dbg_trap_send_runtime_event_args {
	__u64 exception_mask;
	__u32 gpu_id;
	__u32 queue_id;
};

/**
 * kfd_ioctl_dbg_trap_set_exceptions_enabled_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED
 * Set new exceptions to be raised to the debugger.
 *
 * @exception_mask (IN) - new exceptions to raise to the debugger
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 */
struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
	__u64 exception_mask;
};
/**
 * kfd_ioctl_dbg_trap_set_wave_launch_override_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE
 * Enable HW exceptions to raise trap.
 *
 * @override_mode (IN) - see kfd_dbg_trap_override_mode
 * @enable_mask (IN/OUT) - reference kfd_dbg_trap_mask.
 *			   IN is the override modes requested to be enabled.
 *			   OUT is referenced in Return below.
 * @support_request_mask (IN/OUT) - reference kfd_dbg_trap_mask.
 *			   IN is the override modes requested for support check.
 *			   OUT is referenced in Return below.
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *	    Previous enablement is returned in @enable_mask.
 *	    Actual override support is returned in @support_request_mask.
 *	  - EINVAL if override mode is not supported.
 *	  - EACCES if trap support requested is not actually supported.
 *	    i.e. enable_mask (IN) is not a subset of support_request_mask (OUT).
 *	    Otherwise it is considered a generic error (see kfd_dbg_trap_operations).
 */
struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
	__u32 override_mode;
	__u32 enable_mask;
	__u32 support_request_mask;
	__u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_set_wave_launch_mode_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE
 * Set wave launch mode.
 *
 * @launch_mode (IN) - see kfd_dbg_trap_wave_launch_mode
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 */
struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
	__u32 launch_mode;
	__u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_suspend_queues_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SUSPEND_QUEUES
 * Suspend queues.
 *
 * @exception_mask (IN) - raised exceptions to clear
 * @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
 *			   to suspend
 * @num_queues (IN)	 - number of queues to suspend in @queue_array_ptr
 * @grace_period (IN)	 - wave time allowance before preemption
 *			   per 1K GPU clock cycle unit
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Destruction of a suspended queue is blocked until the queue is
 * resumed. This allows the debugger to access queue information and
 * its context save area without running into a race condition on
 * queue destruction.
 * Automatically copies per queue context save area header information
 * into the save area base
 * (see kfd_queue_snapshot_entry and kfd_context_save_area_header).
 *
 * Return - Number of queues suspended on SUCCESS.
 *	    KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK are set
 *	    in the corresponding entry of the @queue_array_ptr array to
 *	    report the reason a queue could not be suspended:
 *	    KFD_DBG_QUEUE_ERROR_MASK = HW failure.
 *	    KFD_DBG_QUEUE_INVALID_MASK = queue does not exist, is new or
 *	    is being destroyed.
 */
struct kfd_ioctl_dbg_trap_suspend_queues_args {
	__u64 exception_mask;
	__u64 queue_array_ptr;
	__u32 num_queues;
	__u32 grace_period;
};

/**
 * kfd_ioctl_dbg_trap_resume_queues_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_RESUME_QUEUES
 * Resume queues.
 *
 * @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
 *			   to resume
 * @num_queues (IN)	 - number of queues to resume in @queue_array_ptr
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - Number of queues resumed on SUCCESS.
 *	    KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK are set
 *	    in the corresponding entry of the @queue_array_ptr array to
 *	    report the reason a queue could not be resumed:
 *	    KFD_DBG_QUEUE_ERROR_MASK = HW failure.
 *	    KFD_DBG_QUEUE_INVALID_MASK = queue does not exist.
 */
struct kfd_ioctl_dbg_trap_resume_queues_args {
	__u64 queue_array_ptr;
	__u32 num_queues;
	__u32 pad;
};
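/*
 * Example usage (an illustrative sketch, not part of the ABI): suspending
 * two queues and checking the per-queue status bits reported back in the
 * queue id array. "kfd_fd", "target_pid", "qid0" and "qid1" are assumptions
 * of this sketch; mark_queue_gone() and mark_queue_hw_error() are
 * placeholders.
 *
 *	__u32 queues[2] = { qid0, qid1 };
 *	struct kfd_ioctl_dbg_trap_args args = {0};
 *	int n, i;
 *
 *	args.pid = target_pid;
 *	args.op = KFD_IOC_DBG_TRAP_SUSPEND_QUEUES;
 *	args.suspend_queues.queue_array_ptr = (__u64)(uintptr_t)queues;
 *	args.suspend_queues.num_queues = 2;
 *	args.suspend_queues.grace_period = 0;
 *
 *	n = ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);
 *	for (i = 0; n >= 0 && i < 2; i++) {
 *		if (queues[i] & KFD_DBG_QUEUE_INVALID_MASK)
 *			mark_queue_gone(i);
 *		else if (queues[i] & KFD_DBG_QUEUE_ERROR_MASK)
 *			mark_queue_hw_error(i);
 *	}
 */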
/**
 * kfd_ioctl_dbg_trap_set_node_address_watch_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH
 * Sets address watch for device.
 *
 * @address (IN) - watch address to set
 * @mode (IN)	 - see kfd_dbg_trap_address_watch_mode
 * @mask (IN)	 - watch address mask
 * @gpu_id (IN)	 - target gpu to set watch point
 * @id (OUT)	 - watch id allocated
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *	    Allocated watch ID returned to @id.
 *	  - ENODEV if gpu_id not found.
 *	  - ENOMEM if no watch IDs can be allocated.
 */
struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
	__u64 address;
	__u32 mode;
	__u32 mask;
	__u32 gpu_id;
	__u32 id;
};

/**
 * kfd_ioctl_dbg_trap_clear_node_address_watch_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH
 * Clear address watch for device.
 *
 * @gpu_id (IN) - target device to clear watch point
 * @id (IN)	- allocated watch id to clear
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *	  - ENODEV if gpu_id not found.
 *	  - EINVAL if watch ID has not been allocated.
 */
struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
	__u32 gpu_id;
	__u32 id;
};

/**
 * kfd_ioctl_dbg_trap_set_flags_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_FLAGS
 * Sets flags for wave behaviour.
 *
 * @flags (IN/OUT) - IN = flags to enable, OUT = flags previously enabled
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *	  - EACCES if any debug device does not allow flag options.
 */
struct kfd_ioctl_dbg_trap_set_flags_args {
	__u32 flags;
	__u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_query_debug_event_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT
 *
 * Find one or more raised exceptions. This function can return multiple
 * exceptions from a single queue or a single device with one call. To find
 * all raised exceptions, this function must be called repeatedly until it
 * returns -EAGAIN. Returned exceptions can optionally be cleared by
 * setting the corresponding bit in the @exception_mask input parameter.
 * However, clearing an exception prevents retrieving further information
 * about it with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO.
 *
 * @exception_mask (IN/OUT) - exception to clear (IN) and raised (OUT)
 * @gpu_id (OUT)	    - gpu id of exceptions raised
 * @queue_id (OUT)	    - queue id of exceptions raised
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on raised exception found
 *	    Raised exceptions found are returned in @exception_mask
 *	    with reported source id returned in @gpu_id or @queue_id.
 *	  - EAGAIN if no raised exception has been found
 */
struct kfd_ioctl_dbg_trap_query_debug_event_args {
	__u64 exception_mask;
	__u32 gpu_id;
	__u32 queue_id;
};
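/*
 * Example usage (an illustrative sketch, not part of the ABI): draining all
 * raised queue exceptions by calling KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT
 * until it returns -EAGAIN, as described above. Setting bits in the input
 * @exception_mask clears those exceptions as they are reported. "kfd_fd",
 * "target_pid" and the handle_exception() placeholder are assumptions of
 * this sketch.
 *
 *	struct kfd_ioctl_dbg_trap_args args;
 *
 *	for (;;) {
 *		memset(&args, 0, sizeof(args));
 *		args.pid = target_pid;
 *		args.op = KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT;
 *		args.query_debug_event.exception_mask = KFD_EC_MASK_QUEUE;
 *
 *		if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args)) {
 *			if (errno == EAGAIN)
 *				break;
 *			return -1;
 *		}
 *		handle_exception(args.query_debug_event.exception_mask,
 *				 args.query_debug_event.gpu_id,
 *				 args.query_debug_event.queue_id);
 *	}
 */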
/**
 * kfd_ioctl_dbg_trap_query_exception_info_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO
 * Get additional info on raised exception.
 *
 * @info_ptr (IN)	 - pointer to exception info buffer to copy to
 * @info_size (IN/OUT)	 - exception info buffer size (bytes)
 * @source_id (IN)	 - target gpu or queue id
 * @exception_code (IN)	 - target exception
 * @clear_exception (IN) - clear raised @exception_code exception
 *			   (0 = false, 1 = true)
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *	    If @exception_code is EC_DEVICE_MEMORY_VIOLATION, copy @info_size(OUT)
 *	    bytes of memory exception data to @info_ptr.
 *	    If @exception_code is EC_PROCESS_RUNTIME, copy saved
 *	    kfd_runtime_info to @info_ptr.
 *	    Actual required @info_ptr size (bytes) is returned in @info_size.
 */
struct kfd_ioctl_dbg_trap_query_exception_info_args {
	__u64 info_ptr;
	__u32 info_size;
	__u32 source_id;
	__u32 exception_code;
	__u32 clear_exception;
};

/**
 * kfd_ioctl_dbg_trap_get_queue_snapshot_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT
 * Get queue information.
 *
 * @exception_mask (IN)	  - exceptions raised to clear
 * @snapshot_buf_ptr (IN) - queue snapshot entry buffer (see kfd_queue_snapshot_entry)
 * @num_queues (IN/OUT)	  - number of queue snapshot entries
 *	    The debugger specifies the size of the array allocated in @num_queues.
 *	    KFD returns the number of queues that actually existed. If this is
 *	    larger than the size specified by the debugger, KFD will not overflow
 *	    the array allocated by the debugger.
 *
 * @entry_size (IN/OUT) - size per entry in bytes
 *	    The debugger specifies sizeof(struct kfd_queue_snapshot_entry) in
 *	    @entry_size. KFD returns the number of bytes actually populated per
 *	    entry. The debugger should use the KFD_IOCTL_MINOR_VERSION to determine,
 *	    which fields in struct kfd_queue_snapshot_entry are valid. This allows
 *	    growing the ABI in a backwards compatible manner.
 *	    Note that entry_size(IN) should still be used to stride the snapshot buffer in the
 *	    event that it's larger than actual kfd_queue_snapshot_entry.
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *	    Copies @num_queues(IN) queue snapshot entries of size @entry_size(IN)
 *	    into @snapshot_buf_ptr if @num_queues(IN) > 0.
 *	    Otherwise return @num_queues(OUT) queue snapshot entries that exist.
 */
struct kfd_ioctl_dbg_trap_queue_snapshot_args {
	__u64 exception_mask;
	__u64 snapshot_buf_ptr;
	__u32 num_queues;
	__u32 entry_size;
};
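/*
 * Example usage (an illustrative sketch, not part of the ABI): the two-call
 * snapshot pattern described above. The first call, with @num_queues == 0,
 * only reports how many queues exist; the second call copies the entries,
 * and the buffer is strided by the entry_size that was passed in. "kfd_fd",
 * "target_pid" and the consume_entries() placeholder are assumptions of
 * this sketch.
 *
 *	struct kfd_ioctl_dbg_trap_args args = {0};
 *	struct kfd_queue_snapshot_entry *entries;
 *	__u32 n;
 *
 *	args.pid = target_pid;
 *	args.op = KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT;
 *	args.queue_snapshot.entry_size = sizeof(*entries);
 *	if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args))
 *		return -1;
 *
 *	n = args.queue_snapshot.num_queues;
 *	entries = calloc(n, sizeof(*entries));
 *	args.queue_snapshot.snapshot_buf_ptr = (__u64)(uintptr_t)entries;
 *	args.queue_snapshot.num_queues = n;
 *	args.queue_snapshot.entry_size = sizeof(*entries);
 *	if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args) == 0)
 *		consume_entries(entries, n);
 */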
/**
 * kfd_ioctl_dbg_trap_get_device_snapshot_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT
 * Get device information.
 *
 * @exception_mask (IN)	  - exceptions raised to clear
 * @snapshot_buf_ptr (IN) - pointer to snapshot buffer (see kfd_dbg_device_info_entry)
 * @num_devices (IN/OUT)  - number of debug devices to snapshot
 *	    The debugger specifies the size of the array allocated in @num_devices.
 *	    KFD returns the number of devices that actually existed. If this is
 *	    larger than the size specified by the debugger, KFD will not overflow
 *	    the array allocated by the debugger.
 *
 * @entry_size (IN/OUT) - size per entry in bytes
 *	    The debugger specifies sizeof(struct kfd_dbg_device_info_entry) in
 *	    @entry_size. KFD returns the number of bytes actually populated. The
 *	    debugger should use KFD_IOCTL_MINOR_VERSION to determine, which fields
 *	    in struct kfd_dbg_device_info_entry are valid. This allows growing the
 *	    ABI in a backwards compatible manner.
 *	    Note that entry_size(IN) should still be used to stride the snapshot buffer in the
 *	    event that it's larger than actual kfd_dbg_device_info_entry.
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *	    Copies @num_devices(IN) device snapshot entries of size @entry_size(IN)
 *	    into @snapshot_buf_ptr if @num_devices(IN) > 0.
 *	    Otherwise return @num_devices(OUT) device snapshot entries that exist.
 */
struct kfd_ioctl_dbg_trap_device_snapshot_args {
	__u64 exception_mask;
	__u64 snapshot_buf_ptr;
	__u32 num_devices;
	__u32 entry_size;
};

/**
 * kfd_ioctl_dbg_trap_args
 *
 * Arguments to debug target process.
 *
 * @pid - target process to debug
 * @op  - debug operation (see kfd_dbg_trap_operations)
 *
 * @op determines which union struct args to use.
 * Refer to kern docs for each kfd_ioctl_dbg_trap_*_args struct.
 */
struct kfd_ioctl_dbg_trap_args {
	__u32 pid;
	__u32 op;

	union {
		struct kfd_ioctl_dbg_trap_enable_args enable;
		struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event;
		struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled;
		struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override;
		struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode;
		struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues;
		struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues;
		struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch;
		struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch;
		struct kfd_ioctl_dbg_trap_set_flags_args set_flags;
		struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event;
		struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info;
		struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot;
		struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot;
	};
};
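/*
 * Example usage (an illustrative sketch, not part of the ABI): enabling a
 * debug session on a ptrace-attached target via the union above. "kfd_fd",
 * "target_pid" and "notify_fd" (the descriptor KFD signals raised exceptions
 * on) are assumptions of this sketch. On success, rinfo holds the saved
 * kfd_runtime_info.
 *
 *	struct kfd_runtime_info rinfo = {0};
 *	struct kfd_ioctl_dbg_trap_args args = {0};
 *
 *	args.pid = target_pid;
 *	args.op = KFD_IOC_DBG_TRAP_ENABLE;
 *	args.enable.exception_mask = KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) |
 *				     KFD_EC_MASK(EC_PROCESS_RUNTIME);
 *	args.enable.rinfo_ptr = (__u64)(uintptr_t)&rinfo;
 *	args.enable.rinfo_size = sizeof(rinfo);
 *	args.enable.dbg_fd = notify_fd;
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args))
 *		return -1;
 */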
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr)			_IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type)		_IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr, type)		_IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr, type)		_IOWR(AMDKFD_IOCTL_BASE, nr, type)

#define AMDKFD_IOC_GET_VERSION			\
		AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)

#define AMDKFD_IOC_CREATE_QUEUE			\
		AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)

#define AMDKFD_IOC_DESTROY_QUEUE		\
		AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)

#define AMDKFD_IOC_SET_MEMORY_POLICY		\
		AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)

#define AMDKFD_IOC_GET_CLOCK_COUNTERS		\
		AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES	\
		AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)

#define AMDKFD_IOC_UPDATE_QUEUE			\
		AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)

#define AMDKFD_IOC_CREATE_EVENT			\
		AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)

#define AMDKFD_IOC_DESTROY_EVENT		\
		AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)

#define AMDKFD_IOC_SET_EVENT			\
		AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)

#define AMDKFD_IOC_RESET_EVENT			\
		AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)

#define AMDKFD_IOC_WAIT_EVENTS			\
		AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)

#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED	\
		AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)

#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED	\
		AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)

#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED	\
		AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)

#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED	\
		AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)

#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA	\
		AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)

#define AMDKFD_IOC_GET_TILE_CONFIG		\
		AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)

#define AMDKFD_IOC_SET_TRAP_HANDLER		\
		AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW	\
		AMDKFD_IOWR(0x14,		\
			struct kfd_ioctl_get_process_apertures_new_args)

#define AMDKFD_IOC_ACQUIRE_VM			\
		AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)

#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU		\
		AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)

#define AMDKFD_IOC_FREE_MEMORY_OF_GPU		\
		AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)

#define AMDKFD_IOC_MAP_MEMORY_TO_GPU		\
		AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)

#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU	\
		AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)

#define AMDKFD_IOC_SET_CU_MASK			\
		AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)

#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE		\
		AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)

#define AMDKFD_IOC_GET_DMABUF_INFO		\
		AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)

#define AMDKFD_IOC_IMPORT_DMABUF		\
		AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)

#define AMDKFD_IOC_ALLOC_QUEUE_GWS		\
		AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)

#define AMDKFD_IOC_SMI_EVENTS			\
		AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)

#define AMDKFD_IOC_SVM	AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)

#define AMDKFD_IOC_SET_XNACK_MODE		\
		AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)

#define AMDKFD_IOC_CRIU_OP			\
		AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)

#define AMDKFD_IOC_AVAILABLE_MEMORY		\
		AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)

#define AMDKFD_IOC_EXPORT_DMABUF		\
		AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args)

#define AMDKFD_IOC_RUNTIME_ENABLE		\
		AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args)

#define AMDKFD_IOC_DBG_TRAP			\
		AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)

#define AMDKFD_COMMAND_START		0x01
#define AMDKFD_COMMAND_END		0x27

#endif