Lines matching defs: args

178 struct kfd_ioctl_get_version_args *args = data;
180 args->major_version = KFD_IOCTL_MAJOR_VERSION;
181 args->minor_version = KFD_IOCTL_MINOR_VERSION;
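
These two matched lines are the whole body of the version query: the handler copies the driver's compiled-in KFD_IOCTL_MAJOR_VERSION/KFD_IOCTL_MINOR_VERSION into the user buffer. A minimal userspace sketch of the calling side, assuming the uapi header <linux/kfd_ioctl.h> and a /dev/kfd node:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kfd_ioctl.h>

    int main(void)
    {
        struct kfd_ioctl_get_version_args args = {0};
        int fd = open("/dev/kfd", O_RDWR);

        if (fd < 0)
            return 1;
        /* The handler fills both fields from KFD_IOCTL_*_VERSION. */
        if (ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) == 0)
            printf("KFD ioctl interface %u.%u\n",
                   args.major_version, args.minor_version);
        close(fd);
        return 0;
    }

The later sketches below assume these same headers plus <stdint.h>, <stdbool.h>, <stdlib.h>, <string.h> and <sys/mman.h>, and reuse an already-open descriptor kfd_fd.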
187 struct kfd_ioctl_create_queue_args *args)
194 if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
199 if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
204 if ((args->ring_base_address) &&
205 (!access_ok((const void __user *) args->ring_base_address,
211 if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
216 if (!access_ok((const void __user *) args->read_pointer_address,
222 if (!access_ok((const void __user *) args->write_pointer_address,
228 if (args->eop_buffer_address &&
229 !access_ok((const void __user *) args->eop_buffer_address,
235 if (args->ctx_save_restore_address &&
236 !access_ok((const void __user *) args->ctx_save_restore_address,
244 q_properties->queue_percent = args->queue_percentage & 0xFF;
246 q_properties->pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
247 q_properties->priority = args->queue_priority;
248 q_properties->queue_address = args->ring_base_address;
249 q_properties->queue_size = args->ring_size;
250 q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
251 q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
252 q_properties->eop_ring_buffer_address = args->eop_buffer_address;
253 q_properties->eop_ring_buffer_size = args->eop_buffer_size;
255 q_properties->ctx_save_restore_area_address = args->ctx_save_restore_address;
256 q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
257 q_properties->ctl_stack_size = args->ctl_stack_size;
258 if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
259 args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
261 else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
263 else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
268 if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
274 q_properties->queue_percent, args->queue_percentage);
277 q_properties->priority, args->queue_priority);
280 q_properties->queue_address, args->ring_base_address);
283 q_properties->queue_size, args->ring_size);
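
Lines 194, 244 and 246 show that queue_percentage is a packed field: bits 0-7 carry the percentage that is range-checked against KFD_MAX_QUEUE_PERCENTAGE, and bits 8-15 carry the PM4 target XCC. A sketch of the matching pack on the calling side, with the bit layout inferred from the & 0xFF and >> 8 masks above:

    /* Pack queue_percentage the way the handler above unpacks it:
     * low byte = percentage, next byte = target XCC id. Layout
     * inferred from the masks on lines 194/244/246. */
    static inline uint32_t pack_queue_percentage(uint32_t percent,
                                                 uint32_t xcc_id)
    {
        return (percent & 0xFF) | ((xcc_id & 0xFF) << 8);
    }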
302 struct kfd_ioctl_create_queue_args *args = data;
315 err = set_queue_properties_from_user(&q_properties, args);
319 pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
323 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
325 pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
360 wptr_vm, args->write_pointer_address >> PAGE_SHIFT);
391 args->queue_id = queue_id;
395 args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
396 args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
401 args->doorbell_offset |= doorbell_offset_in_process;
405 pr_debug("Queue id %d was created successfully\n", args->queue_id);
408 args->ring_base_address);
411 args->read_pointer_address);
414 args->write_pointer_address);
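
Lines 395-401 assemble args->doorbell_offset as an opaque cookie (mmap type | GPU id | per-process doorbell offset). Userspace never decodes it; it hands the value straight back as the mmap offset on /dev/kfd. A sketch, assuming a successful AMDKFD_IOC_CREATE_QUEUE left its result in args and a 4 KiB doorbell slice (the real slice size is per-ASIC):

    /* Map the new queue's doorbell page; the offset is treated as an
     * opaque cookie produced by the create-queue handler above. */
    void *doorbell = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                          MAP_SHARED, kfd_fd, args.doorbell_offset);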
433 struct kfd_ioctl_destroy_queue_args *args = data;
436 args->queue_id,
441 retval = pqm_destroy_queue(&p->pqm, args->queue_id);
451 struct kfd_ioctl_update_queue_args *args = data;
459 if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
464 if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
469 if ((args->ring_base_address) &&
470 (!access_ok((const void __user *) args->ring_base_address,
476 if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
481 properties.queue_address = args->ring_base_address;
482 properties.queue_size = args->ring_size;
483 properties.queue_percent = args->queue_percentage & 0xFF;
485 properties.pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
486 properties.priority = args->queue_priority;
489 args->queue_id, p->pasid);
493 retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties);
505 struct kfd_ioctl_set_cu_mask_args *args = data;
507 uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
508 size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
510 if ((args->num_cu_mask % 32) != 0) {
512 args->num_cu_mask);
516 minfo.cu_mask.count = args->num_cu_mask;
545 retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);
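
Lines 507-516 size the mask in dwords: num_cu_mask counts bits, must be a multiple of 32, and the kernel reads num_cu_mask / 32 uint32_t values from cu_mask_ptr. A sketch enabling the first 48 compute units of a hypothetical queue:

    /* Enable CUs 0-47: two dwords = 64 mask bits. */
    static int set_cu_mask_example(int kfd_fd, uint32_t queue_id)
    {
        uint32_t cu_mask[2] = { 0xffffffff, 0x0000ffff };
        struct kfd_ioctl_set_cu_mask_args args = {
            .queue_id    = queue_id,
            .num_cu_mask = 64,                  /* bits, multiple of 32 */
            .cu_mask_ptr = (uintptr_t)cu_mask,
        };
        return ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
    }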
557 struct kfd_ioctl_get_queue_wave_state_args *args = data;
562 r = pqm_get_wave_state(&p->pqm, args->queue_id,
563 (void __user *)args->ctl_stack_address,
564 &args->ctl_stack_used_size,
565 &args->save_area_used_size);
575 struct kfd_ioctl_set_memory_policy_args *args = data;
580 if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
581 && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
585 if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
586 && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
591 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
593 pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
604 default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
608 (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
615 (void __user *)args->alternate_aperture_base,
616 args->alternate_aperture_size))
629 struct kfd_ioctl_set_trap_handler_args *args = data;
635 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
647 kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);
684 struct kfd_ioctl_get_clock_counters_args *args = data;
688 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
692 args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
695 args->gpu_clock_counter = 0;
698 args->cpu_clock_counter = ktime_get_raw_ns();
699 args->system_clock_counter = ktime_get_boottime_ns();
702 args->system_clock_freq = 1000000000;
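
The handler samples three counters back to back: the GPU counter, ktime_get_raw_ns() (CLOCK_MONOTONIC_RAW) and ktime_get_boottime_ns() (CLOCK_BOOTTIME). Both CPU counters are nanoseconds, hence the fixed 1 GHz system_clock_freq on line 702. A sketch of the calling side:

    /* Grab correlated GPU/CPU timestamps, e.g. for profiler clock
     * alignment. */
    static int sample_clocks(int kfd_fd, uint32_t gpu_id, uint64_t out[3])
    {
        struct kfd_ioctl_get_clock_counters_args args = { .gpu_id = gpu_id };
        int r = ioctl(kfd_fd, AMDKFD_IOC_GET_CLOCK_COUNTERS, &args);

        out[0] = args.gpu_clock_counter;    /* GPU ticks; 0 if lookup failed */
        out[1] = args.cpu_clock_counter;    /* CLOCK_MONOTONIC_RAW, ns */
        out[2] = args.system_clock_counter; /* CLOCK_BOOTTIME, ns */
        return r;
    }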
711 struct kfd_ioctl_get_process_apertures_args *args = data;
717 args->num_of_nodes = 0;
725 &args->process_apertures[args->num_of_nodes];
735 "node id %u\n", args->num_of_nodes);
751 if (++args->num_of_nodes >= NUM_OF_SUPPORTED_GPUS)
762 struct kfd_ioctl_get_process_apertures_new_args *args = data;
769 if (args->num_of_nodes == 0) {
774 args->num_of_nodes = p->n_pdds;
779 * nodes, but not more than args->num_of_nodes as that is
782 pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
790 args->num_of_nodes = 0;
796 for (i = 0; i < min(p->n_pdds, args->num_of_nodes); i++) {
824 args->num_of_nodes = i;
826 (void __user *)args->kfd_process_device_apertures_ptr,
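
Lines 769-796 implement a two-call size negotiation: a first call with num_of_nodes == 0 only reports the process's device count, and a second call with a buffer sized from that answer gets the apertures, clamped to min(p->n_pdds, num_of_nodes) before the copy_to_user on line 826. Calling-side sketch:

    static struct kfd_process_device_apertures *
    get_apertures(int kfd_fd, uint32_t *n_nodes)
    {
        struct kfd_ioctl_get_process_apertures_new_args args = {0};
        struct kfd_process_device_apertures *pa;

        /* Pass 1: num_of_nodes == 0 asks only for the count. */
        if (ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args))
            return NULL;
        pa = calloc(args.num_of_nodes, sizeof(*pa));
        if (!pa)
            return NULL;
        /* Pass 2: a suitably sized buffer gets filled. */
        args.kfd_process_device_apertures_ptr = (uintptr_t)pa;
        if (ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args)) {
            free(pa);
            return NULL;
        }
        *n_nodes = args.num_of_nodes;
        return pa;
    }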
840 struct kfd_ioctl_create_event_args *args = data;
847 if (args->event_page_offset) {
849 err = kfd_kmap_event_page(p, args->event_page_offset);
855 err = kfd_event_create(filp, p, args->event_type,
856 args->auto_reset != 0, args->node_id,
857 &args->event_id, &args->event_trigger_data,
858 &args->event_page_offset,
859 &args->event_slot_index);
861 pr_debug("Created event (id:0x%08x) (%s)\n", args->event_id, __func__);
868 struct kfd_ioctl_destroy_event_args *args = data;
870 return kfd_event_destroy(p, args->event_id);
876 struct kfd_ioctl_set_event_args *args = data;
878 return kfd_set_event(p, args->event_id);
884 struct kfd_ioctl_reset_event_args *args = data;
886 return kfd_reset_event(p, args->event_id);
892 struct kfd_ioctl_wait_events_args *args = data;
894 return kfd_wait_on_events(p, args->num_events,
895 (void __user *)args->events_ptr,
896 (args->wait_for_all != 0),
897 &args->timeout, &args->wait_result);
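
The event ioctls split cleanly: create returns an event_id (and, via the event_page_offset path on lines 847-849, maps a pre-allocated signal page), while wait takes an array of kfd_event_data entries keyed by those ids. A sketch that creates one signal event and blocks on it for up to a second:

    static int wait_on_new_event(int kfd_fd)
    {
        struct kfd_ioctl_create_event_args cev = {
            .event_type = KFD_IOC_EVENT_SIGNAL,
        };
        struct kfd_event_data ev;
        struct kfd_ioctl_wait_events_args wev = {0};

        if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &cev))
            return -1;

        memset(&ev, 0, sizeof(ev));
        ev.event_id = cev.event_id;
        wev.events_ptr   = (uintptr_t)&ev;
        wev.num_events   = 1;
        wev.wait_for_all = 1;
        wev.timeout      = 1000;        /* milliseconds */
        return ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wev);
    }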
902 struct kfd_ioctl_set_scratch_backing_va_args *args = data;
908 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
921 pdd->qpd.sh_hidden_private_base = args->va_addr;
928 dev->adev, args->va_addr, pdd->qpd.vmid);
941 struct kfd_ioctl_get_tile_config_args *args = data;
947 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
954 args->gb_addr_config = config.gb_addr_config;
955 args->num_banks = config.num_banks;
956 args->num_ranks = config.num_ranks;
958 if (args->num_tile_configs > config.num_tile_configs)
959 args->num_tile_configs = config.num_tile_configs;
960 err = copy_to_user((void __user *)args->tile_config_ptr,
962 args->num_tile_configs * sizeof(uint32_t));
964 args->num_tile_configs = 0;
968 if (args->num_macro_tile_configs > config.num_macro_tile_configs)
969 args->num_macro_tile_configs = config.num_macro_tile_configs;
971 err = copy_to_user((void __user *)args->macro_tile_config_ptr,
973 args->num_macro_tile_configs * sizeof(uint32_t));
975 args->num_macro_tile_configs = 0;
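
Lines 958-975 show the clamp-and-report pattern: the caller passes its array capacities in num_tile_configs / num_macro_tile_configs, the kernel clamps them down to what the hardware config actually has, copies that many dwords, and writes the final counts back (zeroing them if the copy fails). Calling-side sketch with hypothetical capacities:

    static int get_tile_config_example(int kfd_fd, uint32_t gpu_id)
    {
        uint32_t tiles[32], macro_tiles[16];
        struct kfd_ioctl_get_tile_config_args args = {
            .gpu_id                 = gpu_id,
            .tile_config_ptr        = (uintptr_t)tiles,
            .macro_tile_config_ptr  = (uintptr_t)macro_tiles,
            .num_tile_configs       = 32,   /* capacity in, count out */
            .num_macro_tile_configs = 16,
        };
        return ioctl(kfd_fd, AMDKFD_IOC_GET_TILE_CONFIG, &args);
    }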
985 struct kfd_ioctl_acquire_vm_args *args = data;
990 drm_file = fget(args->drm_fd);
995 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1046 struct kfd_ioctl_get_available_memory_args *args = data;
1047 struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
1051 args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev,
1060 struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
1066 uint64_t offset = args->mmap_offset;
1067 uint32_t flags = args->flags;
1069 if (args->size == 0)
1080 args->va_addr >> PAGE_SHIFT,
1081 (args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
1083 args->va_addr);
1093 args->mmap_offset >> PAGE_SHIFT,
1094 (args->mmap_offset + args->size - 1) >> PAGE_SHIFT)) {
1096 args->mmap_offset);
1104 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1127 if (args->size != kfd_doorbell_process_slice(dev->kfd)) {
1137 if (args->size != PAGE_SIZE) {
1149 dev->adev, args->va_addr, args->size,
1164 uint64_t size = args->size;
1173 args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1174 args->mmap_offset = offset;
1180 args->mmap_offset = KFD_MMAP_TYPE_MMIO
1181 | KFD_MMAP_GPU_ID(args->gpu_id);
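
The handle returned on line 1173 is a 64-bit pack: MAKE_HANDLE places the gpu_id in the upper 32 bits and the per-device IDR handle in the lower 32, and GET_GPU_ID/GET_IDR_HANDLE (lines 1215, 1223) undo it. The macros themselves live in the driver's private header; the arithmetic this listing implies is:

    /* Equivalent of MAKE_HANDLE / GET_GPU_ID / GET_IDR_HANDLE as the
     * shifts in this listing imply them. */
    static inline uint64_t make_handle(uint32_t gpu_id, uint32_t idr)
    {
        return ((uint64_t)gpu_id << 32) | idr;
    }
    static inline uint32_t handle_gpu_id(uint64_t h) { return h >> 32; }
    static inline uint32_t handle_idr(uint64_t h)    { return (uint32_t)h; }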
1198 struct kfd_ioctl_free_memory_of_gpu_args *args = data;
1209 if (p->signal_handle && (p->signal_handle == args->handle)) {
1215 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1223 pdd, GET_IDR_HANDLE(args->handle));
1237 pdd, GET_IDR_HANDLE(args->handle));
1250 struct kfd_ioctl_map_memory_to_gpu_args *args = data;
1258 if (!args->n_devices) {
1262 if (args->n_success > args->n_devices) {
1267 devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1273 (void __user *)args->device_ids_array_ptr,
1274 args->n_devices * sizeof(*devices_arr));
1281 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1295 GET_IDR_HANDLE(args->handle));
1301 for (i = args->n_success; i < args->n_devices; i++) {
1331 args->n_success = i+1;
1343 for (i = 0; i < args->n_devices; i++) {
1368 struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
1375 if (!args->n_devices) {
1379 if (args->n_success > args->n_devices) {
1384 devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1390 (void __user *)args->device_ids_array_ptr,
1391 args->n_devices * sizeof(*devices_arr));
1398 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1405 GET_IDR_HANDLE(args->handle));
1411 for (i = args->n_success; i < args->n_devices; i++) {
1421 i, args->n_devices);
1424 args->n_success = i+1;
1438 for (i = 0; i < args->n_devices; i++) {
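
Both map and unmap share the n_success resume protocol: the per-device loop starts at args->n_success (lines 1301, 1411) and bumps it after every device that completes (1331, 1424), so a failed call reports how far it got and an identical retry skips the finished devices. Calling-side sketch:

    static int map_bo_to_gpus(int kfd_fd, uint64_t handle,
                              const uint32_t *gpu_ids, uint32_t n_gpus)
    {
        struct kfd_ioctl_map_memory_to_gpu_args args = {
            .handle               = handle,     /* from the alloc ioctl */
            .device_ids_array_ptr = (uintptr_t)gpu_ids,
            .n_devices            = n_gpus,
            .n_success            = 0,          /* kernel advances this */
        };

        if (ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &args)) {
            /* args.n_success devices were mapped before the failure;
             * reissuing the same args resumes after them. */
            return -1;
        }
        return 0;
    }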
1471 struct kfd_ioctl_alloc_queue_gws_args *args = data;
1476 q = pqm_get_user_queue(&p->pqm, args->queue_id);
1501 retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
1504 args->first_gws = 0;
1515 struct kfd_ioctl_get_dmabuf_info_args *args = data;
1531 if (args->metadata_ptr) {
1532 metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
1538 r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
1539 &dmabuf_adev, &args->size,
1540 metadata_buffer, args->metadata_size,
1541 &args->metadata_size, &flags, &xcp_id);
1546 args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
1548 args->gpu_id = dev->id;
1549 args->flags = flags;
1553 r = copy_to_user((void __user *)args->metadata_ptr,
1554 metadata_buffer, args->metadata_size);
1568 struct kfd_ioctl_import_dmabuf_args *args = data;
1576 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1588 r = amdgpu_amdkfd_gpuvm_import_dmabuf_fd(pdd->dev->adev, args->dmabuf_fd,
1589 args->va_addr, pdd->drm_priv,
1603 args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1618 struct kfd_ioctl_export_dmabuf_args *args = data;
1625 dev = kfd_device_by_id(GET_GPU_ID(args->handle));
1638 GET_IDR_HANDLE(args->handle));
1649 ret = dma_buf_fd(dmabuf, args->flags);
1657 args->dmabuf_fd = ret;
1671 struct kfd_ioctl_smi_events_args *args = data;
1676 pdd = kfd_process_device_data_by_id(p, args->gpuid);
1681 return kfd_smi_event_open(pdd->dev, &args->anon_fd);
1689 struct kfd_ioctl_set_xnack_mode_args *args = data;
1693 if (args->xnack_enabled >= 0) {
1700 if (p->xnack_enabled == args->xnack_enabled)
1703 if (args->xnack_enabled && !kfd_process_xnack_mode(p, true)) {
1708 r = svm_range_switch_xnack_reserve_mem(p, args->xnack_enabled);
1710 args->xnack_enabled = p->xnack_enabled;
1721 struct kfd_ioctl_svm_args *args = data;
1725 args->start_addr, args->size, args->op, args->nattr);
1727 if ((args->start_addr & ~PAGE_MASK) || (args->size & ~PAGE_MASK))
1729 if (!args->start_addr || !args->size)
1732 r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr,
1733 args->attrs);
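
Lines 1727-1729 are the entire fast-path validation for the SVM ioctl: the range start and size must both be non-zero and page-aligned. The same test in plain C, with page_size standing in for the kernel's PAGE_MASK arithmetic (userspace would get it from sysconf(_SC_PAGESIZE)):

    /* Mirror of the checks on lines 1727-1729: reject empty or
     * unaligned ranges before doing any SVM work. */
    static bool svm_range_args_valid(uint64_t start, uint64_t size,
                                     uint64_t page_size)
    {
        return start && size &&
               !(start & (page_size - 1)) &&
               !(size  & (page_size - 1));
    }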
2049 struct kfd_ioctl_criu_args *args)
2055 if (!args->devices || !args->bos || !args->priv_data)
2078 if (num_devices != args->num_devices ||
2079 num_bos != args->num_bos ||
2080 num_objects != args->num_objects ||
2081 priv_size != args->priv_data_size) {
2088 ret = criu_checkpoint_process(p, (uint8_t __user *)args->priv_data, &priv_offset);
2092 ret = criu_checkpoint_devices(p, num_devices, (uint8_t __user *)args->devices,
2093 (uint8_t __user *)args->priv_data, &priv_offset);
2105 ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data,
2110 ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data,
2115 ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset);
2123 ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
2124 (uint8_t __user *)args->priv_data, &bo_priv_offset);
2137 struct kfd_ioctl_criu_args *args,
2148 (void __user *)(args->priv_data + *priv_offset),
2178 struct kfd_ioctl_criu_args *args,
2187 if (args->num_devices != p->n_pdds)
2190 if (*priv_offset + (args->num_devices * sizeof(*device_privs)) > max_priv_data_size)
2193 device_buckets = kmalloc_array(args->num_devices, sizeof(*device_buckets), GFP_KERNEL);
2197 ret = copy_from_user(device_buckets, (void __user *)args->devices,
2198 args->num_devices * sizeof(*device_buckets));
2205 for (i = 0; i < args->num_devices; i++) {
2278 *priv_offset += args->num_devices * sizeof(*device_privs);
2419 struct kfd_ioctl_criu_args *args,
2428 if (*priv_offset + (args->num_bos * sizeof(*bo_privs)) > max_priv_data_size)
2434 bo_buckets = kvmalloc_array(args->num_bos, sizeof(*bo_buckets), GFP_KERNEL);
2438 ret = copy_from_user(bo_buckets, (void __user *)args->bos,
2439 args->num_bos * sizeof(*bo_buckets));
2446 bo_privs = kvmalloc_array(args->num_bos, sizeof(*bo_privs), GFP_KERNEL);
2452 ret = copy_from_user(bo_privs, (void __user *)args->priv_data + *priv_offset,
2453 args->num_bos * sizeof(*bo_privs));
2459 *priv_offset += args->num_bos * sizeof(*bo_privs);
2462 for (; i < args->num_bos; i++) {
2471 ret = copy_to_user((void __user *)args->bos,
2473 (args->num_bos * sizeof(*bo_buckets)));
2490 struct kfd_ioctl_criu_args *args,
2501 for (i = 0; i < args->num_objects; i++) {
2509 ret = get_user(object_type, (uint32_t __user *)(args->priv_data + *priv_offset));
2517 ret = kfd_criu_restore_queue(p, (uint8_t __user *)args->priv_data,
2523 ret = kfd_criu_restore_event(filep, p, (uint8_t __user *)args->priv_data,
2529 ret = kfd_criu_restore_svm(p, (uint8_t __user *)args->priv_data,
2546 struct kfd_ioctl_criu_args *args)
2552 args->num_devices, args->num_bos, args->num_objects, args->priv_data_size);
2554 if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size ||
2555 !args->num_devices || !args->num_bos)
2569 ret = criu_restore_process(p, args, &priv_offset, args->priv_data_size);
2573 ret = criu_restore_devices(p, args, &priv_offset, args->priv_data_size);
2577 ret = criu_restore_bos(p, args, &priv_offset, args->priv_data_size);
2581 ret = criu_restore_objects(filep, p, args, &priv_offset, args->priv_data_size);
2585 if (priv_offset != args->priv_data_size) {
2602 struct kfd_ioctl_criu_args *args)
2626 struct kfd_ioctl_criu_args *args)
2633 args->pid);
2635 pid = find_get_pid(args->pid);
2637 pr_err("Cannot find pid info for %i\n", args->pid);
2647 pr_debug("Cannot find process info for %i\n", args->pid);
2654 pr_err("kfd_criu_resume_svm failed for %i\n", args->pid);
2660 pr_err("amdgpu_amdkfd_criu_resume failed for %i\n", args->pid);
2671 struct kfd_ioctl_criu_args *args)
2689 args->pid = task_pid_nr_ns(p->lead_thread,
2692 ret = criu_get_process_object_info(p, &args->num_devices, &args->num_bos,
2693 &args->num_objects, &args->priv_data_size);
2698 args->num_devices, args->num_bos, args->num_objects,
2699 args->priv_data_size);
2712 struct kfd_ioctl_criu_args *args = data;
2715 dev_dbg(kfd_device, "CRIU operation: %d\n", args->op);
2716 switch (args->op) {
2718 ret = criu_process_info(filep, p, args);
2721 ret = criu_checkpoint(filep, p, args);
2724 ret = criu_unpause(filep, p, args);
2727 ret = criu_restore(filep, p, args);
2730 ret = criu_resume(filep, p, args);
2733 dev_dbg(kfd_device, "Unsupported CRIU operation:%d\n", args->op);
2739 dev_dbg(kfd_device, "CRIU operation:%d err:%d\n", args->op, ret);
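
The CRIU ioctl is a dispatcher over args->op (PROCESS_INFO, CHECKPOINT, UNPAUSE, RESTORE, RESUME), and checkpoint/restore stream every stage's private data through one user blob at a running priv_offset; restore (lines 2569-2585) consumes in the same order it was produced and insists the final offset equals priv_data_size exactly. A hypothetical kernel-side helper sketching the consuming half of that pattern (read_priv_chunk is not in the driver; copy_from_user is the real primitive each criu_restore_* helper uses):

    /* Pull one stage's slice out of the shared priv_data blob and
     * advance the running offset, as each criu_restore_* helper does. */
    static int read_priv_chunk(void *dst, const uint8_t __user *priv_data,
                               uint64_t *priv_offset, size_t size)
    {
        if (copy_from_user(dst, priv_data + *priv_offset, size))
            return -EFAULT;
        *priv_offset += size;
        return 0;
    }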
2873 struct kfd_ioctl_runtime_enable_args *args = data;
2878 if (args->mode_mask & KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK)
2879 r = runtime_enable(p, args->r_debug,
2880 !!(args->mode_mask & KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK));
2891 struct kfd_ioctl_dbg_trap_args *args = data;
2904 pid = find_get_pid(args->pid);
2906 pr_debug("Cannot find pid info for %i\n", args->pid);
2923 if (args->op == KFD_IOC_DBG_TRAP_ENABLE) {
2937 pr_debug("Cannot find process PID %i to debug\n", args->pid);
2945 if (target != p && args->op != KFD_IOC_DBG_TRAP_DISABLE
2947 pr_err("PID %i is not PTRACED and cannot be debugged\n", args->pid);
2957 if (args->op != KFD_IOC_DBG_TRAP_ENABLE && !target->debug_trap_enabled) {
2958 pr_err("PID %i not debug enabled for op %i\n", args->pid, args->op);
2964 (args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE ||
2965 args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE ||
2966 args->op == KFD_IOC_DBG_TRAP_SUSPEND_QUEUES ||
2967 args->op == KFD_IOC_DBG_TRAP_RESUME_QUEUES ||
2968 args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ||
2969 args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH ||
2970 args->op == KFD_IOC_DBG_TRAP_SET_FLAGS)) {
2975 if (args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ||
2976 args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH) {
2978 args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ?
2979 args->set_node_address_watch.gpu_id :
2980 args->clear_node_address_watch.gpu_id);
2989 switch (args->op) {
2995 args->enable.dbg_fd,
2996 (void __user *)args->enable.rinfo_ptr,
2997 &args->enable.rinfo_size);
2999 target->exception_enable_mask = args->enable.exception_mask;
3007 args->send_runtime_event.gpu_id,
3008 args->send_runtime_event.queue_id,
3009 args->send_runtime_event.exception_mask);
3013 args->set_exceptions_enabled.exception_mask);
3017 args->launch_override.override_mode,
3018 args->launch_override.enable_mask,
3019 args->launch_override.support_request_mask,
3020 &args->launch_override.enable_mask,
3021 &args->launch_override.support_request_mask);
3025 args->launch_mode.launch_mode);
3029 args->suspend_queues.num_queues,
3030 args->suspend_queues.grace_period,
3031 args->suspend_queues.exception_mask,
3032 (uint32_t *)args->suspend_queues.queue_array_ptr);
3036 r = resume_queues(target, args->resume_queues.num_queues,
3037 (uint32_t *)args->resume_queues.queue_array_ptr);
3041 args->set_node_address_watch.address,
3042 args->set_node_address_watch.mask,
3043 &args->set_node_address_watch.id,
3044 args->set_node_address_watch.mode);
3048 args->clear_node_address_watch.id);
3051 r = kfd_dbg_trap_set_flags(target, &args->set_flags.flags);
3055 &args->query_debug_event.queue_id,
3056 &args->query_debug_event.gpu_id,
3057 args->query_debug_event.exception_mask,
3058 &args->query_debug_event.exception_mask);
3062 args->query_exception_info.source_id,
3063 args->query_exception_info.exception_code,
3064 args->query_exception_info.clear_exception,
3065 (void __user *)args->query_exception_info.info_ptr,
3066 &args->query_exception_info.info_size);
3070 args->queue_snapshot.exception_mask,
3071 (void __user *)args->queue_snapshot.snapshot_buf_ptr,
3072 &args->queue_snapshot.num_queues,
3073 &args->queue_snapshot.entry_size);
3077 args->device_snapshot.exception_mask,
3078 (void __user *)args->device_snapshot.snapshot_buf_ptr,
3079 &args->device_snapshot.num_devices,
3080 &args->device_snapshot.entry_size);
3083 pr_err("Invalid option: %i\n", args->op);
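
The debug-trap ioctl follows the same dispatcher shape: args->op selects a case, and each op reads its own sub-struct from a union inside kfd_ioctl_dbg_trap_args (enable, suspend_queues, set_node_address_watch, and so on). A calling-side sketch suspending two hypothetical queues of an already debug-enabled target:

    static int suspend_two_queues(int kfd_fd, uint64_t target_pid,
                                  uint32_t qid_a, uint32_t qid_b)
    {
        uint32_t qids[2] = { qid_a, qid_b };
        struct kfd_ioctl_dbg_trap_args args = {
            .pid = target_pid,
            .op  = KFD_IOC_DBG_TRAP_SUSPEND_QUEUES,
        };

        args.suspend_queues.num_queues      = 2;
        args.suspend_queues.queue_array_ptr = (uintptr_t)qids;
        /* Result semantics are op-specific for this ioctl. */
        return ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);
    }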