Lines matching refs: op

1028 struct xe_vma_op *op;
1030 op = kzalloc(sizeof(*op), GFP_KERNEL);
1032 if (unlikely(!op))
1035 return &op->base;
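
The 1028-1035 matches appear to come from the Xe driver's VM-bind code (xe_vm.c): they are the GPUVM op allocator. A minimal sketch assembled from those lines follows; the function name (xe_vm_op_alloc) and the lines the matcher skipped are assumptions.

/* struct xe_vma_op embeds the core struct drm_gpuva_op as ->base, so the
 * allocator returns &op->base to drm_gpuvm and the driver later recovers
 * its wrapper with container_of() (gpuva_op_to_vma_op() in the matches
 * further down).
 */
static struct drm_gpuva_op *xe_vm_op_alloc(void)
{
	struct xe_vma_op *op;

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (unlikely(!op))
		return NULL;	/* assumed failure path; not in the matches */

	return &op->base;
}
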
1992 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
1996 switch (op->op) {
1999 (ULL)op->map.va.addr, (ULL)op->map.va.range);
2002 vma = gpuva_to_vma(op->remap.unmap->va);
2005 op->remap.unmap->keep ? 1 : 0);
2006 if (op->remap.prev)
2009 (ULL)op->remap.prev->va.addr,
2010 (ULL)op->remap.prev->va.range);
2011 if (op->remap.next)
2014 (ULL)op->remap.next->va.addr,
2015 (ULL)op->remap.next->va.range);
2018 vma = gpuva_to_vma(op->unmap.va);
2021 op->unmap.keep ? 1 : 0);
2024 vma = gpuva_to_vma(op->prefetch.va);
2033 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
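
The 1992-2024 matches are the debug printer built under CONFIG_DRM_XE_DEBUG_VM; the 2033 match is its empty stub for non-debug builds. A sketch of the switch reassembled from the matched lines; the vm_dbg() message prefixes and the default branch are assumptions.

static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
{
	struct xe_vma *vma;

	switch (op->op) {
	case DRM_GPUVA_OP_MAP:
		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
		break;
	case DRM_GPUVA_OP_REMAP:
		vma = gpuva_to_vma(op->remap.unmap->va);
		vm_dbg(&xe->drm,
		       "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
		       op->remap.unmap->keep ? 1 : 0);
		/* then op->remap.prev / op->remap.next va.addr + va.range,
		 * if either piece is present (2006-2015) */
		break;
	case DRM_GPUVA_OP_UNMAP:
		vma = gpuva_to_vma(op->unmap.va);
		vm_dbg(&xe->drm,
		       "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
		       op->unmap.keep ? 1 : 0);
		break;
	case DRM_GPUVA_OP_PREFETCH:
		vma = gpuva_to_vma(op->prefetch.va);
		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
		break;
	default:
		drm_warn(&xe->drm, "NOT POSSIBLE");	/* assumed */
	}
}
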
2057 "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2098 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2100 if (__op->op == DRM_GPUVA_OP_MAP) {
2101 op->map.immediate =
2103 op->map.read_only =
2105 op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2106 op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
2107 op->map.pat_index = pat_index;
2108 } else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2109 op->prefetch.region = prefetch_region;
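
The 2098-2109 matches tag each core GPUVM op with the IOCTL's bind parameters. A sketch of that loop; the right-hand sides of the immediate/read_only assignments were cut off by the matcher, so the DRM_XE_VM_BIND_FLAG_IMMEDIATE/READONLY names and the enclosing drm_gpuva_for_each_op() loop are assumptions.

drm_gpuva_for_each_op(__op, ops) {
	struct xe_vma_op *op = gpuva_op_to_vma_op(__op);

	if (__op->op == DRM_GPUVA_OP_MAP) {
		/* MAP ops carry the user's bind flags and PAT index */
		op->map.immediate =
			flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;	/* assumed flag name */
		op->map.read_only =
			flags & DRM_XE_VM_BIND_FLAG_READONLY;	/* assumed flag name */
		op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
		op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
		op->map.pat_index = pat_index;
	} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
		/* PREFETCH only needs the target memory region */
		op->prefetch.region = prefetch_region;
	}

	print_op(vm->xe, __op);	/* assumed call site for the printer above */
}
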
2118 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2121 struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2146 vma = xe_vma_create(vm, bo, op->gem.offset,
2147 op->va.addr, op->va.addr +
2148 op->va.range - 1, pat_index, flags);
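
The 2118-2148 matches are the helper that turns a drm_gpuva_op_map into a driver VMA. A stripped-down sketch; the real helper also locks the BO around xe_vma_create() and handles userptr pinning and error unwinding, none of which appears in the matches, and the parameter types are assumptions.

static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
			      u16 pat_index, unsigned int flags)
{
	/* a NULL obj means a userptr or NULL binding rather than a BO */
	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
	struct xe_vma *vma;

	vma = xe_vma_create(vm, bo, op->gem.offset,
			    op->va.addr, op->va.addr +
			    op->va.range - 1, pat_index, flags);
	return vma;	/* BO locking and error handling omitted */
}
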
2203 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2209 switch (op->base.op) {
2211 err |= xe_vm_insert_vma(vm, op->map.vma);
2213 op->flags |= XE_VMA_OP_COMMITTED;
2218 gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2220 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2222 op->flags |= XE_VMA_OP_COMMITTED;
2224 if (op->remap.prev) {
2225 err |= xe_vm_insert_vma(vm, op->remap.prev);
2227 op->flags |= XE_VMA_OP_PREV_COMMITTED;
2228 if (!err && op->remap.skip_prev) {
2229 op->remap.prev->tile_present =
2231 op->remap.prev = NULL;
2234 if (op->remap.next) {
2235 err |= xe_vm_insert_vma(vm, op->remap.next);
2237 op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2238 if (!err && op->remap.skip_next) {
2239 op->remap.next->tile_present =
2241 op->remap.next = NULL;
2247 op->base.remap.unmap->va->va.addr = op->remap.start;
2248 op->base.remap.unmap->va->va.range = op->remap.range;
2253 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2254 op->flags |= XE_VMA_OP_COMMITTED;
2257 op->flags |= XE_VMA_OP_COMMITTED;
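
The 2203-2257 matches are the commit step that mirrors each op into the VM's VA tree before anything touches the GPU. A condensed sketch; locals not present in the matches (the saved tile_present, the default branch) are assumptions.

static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
{
	int err = 0;

	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		err |= xe_vm_insert_vma(vm, op->map.vma);
		if (!err)
			op->flags |= XE_VMA_OP_COMMITTED;
		break;
	case DRM_GPUVA_OP_REMAP:
	{
		/* remember which tiles had the old VMA bound */
		u8 tile_present =
			gpuva_to_vma(op->base.remap.unmap->va)->tile_present;

		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
				 true);
		op->flags |= XE_VMA_OP_COMMITTED;

		if (op->remap.prev) {
			err |= xe_vm_insert_vma(vm, op->remap.prev);
			if (!err)
				op->flags |= XE_VMA_OP_PREV_COMMITTED;
			if (!err && op->remap.skip_prev) {
				/* unchanged piece keeps its bindings; no rebind */
				op->remap.prev->tile_present = tile_present;
				op->remap.prev = NULL;
			}
		}
		if (op->remap.next) {
			err |= xe_vm_insert_vma(vm, op->remap.next);
			if (!err)
				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
			if (!err && op->remap.skip_next) {
				op->remap.next->tile_present = tile_present;
				op->remap.next = NULL;
			}
		}

		/* shrink the unmap to the range that still needs unbinding */
		if (!err) {
			op->base.remap.unmap->va->va.addr = op->remap.start;
			op->base.remap.unmap->va->va.range = op->remap.range;
		}
		break;
	}
	case DRM_GPUVA_OP_UNMAP:
		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
		op->flags |= XE_VMA_OP_COMMITTED;
		break;
	case DRM_GPUVA_OP_PREFETCH:
		op->flags |= XE_VMA_OP_COMMITTED;
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");	/* assumed */
	}

	return err;
}
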
2280 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2285 INIT_LIST_HEAD(&op->link);
2286 list_add_tail(&op->link, ops_list);
2289 op->flags |= XE_VMA_OP_FIRST;
2290 op->num_syncs = num_syncs;
2291 op->syncs = syncs;
2294 op->q = q;
2296 switch (op->base.op) {
2299 flags |= op->map.read_only ?
2301 flags |= op->map.is_null ?
2303 flags |= op->map.dumpable ?
2306 vma = new_vma(vm, &op->base.map, op->map.pat_index,
2311 op->map.vma = vma;
2317 gpuva_to_vma(op->base.remap.unmap->va);
2319 op->remap.start = xe_vma_start(old);
2320 op->remap.range = xe_vma_size(old);
2322 if (op->base.remap.prev) {
2323 flags |= op->base.remap.unmap->va->flags &
2326 flags |= op->base.remap.unmap->va->flags &
2329 flags |= op->base.remap.unmap->va->flags &
2333 vma = new_vma(vm, op->base.remap.prev,
2338 op->remap.prev = vma;
2344 op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2347 if (op->remap.skip_prev) {
2349 op->remap.range -=
2352 op->remap.start = xe_vma_end(vma);
2354 (ULL)op->remap.start,
2355 (ULL)op->remap.range);
2359 if (op->base.remap.next) {
2360 flags |= op->base.remap.unmap->va->flags &
2363 flags |= op->base.remap.unmap->va->flags &
2366 flags |= op->base.remap.unmap->va->flags &
2370 vma = new_vma(vm, op->base.remap.next,
2375 op->remap.next = vma;
2381 op->remap.skip_next = !xe_vma_is_userptr(old) &&
2384 if (op->remap.skip_next) {
2386 op->remap.range -=
2390 (ULL)op->remap.start,
2391 (ULL)op->remap.range);
2404 last_op = op;
2406 err = xe_vma_op_commit(vm, op);
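
Within the 2280-2406 parse matches, the detail worth pulling out is the skip_prev/skip_next computation (2344-2391): if the leftover front or back piece of a remapped VMA is not a userptr and stays aligned to the old VMA's largest PTE size, its existing page tables are reused and only the trimmed middle range is unbound. The prev-side fragment, shown in isolation; the xe_vma_max_pte_size() condition and the subtraction's right-hand side do not appear in the matches and are assumptions.

op->remap.skip_prev = !xe_vma_is_userptr(old) &&
	IS_ALIGNED(xe_vma_end(vma), xe_vma_max_pte_size(old));	/* assumed condition */
if (op->remap.skip_prev) {
	/* keep the prev piece's page tables; trim the range still to unbind */
	op->remap.range -=
		xe_vma_end(vma) - xe_vma_start(old);
	op->remap.start = xe_vma_end(vma);
	vm_dbg(&vm->xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
	       (ULL)op->remap.start,
	       (ULL)op->remap.range);
}
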
2428 struct xe_vma *vma, struct xe_vma_op *op)
2441 switch (op->base.op) {
2443 err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2444 op->syncs, op->num_syncs,
2445 op->map.immediate || !xe_vm_in_fault_mode(vm),
2446 op->flags & XE_VMA_OP_FIRST,
2447 op->flags & XE_VMA_OP_LAST);
2451 bool prev = !!op->remap.prev;
2452 bool next = !!op->remap.next;
2454 if (!op->remap.unmap_done) {
2457 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2458 op->num_syncs,
2459 op->flags & XE_VMA_OP_FIRST,
2460 op->flags & XE_VMA_OP_LAST &&
2464 op->remap.unmap_done = true;
2468 op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2469 err = xe_vm_bind(vm, op->remap.prev, op->q,
2470 xe_vma_bo(op->remap.prev), op->syncs,
2471 op->num_syncs, true, false,
2472 op->flags & XE_VMA_OP_LAST && !next);
2473 op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2476 op->remap.prev = NULL;
2480 op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2481 err = xe_vm_bind(vm, op->remap.next, op->q,
2482 xe_vma_bo(op->remap.next),
2483 op->syncs, op->num_syncs,
2485 op->flags & XE_VMA_OP_LAST);
2486 op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2489 op->remap.next = NULL;
2495 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2496 op->num_syncs, op->flags & XE_VMA_OP_FIRST,
2497 op->flags & XE_VMA_OP_LAST);
2500 err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2501 op->syncs, op->num_syncs,
2502 op->flags & XE_VMA_OP_FIRST,
2503 op->flags & XE_VMA_OP_LAST);
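
The 2428-2503 matches are the per-op execute dispatch inside op_execute(): each op type maps onto one of the driver's bind/unbind/prefetch entry points, with the XE_VMA_OP_FIRST/LAST flags passed through so per-IOCTL sync handling happens only once. Its skeleton, with the multi-phase REMAP case and all error handling reduced to comments; the enclosing locals (vm, vma, err) come from the function and are not shown.

switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
	err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
			 op->syncs, op->num_syncs,
			 op->map.immediate || !xe_vm_in_fault_mode(vm),
			 op->flags & XE_VMA_OP_FIRST,
			 op->flags & XE_VMA_OP_LAST);
	break;
case DRM_GPUVA_OP_REMAP:
	/* phase 1: unbind the old VMA (op->remap.unmap_done records that a
	 * retry can skip this); phase 2: xe_vm_bind() op->remap.prev and
	 * op->remap.next if present, with XE_VMA_LAST_REBIND set around
	 * each rebind as in the 2468-2489 matches */
	break;
case DRM_GPUVA_OP_UNMAP:
	err = xe_vm_unbind(vm, vma, op->q, op->syncs,
			   op->num_syncs, op->flags & XE_VMA_OP_FIRST,
			   op->flags & XE_VMA_OP_LAST);
	break;
case DRM_GPUVA_OP_PREFETCH:
	err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
			     op->syncs, op->num_syncs,
			     op->flags & XE_VMA_OP_FIRST,
			     op->flags & XE_VMA_OP_LAST);
	break;
}
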
2516 struct xe_vma_op *op)
2524 err = op_execute(&exec, vm, vma, op);
2534 if (op->base.op == DRM_GPUVA_OP_REMAP) {
2535 if (!op->remap.unmap_done)
2536 vma = gpuva_to_vma(op->base.remap.unmap->va);
2537 else if (op->remap.prev)
2538 vma = op->remap.prev;
2540 vma = op->remap.next;
2555 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2561 switch (op->base.op) {
2563 ret = __xe_vma_op_execute(vm, op->map.vma, op);
2569 if (!op->remap.unmap_done)
2570 vma = gpuva_to_vma(op->base.remap.unmap->va);
2571 else if (op->remap.prev)
2572 vma = op->remap.prev;
2574 vma = op->remap.next;
2576 ret = __xe_vma_op_execute(vm, vma, op);
2580 ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2581 op);
2585 gpuva_to_vma(op->base.prefetch.va),
2586 op);
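
The 2516-2540 and 2555-2586 matches are the two wrappers around op_execute(): the inner one retries under drm_exec and re-pins userptr pages on -EAGAIN, the outer one picks which VMA the op applies to. A sketch of the inner wrapper; the drm_exec init arguments and the userptr re-pin helpers are not in the matches and are assumptions.

static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
			       struct xe_vma_op *op)
{
	struct drm_exec exec;
	int err;

retry_userptr:
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);	/* args assumed */
	drm_exec_until_all_locked(&exec) {
		err = op_execute(&exec, vm, vma, op);
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;
	}
	drm_exec_fini(&exec);

	if (err == -EAGAIN) {
		/* a REMAP may already have unbound the old VMA, so re-pick
		 * the VMA the retry should operate on (2534-2540) */
		if (op->base.op == DRM_GPUVA_OP_REMAP) {
			if (!op->remap.unmap_done)
				vma = gpuva_to_vma(op->base.remap.unmap->va);
			else if (op->remap.prev)
				vma = op->remap.prev;
			else
				vma = op->remap.next;
		}

		if (xe_vma_is_userptr(vma)) {
			/* assumed: re-pin invalidated userptr pages, retry */
			err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
			if (!err)
				goto retry_userptr;
		}
	}

	return err;
}
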
2595 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2597 bool last = op->flags & XE_VMA_OP_LAST;
2600 while (op->num_syncs--)
2601 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2602 kfree(op->syncs);
2603 if (op->q)
2604 xe_exec_queue_put(op->q);
2606 if (!list_empty(&op->link))
2607 list_del(&op->link);
2608 if (op->ops)
2609 drm_gpuva_ops_free(&vm->gpuvm, op->ops);
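
The 2595-2609 matches are the per-op cleanup. Reassembled below; the if (last) grouping and the final xe_vm_put() are assumptions where the matcher skipped lines.

static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
{
	bool last = op->flags & XE_VMA_OP_LAST;

	if (last) {
		/* syncs and the exec queue reference belong to the whole
		 * IOCTL, so only the last op releases them */
		while (op->num_syncs--)
			xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
		kfree(op->syncs);
		if (op->q)
			xe_exec_queue_put(op->q);
	}
	if (!list_empty(&op->link))
		list_del(&op->link);
	if (op->ops)
		drm_gpuva_ops_free(&vm->gpuvm, op->ops);
	if (last)
		xe_vm_put(vm);	/* assumed: drop the ref taken at submit */
}
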
2614 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2620 switch (op->base.op) {
2622 if (op->map.vma) {
2623 prep_vma_destroy(vm, op->map.vma, post_commit);
2624 xe_vma_destroy_unlocked(op->map.vma);
2629 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2642 struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2644 if (op->remap.prev) {
2645 prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2646 xe_vma_destroy_unlocked(op->remap.prev);
2648 if (op->remap.next) {
2649 prep_vma_destroy(vm, op->remap.next, next_post_commit);
2650 xe_vma_destroy_unlocked(op->remap.next);
2683 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2685 xe_vma_op_unwind(vm, op,
2686 op->flags & XE_VMA_OP_COMMITTED,
2687 op->flags & XE_VMA_OP_PREV_COMMITTED,
2688 op->flags & XE_VMA_OP_NEXT_COMMITTED);
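
The 2614-2688 matches are the unwind path used when parsing or committing fails part-way: xe_vma_op_unwind() destroys any VMAs a MAP/REMAP created and restores the old VMA that had been marked for destruction (2629, 2642). The caller loop from the 2683-2688 matches, with the reverse iterator and enclosing names assumed:

drm_gpuva_for_each_op_reverse(__op, __ops) {	/* assumed reverse walk */
	struct xe_vma_op *op = gpuva_op_to_vma_op(__op);

	/* the COMMITTED flags recorded by xe_vma_op_commit() tell the
	 * unwind which insertions and removals actually happened */
	xe_vma_op_unwind(vm, op,
			 op->flags & XE_VMA_OP_COMMITTED,
			 op->flags & XE_VMA_OP_PREV_COMMITTED,
			 op->flags & XE_VMA_OP_NEXT_COMMITTED);
}
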
2698 struct xe_vma_op *op, *next;
2703 list_for_each_entry_safe(op, next, ops_list, link) {
2704 err = xe_vma_op_execute(vm, op);
2706 drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
2707 op->base.op, err);
2714 xe_vma_op_cleanup(vm, op);
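
The 2698-2714 matches are the top-level execute loop. A sketch; the function name and the failure handling after the drm_warn (killing the VM rather than rolling back) are not in the matches and are assumptions.

static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
				     struct list_head *ops_list)
{
	struct xe_vma_op *op, *next;
	int err;

	list_for_each_entry_safe(op, next, ops_list, link) {
		err = xe_vma_op_execute(vm, op);
		if (err) {
			drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
				 op->base.op, err);
			/* assumed: no rollback at this point, the VM is
			 * killed and the IOCTL fails */
			xe_vm_kill(vm);
			return -ENOSPC;
		}
		xe_vma_op_cleanup(vm, op);
	}

	return 0;
}
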
2766 u32 op = (*bind_ops)[i].op;
2793 if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
2797 XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
2800 op == DRM_XE_VM_BIND_OP_MAP &&
2803 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2805 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2807 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2809 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2811 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2813 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
2815 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
2819 op == DRM_XE_VM_BIND_OP_UNMAP)) {
2828 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
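
The 2766-2828 matches are the argument validation in the bind IOCTL: each drm_xe_vm_bind_op is rejected early if its op/flag/object combination makes no sense. A condensed sketch of the pattern; names outside the matches (obj, prefetch_region, range, the error label) are assumptions, and most checks are elided.

u32 op = (*bind_ops)[i].op;
u32 flags = (*bind_ops)[i].flags;

if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
    XE_IOCTL_DBG(xe, obj &&
		 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
    XE_IOCTL_DBG(xe, prefetch_region &&
		 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
    /* ...plus the remaining op/flag/object combination checks
     * matched at 2797-2819... */
    XE_IOCTL_DBG(xe, obj &&
		 op == DRM_XE_VM_BIND_OP_UNMAP)) {
	err = -EINVAL;
	goto free_bind_ops;	/* assumed error label */
}

/* assumed: a zero range is only legal for UNMAP_ALL (the 2828 match) */
if (XE_IOCTL_DBG(xe, !range &&
		 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
	err = -EINVAL;
	goto free_bind_ops;
}
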
3032 u32 op = bind_ops[i].op;
3039 addr, range, op, flags,
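
The 3032/3039 matches are the per-entry loop near the end of the bind IOCTL, where each validated drm_xe_vm_bind_op is expanded into GPUVM ops. A sketch built around the matched argument list; the surrounding locals (bos, obj_offset, prefetch_region, pat_index, the ops array) and the error label are assumptions.

for (i = 0; i < args->num_binds; ++i) {
	u64 range = bind_ops[i].range;
	u64 addr = bind_ops[i].addr;
	u32 op = bind_ops[i].op;
	u32 flags = bind_ops[i].flags;

	/* expand one bind entry into a list of MAP/UNMAP/REMAP/PREFETCH ops;
	 * these are then parsed, committed and executed by the functions
	 * matched above */
	ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
					  addr, range, op, flags,
					  prefetch_region, pat_index);
	if (IS_ERR(ops[i])) {
		err = PTR_ERR(ops[i]);
		goto unwind_ops;	/* assumed error label */
	}
}
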