Lines Matching defs:xe

77 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res)
79 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe);
119 mem_type_to_migrate(struct xe_device *xe, u32 mem_type)
123 xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type));
124 tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
130 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
133 xe_assert(xe, resource_is_vram(res));
134 mgr = ttm_manager_type(&xe->ttm, res->mem_type);
138 static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
142 xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
151 static void add_vram(struct xe_device *xe, struct xe_bo *bo,
158 xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
160 vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram;
161 xe_assert(xe, vram && vram->usable_size);
184 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
188 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
190 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
193 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
197 xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
209 static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
214 try_add_vram(xe, bo, bo_flags, &c);
215 try_add_system(xe, bo, bo_flags, &c);
216 try_add_stolen(xe, bo, bo_flags, &c);
229 int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
233 return __xe_bo_placement_for_flags(xe, bo, bo_flags);
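The matches above (lines 138-233) are the placement-list construction: each try_add_*() helper checks the requested BO flags, asserts that the running count stays within bo->placements, and appends an entry, with VRAM tried before system and stolen (lines 214-216). Below is a minimal standalone sketch of that pattern; the flag names, placement values, and array size are illustrative stand-ins, not the driver's definitions.

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins; the driver uses XE_BO_FLAG_* and XE_PL_*. */
#define BO_FLAG_SYSTEM (1u << 0)
#define BO_FLAG_VRAM0  (1u << 1)
#define BO_FLAG_STOLEN (1u << 2)

enum placement { PL_SYSTEM, PL_VRAM0, PL_STOLEN };

struct bo {
        enum placement placements[3];
        unsigned int flags;
};

/* Append one placement if its flag was requested, asserting the array bound. */
static void try_add(struct bo *bo, unsigned int flag, enum placement pl, unsigned int *c)
{
        if (!(bo->flags & flag))
                return;
        assert(*c < sizeof(bo->placements) / sizeof(bo->placements[0]));
        bo->placements[(*c)++] = pl;
}

int main(void)
{
        struct bo bo = { .flags = BO_FLAG_VRAM0 | BO_FLAG_SYSTEM };
        unsigned int c = 0;

        /* Same ordering as lines 214-216: VRAM, then system, then stolen. */
        try_add(&bo, BO_FLAG_VRAM0, PL_VRAM0, &c);
        try_add(&bo, BO_FLAG_SYSTEM, PL_SYSTEM, &c);
        try_add(&bo, BO_FLAG_STOLEN, PL_STOLEN, &c);

        printf("built %u placements\n", c);
        return 0;
}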
251 * For xe, sg bos that are evicted to system just trigger a
317 struct xe_device *xe = xe_bo_device(bo);
327 tt->dev = xe->drm.dev;
331 extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size),
352 (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_FLAG_PAGETABLE))
414 struct xe_device *xe = ttm_to_xe_device(bdev);
444 return xe_ttm_stolen_io_mem_reserve(xe, mem);
450 static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
527 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
530 xe_assert(xe, attach);
531 xe_assert(xe, ttm_bo->ttm);
576 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
590 ret = xe_bo_trigger_rebind(xe, bo, ctx);
604 mutex_lock(&xe->mem_access.vram_userfault.lock);
607 mutex_unlock(&xe->mem_access.vram_userfault.lock);
618 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
628 bool handle_system_ccs = (!IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) &&
711 migrate = mem_type_to_migrate(xe, new_mem->mem_type);
713 migrate = mem_type_to_migrate(xe, old_mem_type);
715 migrate = xe->tiles[0].migrate;
717 xe_assert(xe, migrate);
719 xe_pm_runtime_get_noresume(xe);
743 xe_pm_runtime_put(xe);
747 xe_assert(xe, new_mem->start ==
761 xe_pm_runtime_put(xe);
786 xe_pm_runtime_put(xe);
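The xe_bo_move() matches (lines 711-786) show two things: the migration context is derived from the memory types involved, and the copy is bracketed by xe_pm_runtime_get_noresume()/xe_pm_runtime_put() on every exit path. The guarding conditions are not part of the matches, but the fallback order is visible: new memory type first, then the old one, then tile 0. A compilable sketch of that selection with stand-in types (the driver resolves the tile through mem_type_to_migrate(), line 119):

#include <stdio.h>

/* Stand-in types; the driver's equivalents are struct xe_tile and xe_migrate. */
struct migrate { int tile_id; };
struct tile { struct migrate migrate; };
struct device { struct tile tiles[2]; };

enum mem_type { MEM_SYSTEM, MEM_TT, MEM_VRAM0, MEM_VRAM1, MEM_STOLEN };

static int needs_gpu_copy(enum mem_type t)
{
        return t == MEM_VRAM0 || t == MEM_VRAM1 || t == MEM_STOLEN;
}

/* Fallback order seen at lines 711-715: new type, then old type, then tile 0. */
static struct migrate *pick_migrate(struct device *dev,
                                    enum mem_type old_type, enum mem_type new_type)
{
        if (needs_gpu_copy(new_type))
                return &dev->tiles[new_type == MEM_VRAM1 ? 1 : 0].migrate;
        if (needs_gpu_copy(old_type))
                return &dev->tiles[old_type == MEM_VRAM1 ? 1 : 0].migrate;
        return &dev->tiles[0].migrate;
}

int main(void)
{
        struct device dev = { .tiles = { { { 0 } }, { { 1 } } } };

        printf("VRAM1 -> system copies on tile %d\n",
               pick_migrate(&dev, MEM_VRAM1, MEM_SYSTEM)->tile_id);
        return 0;
}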
937 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
940 xe_assert(xe, !kref_read(&ttm_bo->kref));
951 xe_assert(xe, locked);
1040 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
1046 xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));
1059 mutex_lock(&xe->mem_access.vram_userfault.lock);
1062 mutex_unlock(&xe->mem_access.vram_userfault.lock);
1104 struct xe_device *xe = to_xe_device(ddev);
1111 xe_pm_runtime_get(xe);
1133 mutex_lock(&xe->mem_access.vram_userfault.lock);
1135 list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list);
1136 mutex_unlock(&xe->mem_access.vram_userfault.lock);
1142 xe_pm_runtime_put(xe);
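Lines 1104-1142 are the CPU fault handler: it holds a runtime-PM reference for the duration of the fault and, when the fault lands in VRAM, records the BO on xe->mem_access.vram_userfault.list under its mutex so the mapping can be revoked later (the release path at lines 1059-1062 takes the same lock to unlink). A toy model of the get/handle/put bracketing; the helpers below are stand-ins, not the driver's API.

#include <stdio.h>

/* Stand-ins for xe_pm_runtime_get()/xe_pm_runtime_put(). */
static int wake_refs;

static void pm_get(void) { wake_refs++; }
static void pm_put(void) { wake_refs--; }

static int handle_fault(int fault_ok)
{
        int ret;

        pm_get();               /* keep the device awake while touching VRAM */
        ret = fault_ok ? 0 : -1;
        if (ret == 0) {
                /* the real handler would now link the BO onto the
                 * vram_userfault list under its mutex (lines 1133-1136) */
        }
        pm_put();               /* paired put on every exit path */
        return ret;
}

int main(void)
{
        printf("fault: %d, refs left: %d\n", handle_fault(1), wake_refs);
        return 0;
}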
1196 struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
1212 xe_assert(xe, !tile || type == ttm_bo_type_kernel);
1221 ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
1257 drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
1265 err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
1276 err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
1322 static int __xe_bo_fixed_placement(struct xe_device *xe,
1361 __xe_bo_create_locked(struct xe_device *xe,
1378 err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
1385 bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
1406 tile = xe_device_get_root_tile(xe);
1408 xe_assert(xe, tile);
1430 xe_bo_create_locked_range(struct xe_device *xe,
1435 return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags);
1438 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
1442 return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags);
1445 struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
1451 struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
1460 struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
1464 struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags);
1472 struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
1483 xe_ttm_stolen_cpu_access_needs_ggtt(xe))
1486 bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
1511 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
1515 return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
1518 struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
1522 struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL,
1528 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);
1538 struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
1544 bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags);
1548 ret = drmm_add_action_or_reset(&xe->drm, __xe_bo_unpin_map_no_vm, bo);
1555 struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
1558 struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags);
1563 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);
1570 * @xe: xe device
1581 int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
1588 xe_assert(xe, IS_DGFX(xe));
1589 xe_assert(xe, !(*src)->vmap.is_iomem);
1591 bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr,
1596 drmm_release_action(&xe->drm, __xe_bo_unpin_map_no_vm, *src);
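Lines 1518-1596 are the convenience constructors: xe_bo_create_from_data() pins and maps a kernel BO and copies the payload in with xe_map_memcpy_to(), and the managed variants additionally register __xe_bo_unpin_map_no_vm through drmm_add_action_or_reset() so the BO is torn down with the device rather than by its caller. The standalone sketch below models that create, copy, register-release shape; every name in it is a stand-in, and the fixed-size action table is purely illustrative.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef void (*release_fn)(void *arg);

struct action { release_fn fn; void *arg; };
static struct action actions[8];
static unsigned int nr_actions;

/* Analogue of drmm_add_action_or_reset(): remember a release callback. */
static void register_release(release_fn fn, void *arg)
{
        assert(nr_actions < sizeof(actions) / sizeof(actions[0]));
        actions[nr_actions].fn = fn;
        actions[nr_actions].arg = arg;
        nr_actions++;
}

/* Analogue of device teardown: run the registered actions in reverse order. */
static void release_all(void)
{
        while (nr_actions) {
                struct action *a = &actions[--nr_actions];

                a->fn(a->arg);
        }
}

struct managed_buf { void *vaddr; size_t size; };

static void buf_release(void *arg)
{
        struct managed_buf *buf = arg;

        free(buf->vaddr);
        free(buf);
}

static struct managed_buf *managed_buf_create_from_data(const void *data, size_t size)
{
        struct managed_buf *buf = calloc(1, sizeof(*buf));

        if (!buf)
                return NULL;
        buf->vaddr = malloc(size);
        if (!buf->vaddr) {
                free(buf);
                return NULL;
        }
        memcpy(buf->vaddr, data, size);  /* cf. xe_map_memcpy_to(), line 1563 */
        buf->size = size;
        register_release(buf_release, buf);
        return buf;
}

int main(void)
{
        static const char blob[] = "payload";

        managed_buf_create_from_data(blob, sizeof(blob));
        release_all();
        return 0;
}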
1608 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
1611 return xe_ttm_stolen_gpu_offset(xe);
1628 struct xe_device *xe = xe_bo_device(bo);
1631 xe_assert(xe, !bo->vm);
1632 xe_assert(xe, xe_bo_is_user(bo));
1640 spin_lock(&xe->pinned.lock);
1642 &xe->pinned.external_vram);
1643 spin_unlock(&xe->pinned.lock);
1660 struct xe_device *xe = xe_bo_device(bo);
1664 xe_assert(xe, !xe_bo_is_user(bo));
1667 xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED |
1674 xe_assert(xe, !bo->ttm.base.import_attach);
1677 xe_assert(xe, !xe_bo_is_pinned(bo));
1688 if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
1693 xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
1699 spin_lock(&xe->pinned.lock);
1700 list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
1701 spin_unlock(&xe->pinned.lock);
1728 struct xe_device *xe = xe_bo_device(bo);
1730 xe_assert(xe, !bo->vm);
1731 xe_assert(xe, xe_bo_is_pinned(bo));
1732 xe_assert(xe, xe_bo_is_user(bo));
1735 spin_lock(&xe->pinned.lock);
1737 spin_unlock(&xe->pinned.lock);
1751 struct xe_device *xe = xe_bo_device(bo);
1753 xe_assert(xe, !bo->ttm.base.import_attach);
1754 xe_assert(xe, xe_bo_is_pinned(bo));
1756 if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
1761 xe_assert(xe, !list_empty(&bo->pinned_link));
1763 spin_lock(&xe->pinned.lock);
1765 spin_unlock(&xe->pinned.lock);
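Lines 1628-1765 are the pin/unpin bookkeeping: pinning links the BO onto one of the xe->pinned lists (external_vram for user BOs, kernel_bo_present for kernel BOs) under xe->pinned.lock so suspend/resume can evict and restore it, and unpinning asserts the link is still populated before removing it. A compilable model of that paired add/remove with invariant checks; the mutex stands in for the driver's spinlock and all other names are illustrative.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct pinned_bo {
        struct pinned_bo *next;
        int pinned;
};

static pthread_mutex_t pinned_lock = PTHREAD_MUTEX_INITIALIZER;  /* cf. xe->pinned.lock */
static struct pinned_bo *pinned_list;

static void bo_pin(struct pinned_bo *bo)
{
        assert(!bo->pinned);     /* cf. xe_assert(xe, !xe_bo_is_pinned(bo)), line 1677 */
        bo->pinned = 1;

        pthread_mutex_lock(&pinned_lock);
        bo->next = pinned_list;  /* cf. list_add_tail() onto a pinned list, line 1700 */
        pinned_list = bo;
        pthread_mutex_unlock(&pinned_lock);
}

static void bo_unpin(struct pinned_bo *bo)
{
        struct pinned_bo **pp;

        assert(bo->pinned);      /* cf. xe_assert(xe, xe_bo_is_pinned(bo)), line 1754 */

        pthread_mutex_lock(&pinned_lock);
        for (pp = &pinned_list; *pp && *pp != bo; pp = &(*pp)->next)
                ;
        assert(*pp);             /* cf. xe_assert(xe, !list_empty(&bo->pinned_link)), line 1761 */
        *pp = bo->next;
        pthread_mutex_unlock(&pinned_lock);

        bo->pinned = 0;
}

int main(void)
{
        struct pinned_bo bo = { 0 };

        bo_pin(&bo);
        bo_unpin(&bo);
        printf("list empty: %d\n", pinned_list == NULL);
        return 0;
}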
1822 struct xe_device *xe = xe_bo_device(bo);
1826 xe_assert(xe, page_size <= PAGE_SIZE);
1831 xe_assert(xe, bo->ttm.ttm);
1903 struct xe_device *xe = to_xe_device(dev);
1912 if (XE_IOCTL_DBG(xe, args->extensions) ||
1913 XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
1914 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1918 if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) ||
1922 if (XE_IOCTL_DBG(xe, args->flags &
1928 if (XE_IOCTL_DBG(xe, args->handle))
1931 if (XE_IOCTL_DBG(xe, !args->size))
1934 if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX))
1937 if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK))
1950 if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK)))
1956 if (XE_IOCTL_DBG(xe, !args->cpu_caching ||
1960 if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK &&
1964 if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT &&
1970 if (XE_IOCTL_DBG(xe, !vm))
1977 bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching,
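Lines 1903-1977 are the gem-create ioctl's argument validation: every user-supplied field is screened with XE_IOCTL_DBG(xe, cond), which evaluates the check and lets a failed one be reported through the device's debug logging, before xe_bo_create_user() is ever called. A standalone sketch of that early-return validation style; the macro, struct, and field names below are stand-ins rather than the uAPI definitions.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for XE_IOCTL_DBG(): report the failed check and yield its value. */
#define IOCTL_DBG(cond) \
        ((cond) ? (fprintf(stderr, "ioctl check failed: %s\n", #cond), 1) : 0)

struct create_args {
        uint64_t size;
        uint32_t placement;
        uint32_t handle;        /* output-only, must be zero on input */
};

static int validate_create(const struct create_args *args, uint32_t valid_placements)
{
        if (IOCTL_DBG(args->handle))
                return -EINVAL;
        if (IOCTL_DBG(!args->size))
                return -EINVAL;
        if (IOCTL_DBG(args->size & 0xfffULL))   /* page aligned (4 KiB here) */
                return -EINVAL;
        if (IOCTL_DBG(args->placement & ~valid_placements))
                return -EINVAL;
        return 0;
}

int main(void)
{
        struct create_args args = { .size = 4096, .placement = 0x1 };

        printf("validate: %d\n", validate_create(&args, 0x3));
        return 0;
}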
2013 struct xe_device *xe = to_xe_device(dev);
2017 if (XE_IOCTL_DBG(xe, args->extensions) ||
2018 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2021 if (XE_IOCTL_DBG(xe, args->flags))
2025 if (XE_IOCTL_DBG(xe, !gem_obj))
2125 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
2152 drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN);
2157 add_vram(xe, bo, &requested, bo->flags, mem_type, &c);
2203 struct xe_device *xe = xe_bo_device(bo);
2205 if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))
2208 if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device)
2216 if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
2270 struct xe_device *xe = to_xe_device(dev);
2276 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K);
2282 bo = xe_bo_create_user(xe, NULL, NULL, args->size,
2285 XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
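The final matches (lines 2270-2285) come from the dumb-buffer create path: the allocation granularity is chosen as 64 KiB when the device's VRAM requires 64K pages (XE_VRAM_FLAGS_NEED64K) and 4 KiB otherwise, and the BO is then created with XE_BO_FLAG_VRAM_IF_DGFX for the root tile (line 2285). A tiny sketch of that rounding; the constants and function name are illustrative.

#include <stdint.h>
#include <stdio.h>

#define SZ_4K  0x1000ULL
#define SZ_64K 0x10000ULL

/* Round a size up to the page granularity the VRAM region requires. */
static uint64_t dumb_bo_size(uint64_t bytes, int vram_needs_64k)
{
        uint64_t align = vram_needs_64k ? SZ_64K : SZ_4K;

        return (bytes + align - 1) & ~(align - 1);
}

int main(void)
{
        printf("%llu\n", (unsigned long long)dumb_bo_size(70000, 1)); /* 131072 */
        return 0;
}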