Lines Matching defs:i915

97 struct drm_i915_private *i915 = to_i915(obj->base.dev);
155 intel_gt_flush_ggtt_writes(to_gt(i915));
191 struct drm_i915_private *i915 = to_i915(obj->base.dev);
251 intel_gt_flush_ggtt_writes(to_gt(i915));
290 setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
292 if (GRAPHICS_VER(i915) <= 2) {
297 HAS_128_BYTE_Y_TILING(i915)) {
307 if (GRAPHICS_VER(i915) < 4)
309 else if (GRAPHICS_VER(i915) < 7)
318 struct drm_i915_private *i915 = arg;
324 if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
335 obj = huge_gem_object(i915,
337 (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
348 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
372 if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
383 tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
386 tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
395 max_pitch = setup_tile_size(&tile, i915);
405 if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
414 if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
424 if (GRAPHICS_VER(i915) >= 4) {
439 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
449 struct drm_i915_private *i915 = arg;
457 if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
469 if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
472 obj = huge_gem_object(i915,
474 (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
485 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
503 tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
506 tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
515 unsigned int max_pitch = setup_tile_size(&tile, i915);
520 if (GRAPHICS_VER(i915) < 4)
533 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
542 struct drm_i915_private *i915 = to_i915(obj->base.dev);
545 for_each_uabi_engine(engine, i915) {
590 static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
592 if (HAS_LMEM(i915))
599 create_sys_or_internal(struct drm_i915_private *i915,
602 if (HAS_LMEM(i915)) {
604 i915->mm.regions[INTEL_REGION_SMEM];
606 return __i915_gem_object_create_user(i915, size, &sys_region, 1);
609 return i915_gem_object_create_internal(i915, size);
612 static bool assert_mmap_offset(struct drm_i915_private *i915,
620 obj = create_sys_or_internal(i915, size);
624 ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
630 static void disable_retire_worker(struct drm_i915_private *i915)
632 i915_gem_driver_unregister__shrinker(i915);
633 intel_gt_pm_get_untracked(to_gt(i915));
634 cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
637 static void restore_retire_worker(struct drm_i915_private *i915)
639 igt_flush_test(i915);
640 intel_gt_pm_put_untracked(to_gt(i915));
641 i915_gem_driver_register__shrinker(i915);
644 static void mmap_offset_lock(struct drm_i915_private *i915)
645 __acquires(&i915->drm.vma_offset_manager->vm_lock)
647 write_lock(&i915->drm.vma_offset_manager->vm_lock);
650 static void mmap_offset_unlock(struct drm_i915_private *i915)
651 __releases(&i915->drm.vma_offset_manager->vm_lock)
653 write_unlock(&i915->drm.vma_offset_manager->vm_lock);
658 struct drm_i915_private *i915 = arg;
659 struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
664 int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
667 disable_retire_worker(i915);
668 GEM_BUG_ON(!to_gt(i915)->awake);
669 intel_gt_retire_requests(to_gt(i915));
670 i915_gem_drain_freed_objects(i915);
673 mmap_offset_lock(i915);
705 mmap_offset_unlock(i915);
708 if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
715 if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
722 obj = create_sys_or_internal(i915, PAGE_SIZE);
729 err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
735 if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
745 if (intel_gt_is_wedged(to_gt(i915)))
748 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
762 mmap_offset_lock(i915);
771 mmap_offset_unlock(i915);
772 restore_retire_worker(i915);
873 struct drm_i915_private *i915 = to_i915(obj->base.dev);
882 !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
895 static int __igt_mmap(struct drm_i915_private *i915,
917 addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
962 intel_gt_flush_ggtt_writes(to_gt(i915));
974 struct drm_i915_private *i915 = arg;
978 for_each_memory_region(mr, i915, id) {
993 obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
1000 err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
1002 err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
1004 err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);
1015 static void igt_close_objects(struct drm_i915_private *i915,
1033 i915_gem_drain_freed_objects(i915);
1090 igt_close_objects(mr->i915, objects);
1094 static int ___igt_mmap_migrate(struct drm_i915_private *i915,
1176 struct drm_i915_private *i915 = placements[0]->i915;
1184 obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
1203 addr = igt_mmap_offset(i915, offset, obj->base.size,
1226 err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
1261 err = ___igt_mmap_migrate(i915, obj, addr,
1275 for_each_gt(gt, i915, id) {
1303 igt_close_objects(i915, &objects);
1309 struct drm_i915_private *i915 = arg;
1310 struct intel_memory_region *system = i915->mm.regions[INTEL_REGION_SMEM];
1314 for_each_memory_region(mr, i915, id) {
1433 static int __igt_mmap_access(struct drm_i915_private *i915,
1454 addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1466 intel_gt_flush_ggtt_writes(to_gt(i915));
1482 intel_gt_flush_ggtt_writes(to_gt(i915));
1506 struct drm_i915_private *i915 = arg;
1510 for_each_memory_region(mr, i915, id) {
1517 obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1524 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
1526 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
1528 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
1530 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
1532 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);
1542 static int __igt_mmap_gpu(struct drm_i915_private *i915,
1572 addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1585 intel_gt_flush_ggtt_writes(to_gt(i915));
1587 for_each_uabi_engine(engine, i915) {
1620 drm_info_printer(engine->i915->drm.dev);
1652 struct drm_i915_private *i915 = arg;
1656 for_each_memory_region(mr, i915, id) {
1663 obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1670 err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
1672 err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
1674 err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);
1740 static int __igt_mmap_revoke(struct drm_i915_private *i915,
1755 addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1806 struct drm_i915_private *i915 = arg;
1810 for_each_memory_region(mr, i915, id) {
1817 obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1824 err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
1826 err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
1828 err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);
1838 int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
1851 return i915_live_subtests(tests, i915);
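The matches around lines 590-609 fall inside two small helpers that the selftests use to pick a backing store and a default mmap type for the device under test. Below is a minimal sketch of how those helpers fit together, reconstructed from the matched lines above; the lines this search did not report, in particular the return values of default_mapping(), are assumptions rather than part of the listing.

static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
{
	/* Assumed: region-aware fixed mapping on parts with local memory */
	if (HAS_LMEM(i915))
		return I915_MMAP_TYPE_FIXED;

	/* Assumed: legacy GTT mapping otherwise */
	return I915_MMAP_TYPE_GTT;
}

static struct drm_i915_gem_object *
create_sys_or_internal(struct drm_i915_private *i915,
		       unsigned long size)
{
	if (HAS_LMEM(i915)) {
		/* Matched lines 604/606: user object placed in system memory */
		struct intel_memory_region *sys_region =
			i915->mm.regions[INTEL_REGION_SMEM];

		return __i915_gem_object_create_user(i915, size, &sys_region, 1);
	}

	/* Matched line 609: plain internal object when there is no LMEM */
	return i915_gem_object_create_internal(i915, size);
}

Callers such as assert_mmap_offset() (line 612) and igt_mmap_offset_exhaustion() (lines 722 and 729) combine the two: they create the object with create_sys_or_internal() and then assign an mmap offset of the type returned by default_mapping(i915).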