i915_gem.c, r277487 (old) → r280183 (new)
1/*-
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the

--- 38 unchanged lines hidden ---

47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE.
52 */
53
54#include <sys/cdefs.h>
1/*-
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the

--- 38 unchanged lines hidden ---

47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE.
52 */
53
54#include <sys/cdefs.h>
55__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_gem.c 277487 2015-01-21 16:10:37Z kib $");
55__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_gem.c 280183 2015-03-17 18:50:33Z dumbbell $");
56
57#include <dev/drm2/drmP.h>
58#include <dev/drm2/drm.h>
59#include <dev/drm2/i915/i915_drm.h>
60#include <dev/drm2/i915/i915_drv.h>
61#include <dev/drm2/i915/intel_drv.h>
62#include <dev/drm2/i915/intel_ringbuffer.h>
63#include <sys/resourcevar.h>

--- 92 unchanged lines hidden ---

156 dev_priv = dev->dev_private;
157 if (!atomic_load_acq_int(&dev_priv->mm.wedged))
158 return (0);
159
160 mtx_lock(&dev_priv->error_completion_lock);
161 while (dev_priv->error_completion == 0) {
162 ret = -msleep(&dev_priv->error_completion,
163 &dev_priv->error_completion_lock, PCATCH, "915wco", 0);
56
57#include <dev/drm2/drmP.h>
58#include <dev/drm2/drm.h>
59#include <dev/drm2/i915/i915_drm.h>
60#include <dev/drm2/i915/i915_drv.h>
61#include <dev/drm2/i915/intel_drv.h>
62#include <dev/drm2/i915/intel_ringbuffer.h>
63#include <sys/resourcevar.h>

--- 92 unchanged lines hidden ---

156 dev_priv = dev->dev_private;
157 if (!atomic_load_acq_int(&dev_priv->mm.wedged))
158 return (0);
159
160 mtx_lock(&dev_priv->error_completion_lock);
161 while (dev_priv->error_completion == 0) {
162 ret = -msleep(&dev_priv->error_completion,
163 &dev_priv->error_completion_lock, PCATCH, "915wco", 0);
164 if (ret == -ERESTART)
165 ret = -ERESTARTSYS;
164 if (ret != 0) {
165 mtx_unlock(&dev_priv->error_completion_lock);
166 return (ret);
167 }
168 }
169 mtx_unlock(&dev_priv->error_completion_lock);
170
171 if (atomic_load_acq_int(&dev_priv->mm.wedged)) {

--- 122 unchanged lines hidden ---

294}
295
296int
297i915_gem_init_ioctl(struct drm_device *dev, void *data,
298 struct drm_file *file)
299{
300 struct drm_i915_gem_init *args;
301 drm_i915_private_t *dev_priv;
166 if (ret != 0) {
167 mtx_unlock(&dev_priv->error_completion_lock);
168 return (ret);
169 }
170 }
171 mtx_unlock(&dev_priv->error_completion_lock);
172
173 if (atomic_load_acq_int(&dev_priv->mm.wedged)) {

--- 122 unchanged lines hidden ---

296}
297
298int
299i915_gem_init_ioctl(struct drm_device *dev, void *data,
300 struct drm_file *file)
301{
302 struct drm_i915_gem_init *args;
303 drm_i915_private_t *dev_priv;
304 int error;
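The hunk above rewrites msleep(9)'s ERESTART into the Linux-style ERESTARTSYS before the error escapes the wait loop: with PCATCH set, msleep() returns the positive errno ERESTART when a pending signal should restart the syscall, while code ported from Linux only tests for -ERESTARTSYS or -EINTR. A minimal sketch of the idiom, assuming ERESTARTSYS is supplied by the drm2 compat header and with an illustrative channel and wmesg:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static int
drm_sleep_interruptible(void *chan, struct mtx *lock)
{
	int ret;

	/* msleep(9) returns a positive errno; negate it for Linux style. */
	ret = -msleep(chan, lock, PCATCH, "demo", 0);
	if (ret == -ERESTART)		/* FreeBSD: restart the syscall */
		ret = -ERESTARTSYS;	/* the value Linux-derived callers expect */
	return (ret);
}

The same translation recurs twice more below, in the unbind error checks and in the request-wait msleep loop.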
302
303 if (drm_core_check_feature(dev, DRIVER_MODESET))
304 return -ENODEV;
305
306 dev_priv = dev->dev_private;
307 args = data;
308
309 if (args->gtt_start >= args->gtt_end ||

--- 6 unchanged lines hidden ---

316 /* GEM with user mode setting was never supported on ilk and later. */
317 if (INTEL_INFO(dev)->gen >= 5)
318 return -ENODEV;
319
320 /*
321 * XXXKIB. The second-time initialization should be guarded
322 * against.
323 */
305
306 if (drm_core_check_feature(dev, DRIVER_MODESET))
307 return -ENODEV;
308
309 dev_priv = dev->dev_private;
310 args = data;
311
312 if (args->gtt_start >= args->gtt_end ||

--- 6 unchanged lines hidden ---

319 /* GEM with user mode setting was never supported on ilk and later. */
320 if (INTEL_INFO(dev)->gen >= 5)
321 return -ENODEV;
322
323 /*
324 * XXXKIB. The second-time initialization should be guarded
325 * against.
326 */
324 return (i915_gem_init_global_gtt(dev, args->gtt_start, args->gtt_end,
325 args->gtt_end));
327 DRM_LOCK(dev);
328 error = i915_gem_init_global_gtt(dev, args->gtt_start,
329 args->gtt_end, args->gtt_end);
330 DRM_UNLOCK(dev);
331 return (error);
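The GTT setup is now performed under the DRM lock, with the result propagated through a local instead of being returned directly. Note that args->gtt_end is passed twice; going by the parameter order of the Linux counterpart (an assumption here), the third argument is the end of the CPU-mappable range and the fourth the end of the whole aperture, so the UMS path treats the entire range as mappable:

	/* Assumed roles, after the Linux prototype:
	 * i915_gem_init_global_gtt(dev, start, mappable_end, end) */
	DRM_LOCK(dev);
	error = i915_gem_init_global_gtt(dev, args->gtt_start,
	    args->gtt_end /* mappable_end */, args->gtt_end /* end */);
	DRM_UNLOCK(dev);
	return (error);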
326}
327
328int
329i915_gem_idle(struct drm_device *dev)
330{
331 drm_i915_private_t *dev_priv;
332 int ret;
333
332}
333
334int
335i915_gem_idle(struct drm_device *dev)
336{
337 drm_i915_private_t *dev_priv;
338 int ret;
339
340 DRM_LOCK(dev);
341
334 dev_priv = dev->dev_private;
342 dev_priv = dev->dev_private;
335 if (dev_priv->mm.suspended)
343 if (dev_priv->mm.suspended) {
344 DRM_UNLOCK(dev);
336 return (0);
345 return (0);
346 }
337
338 ret = i915_gpu_idle(dev);
347
348 ret = i915_gpu_idle(dev);
339 if (ret != 0)
349 if (ret != 0) {
350 DRM_UNLOCK(dev);
340 return (ret);
351 return (ret);
352 }
341 i915_gem_retire_requests(dev);
342
343 /* Under UMS, be paranoid and evict. */
344 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
345 ret = i915_gem_evict_everything(dev, false);
353 i915_gem_retire_requests(dev);
354
355 /* Under UMS, be paranoid and evict. */
356 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
357 ret = i915_gem_evict_everything(dev, false);
346 if (ret != 0)
358 if (ret != 0) {
359 DRM_UNLOCK(dev);
347 return ret;
360 return ret;
361 }
348 }
349
350 i915_gem_reset_fences(dev);
351
352 /* Hack! Don't let anybody do execbuf while we don't control the chip.
353 * We need to replace this with a semaphore, or something.
354 * And not confound mm.suspended!
355 */
356 dev_priv->mm.suspended = 1;
357 callout_stop(&dev_priv->hangcheck_timer);
358
359 i915_kernel_lost_context(dev);
360 i915_gem_cleanup_ringbuffer(dev);
361
362 }
363
364 i915_gem_reset_fences(dev);
365
366 /* Hack! Don't let anybody do execbuf while we don't control the chip.
367 * We need to replace this with a semaphore, or something.
368 * And not confound mm.suspended!
369 */
370 dev_priv->mm.suspended = 1;
371 callout_stop(&dev_priv->hangcheck_timer);
372
373 i915_kernel_lost_context(dev);
374 i915_gem_cleanup_ringbuffer(dev);
375
376 DRM_UNLOCK(dev);
377
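i915_gem_idle() now acquires and releases the DRM lock itself rather than expecting the caller to hold it, which is why every early return in the new version must be preceded by DRM_UNLOCK(). A common way to avoid the repeated unlocks is a single exit label; a hypothetical reshaping, not the committed code:

static int
i915_gem_idle_sketch(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	DRM_LOCK(dev);
	if (dev_priv->mm.suspended)
		goto out;			/* already idle */
	ret = i915_gpu_idle(dev);
	if (ret != 0)
		goto out;
	i915_gem_retire_requests(dev);
	/* ... evict under UMS, reset fences, stop the hangcheck timer ... */
out:
	DRM_UNLOCK(dev);
	return (ret);
}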
362 /* Cancel the retire work handler, which should be idle now. */
363 taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->mm.retire_task, NULL);
364 return (ret);
365}
366
367void
368i915_gem_init_swizzling(struct drm_device *dev)
369{

--- 238 unchanged lines hidden ---

608 obj->pin_mappable |= map_and_fenceable;
609
610 return 0;
611}
612
613void
614i915_gem_object_unpin(struct drm_i915_gem_object *obj)
615{
378 /* Cancel the retire work handler, which should be idle now. */
379 taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->mm.retire_task, NULL);
380 return (ret);
381}
382
383void
384i915_gem_init_swizzling(struct drm_device *dev)
385{

--- 238 unchanged lines hidden ---

624 obj->pin_mappable |= map_and_fenceable;
625
626 return 0;
627}
628
629void
630i915_gem_object_unpin(struct drm_i915_gem_object *obj)
631{
616
632
617 KASSERT(obj->pin_count != 0, ("zero pin count"));
618 KASSERT(obj->gtt_space != NULL, ("No gtt mapping"));
619
620 if (--obj->pin_count == 0)
621 obj->pin_mappable = false;
622}
623
624int

--- 243 unchanged lines hidden ---

868 if (drm_core_check_feature(dev, DRIVER_MODESET))
869 return (0);
870 dev_priv = dev->dev_private;
871 if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) {
872 DRM_ERROR("Reenabling wedged hardware, good luck\n");
873 atomic_store_rel_int(&dev_priv->mm.wedged, 0);
874 }
875
633 KASSERT(obj->pin_count != 0, ("zero pin count"));
634 KASSERT(obj->gtt_space != NULL, ("No gtt mapping"));
635
636 if (--obj->pin_count == 0)
637 obj->pin_mappable = false;
638}
639
640int

--- 243 unchanged lines hidden ---

884 if (drm_core_check_feature(dev, DRIVER_MODESET))
885 return (0);
886 dev_priv = dev->dev_private;
887 if (atomic_load_acq_int(&dev_priv->mm.wedged) != 0) {
888 DRM_ERROR("Reenabling wedged hardware, good luck\n");
889 atomic_store_rel_int(&dev_priv->mm.wedged, 0);
890 }
891
892 DRM_LOCK(dev);
876 dev_priv->mm.suspended = 0;
877
878 ret = i915_gem_init_hw(dev);
879 if (ret != 0) {
893 dev_priv->mm.suspended = 0;
894
895 ret = i915_gem_init_hw(dev);
896 if (ret != 0) {
897 DRM_UNLOCK(dev);
880 return (ret);
881 }
882
883 KASSERT(list_empty(&dev_priv->mm.active_list), ("active list"));
884 KASSERT(list_empty(&dev_priv->mm.flushing_list), ("flushing list"));
885 KASSERT(list_empty(&dev_priv->mm.inactive_list), ("inactive list"));
886 DRM_UNLOCK(dev);
898 return (ret);
899 }
900
901 KASSERT(list_empty(&dev_priv->mm.active_list), ("active list"));
902 KASSERT(list_empty(&dev_priv->mm.flushing_list), ("flushing list"));
903 KASSERT(list_empty(&dev_priv->mm.inactive_list), ("inactive list"));
904 DRM_UNLOCK(dev);
905
887 ret = drm_irq_install(dev);
906 ret = drm_irq_install(dev);
888 DRM_LOCK(dev);
889 if (ret)
890 goto cleanup_ringbuffer;
891
892 return (0);
893
894cleanup_ringbuffer:
907 if (ret)
908 goto cleanup_ringbuffer;
909
910 return (0);
911
912cleanup_ringbuffer:
913 DRM_LOCK(dev);
895 i915_gem_cleanup_ringbuffer(dev);
896 dev_priv->mm.suspended = 1;
914 i915_gem_cleanup_ringbuffer(dev);
915 dev_priv->mm.suspended = 1;
916 DRM_UNLOCK(dev);
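i915_gem_entervt_ioctl() gets the same treatment: the DRM lock now brackets only the hardware re-initialization and the failure cleanup, and drm_irq_install() is deliberately called unlocked (the old code re-took the lock just before the call), presumably because IRQ installation acquires locks of its own. The resulting shape, condensed:

	DRM_LOCK(dev);
	dev_priv->mm.suspended = 0;
	ret = i915_gem_init_hw(dev);
	DRM_UNLOCK(dev);
	if (ret != 0)
		return (ret);

	ret = drm_irq_install(dev);	/* without the DRM lock held */
	if (ret != 0) {
		DRM_LOCK(dev);		/* re-take just for the cleanup */
		i915_gem_cleanup_ringbuffer(dev);
		dev_priv->mm.suspended = 1;
		DRM_UNLOCK(dev);
	}
	return (ret);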
897
898 return (ret);
899}
900
901int
902i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
903 struct drm_file *file_priv)
904{

--- 16 unchanged lines hidden ---

921 size = roundup(size, PAGE_SIZE);
922 if (size == 0)
923 return (-EINVAL);
924
925 obj = i915_gem_alloc_object(dev, size);
926 if (obj == NULL)
927 return (-ENOMEM);
928
917
918 return (ret);
919}
920
921int
922i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
923 struct drm_file *file_priv)
924{

--- 16 unchanged lines hidden ---

941 size = roundup(size, PAGE_SIZE);
942 if (size == 0)
943 return (-EINVAL);
944
945 obj = i915_gem_alloc_object(dev, size);
946 if (obj == NULL)
947 return (-ENOMEM);
948
929 handle = 0;
930 ret = drm_gem_handle_create(file, &obj->base, &handle);
931 if (ret != 0) {
932 drm_gem_object_release(&obj->base);
933 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
934 free(obj, DRM_I915_GEM);
949 ret = drm_gem_handle_create(file, &obj->base, &handle);
950 if (ret != 0) {
951 drm_gem_object_release(&obj->base);
952 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
953 free(obj, DRM_I915_GEM);
935 return (-ret);
954 return (ret);
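Two related cleanups here: the handle = 0 pre-initialization is dropped (drm_gem_handle_create() presumably sets the handle on every success path in the updated drm core), and the error is now returned as-is. The updated drm_gem_handle_create() evidently already reports a negative errno, so the old return (-ret) flipped the sign back to a positive value that ioctl callers would misinterpret. The convention, in a hypothetical wrapper:

/* drm2 internal helpers return negative errno values; pass them through. */
static int
create_handle(struct drm_file *file, struct drm_gem_object *gobj,
    uint32_t *handle)
{
	int ret;

	ret = drm_gem_handle_create(file, gobj, handle);
	if (ret != 0)
		return (ret);	/* already negative: do not negate again */
	return (0);
}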
936 }
937
938 /* drop reference from allocate - handle holds it now */
939 drm_gem_object_unreference(&obj->base);
940 CTR2(KTR_DRM, "object_create %p %x", obj, size);
941 *handle_p = handle;
942 return (0);
943}

--- 27 unchanged lines hidden ---

971}
972
973#define __user
974#define __force
975#define __iomem
976#define to_user_ptr(x) ((void *)(uintptr_t)(x))
977#define offset_in_page(x) ((x) & PAGE_MASK)
978#define page_to_phys(x) VM_PAGE_TO_PHYS(x)
955 }
956
957 /* drop reference from allocate - handle holds it now */
958 drm_gem_object_unreference(&obj->base);
959 CTR2(KTR_DRM, "object_create %p %x", obj, size);
960 *handle_p = handle;
961 return (0);
962}

--- 27 unchanged lines hidden ---

990}
991
992#define __user
993#define __force
994#define __iomem
995#define to_user_ptr(x) ((void *)(uintptr_t)(x))
996#define offset_in_page(x) ((x) & PAGE_MASK)
997#define page_to_phys(x) VM_PAGE_TO_PHYS(x)
979static inline long
980__copy_to_user(void __user *to, const void *from, unsigned long n)
981{
982 return (copyout(from, to, n) != 0 ? n : 0);
983}
984static inline int
985__copy_to_user_inatomic(void __user *to, const void *from, unsigned n)
986{
987 return (copyout_nofault(from, to, n) != 0 ? n : 0);
988}
989static inline unsigned long
998static inline int
999__copy_to_user_inatomic(void __user *to, const void *from, unsigned n)
1000{
1001 return (copyout_nofault(from, to, n) != 0 ? n : 0);
1002}
1003static inline unsigned long
990__copy_from_user(void *to, const void __user *from, unsigned long n)
991{
992 return ((copyin(__DECONST(void *, from), to, n) != 0 ? n : 0));
993}
994#define copy_from_user(to, from, n) __copy_from_user((to), (from), (n))
995static inline unsigned long
996__copy_from_user_inatomic_nocache(void *to, const void __user *from,
997 unsigned long n)
998{
999
1000 /*
1001 * XXXKIB. Equivalent Linux function is implemented using
1002 * MOVNTI for aligned moves. For unaligned head and tail,
1003 * normal move is performed. As such, it is not incorrect, if

--- 8 unchanged lines hidden ---

1012 char c;
1013 int ret = 0;
1014 const char __user *end = uaddr + size - 1;
1015
1016 if (unlikely(size == 0))
1017 return ret;
1018
1019 while (uaddr <= end) {
1004__copy_from_user_inatomic_nocache(void *to, const void __user *from,
1005 unsigned long n)
1006{
1007
1008 /*
1009 * XXXKIB. Equivalent Linux function is implemented using
1010 * MOVNTI for aligned moves. For unaligned head and tail,
1011 * normal move is performed. As such, it is not incorrect, if

--- 8 unchanged lines hidden ---
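The XXXKIB comment above, cut off by the elision, is explaining that Linux builds __copy_from_user_inatomic_nocache() around non-temporal MOVNTI stores for the aligned middle of the range, with ordinary moves for the unaligned head and tail, so skipping the non-temporal path is merely slower, never incorrect. The hidden body therefore presumably falls back to a plain copyin(); a minimal sketch consistent with the comment and with the neighbouring helpers (an assumption, since the real body is elided):

static inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
    unsigned long n)
{
	/*
	 * No MOVNTI fast path: an ordinary copyin() is correct, only
	 * potentially slower and more cache-polluting. Returns the
	 * number of bytes NOT copied, as the Linux API does.
	 */
	return (copyin(__DECONST(void *, from), to, n) != 0 ? n : 0);
}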

1020 char c;
1021 int ret = 0;
1022 const char __user *end = uaddr + size - 1;
1023
1024 if (unlikely(size == 0))
1025 return ret;
1026
1027 while (uaddr <= end) {
1020 ret = copyin(uaddr, &c, 1);
1028 ret = -copyin(uaddr, &c, 1);
1021 if (ret != 0)
1022 return -EFAULT;
1023 uaddr += PAGE_SIZE;
1024 }
1025
1026 /* Check whether the range spilled into the next page. */
1027 if (((unsigned long)uaddr & ~PAGE_MASK) ==
1028 ((unsigned long)end & ~PAGE_MASK)) {
1029 if (ret != 0)
1030 return -EFAULT;
1031 uaddr += PAGE_SIZE;
1032 }
1033
1034 /* Check whether the range spilled into the next page. */
1035 if (((unsigned long)uaddr & ~PAGE_MASK) ==
1036 ((unsigned long)end & ~PAGE_MASK)) {
1029 ret = copyin(end, &c, 1);
1037 ret = -copyin(end, &c, 1);
1030 }
1031
1038 }
1039
1032 return -ret;
1040 return ret;
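fault_in_multipages_readable() touches one byte per page of the user range, plus the final byte in case the range spills into one more page, so the pages are resident before the caller enters a no-fault section. The revision also negates copyin()'s positive errno at each call site rather than negating the accumulated value at the very end, keeping ret in negative-errno form throughout and making the final return ret consistent with the early return -EFAULT. A hypothetical caller, pre-faulting before an inatomic copy (args and its fields are illustrative):

	/*
	 * Fault the source buffer in up front so a later no-fault copy
	 * (e.g. via copyout_nofault()) is unlikely to bounce.
	 */
	if (fault_in_multipages_readable(to_user_ptr(args->data_ptr),
	    args->size) != 0)
		return (-EFAULT);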
1033}
1034
1035static inline int
1036fault_in_multipages_writeable(char __user *uaddr, int size)
1037{
1038 int ret = 0;
1039 char __user *end = uaddr + size - 1;
1040

--- 822 unchanged lines hidden ---

1863 if (args->size == 0)
1864 goto out;
1865 p = curproc;
1866 map = &p->p_vmspace->vm_map;
1867 size = round_page(args->size);
1868 PROC_LOCK(p);
1869 if (map->size + size > lim_cur(p, RLIMIT_VMEM)) {
1870 PROC_UNLOCK(p);
1041}
1042
1043static inline int
1044fault_in_multipages_writeable(char __user *uaddr, int size)
1045{
1046 int ret = 0;
1047 char __user *end = uaddr + size - 1;
1048

--- 822 unchanged lines hidden ---

1871 if (args->size == 0)
1872 goto out;
1873 p = curproc;
1874 map = &p->p_vmspace->vm_map;
1875 size = round_page(args->size);
1876 PROC_LOCK(p);
1877 if (map->size + size > lim_cur(p, RLIMIT_VMEM)) {
1878 PROC_UNLOCK(p);
1871 error = ENOMEM;
1879 error = -ENOMEM;
1872 goto out;
1873 }
1874 PROC_UNLOCK(p);
1875
1876 addr = 0;
1877 vm_object_reference(obj->vm_obj);
1880 goto out;
1881 }
1882 PROC_UNLOCK(p);
1883
1884 addr = 0;
1885 vm_object_reference(obj->vm_obj);
1878 DRM_UNLOCK(dev);
1879 rv = vm_map_find(map, obj->vm_obj, args->offset, &addr, args->size, 0,
1880 VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1881 VM_PROT_READ | VM_PROT_WRITE, MAP_INHERIT_SHARE);
1882 if (rv != KERN_SUCCESS) {
1883 vm_object_deallocate(obj->vm_obj);
1884 error = -vm_mmap_to_errno(rv);
1885 } else {
1886 args->addr_ptr = (uint64_t)addr;
1887 }
1886 rv = vm_map_find(map, obj->vm_obj, args->offset, &addr, args->size, 0,
1887 VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1888 VM_PROT_READ | VM_PROT_WRITE, MAP_INHERIT_SHARE);
1889 if (rv != KERN_SUCCESS) {
1890 vm_object_deallocate(obj->vm_obj);
1891 error = -vm_mmap_to_errno(rv);
1892 } else {
1893 args->addr_ptr = (uint64_t)addr;
1894 }
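Two changes land in this mmap path: the RLIMIT_VMEM failure now reports -ENOMEM, matching the negative-errno convention used everywhere else in the file, and the DRM lock is no longer dropped and re-taken around vm_map_find() (the old DRM_UNLOCK/DRM_LOCK bracket is gone), presumably because this ioctl path no longer runs with the lock held in the updated drm core. vm_map_find() itself reports KERN_* codes, so the result is converted on the error branch; the conversion in miniature:

	/* vm_map_find() speaks KERN_* codes; convert for the ioctl path. */
	rv = vm_map_find(map, obj->vm_obj, args->offset, &addr, args->size,
	    0, VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, MAP_INHERIT_SHARE);
	error = (rv == KERN_SUCCESS) ? 0 : -vm_mmap_to_errno(rv);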
1888 DRM_LOCK(dev);
1889out:
1890 drm_gem_object_unreference(obj);
1891 return (error);
1892}
1893
1894static int
1895i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
1896 vm_ooffset_t foff, struct ucred *cred, u_short *color)

--- 729 unchanged lines hidden ---

2626 &dev_priv->mm.gtt_space, size, alignment, 0,
2627 dev_priv->mm.gtt_mappable_end, 0);
2628 else
2629 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2630 size, alignment, 0);
2631 if (free_space != NULL) {
2632 if (map_and_fenceable)
2633 obj->gtt_space = drm_mm_get_block_range_generic(
1895out:
1896 drm_gem_object_unreference(obj);
1897 return (error);
1898}
1899
1900static int
1901i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
1902 vm_ooffset_t foff, struct ucred *cred, u_short *color)

--- 729 unchanged lines hidden ---

2632 &dev_priv->mm.gtt_space, size, alignment, 0,
2633 dev_priv->mm.gtt_mappable_end, 0);
2634 else
2635 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2636 size, alignment, 0);
2637 if (free_space != NULL) {
2638 if (map_and_fenceable)
2639 obj->gtt_space = drm_mm_get_block_range_generic(
2634 free_space, size, alignment, 0,
2640 free_space, size, alignment, 0, 0,
2635 dev_priv->mm.gtt_mappable_end, 1);
2636 else
2637 obj->gtt_space = drm_mm_get_block_generic(free_space,
2641 dev_priv->mm.gtt_mappable_end, 1);
2642 else
2643 obj->gtt_space = drm_mm_get_block_generic(free_space,
2638 size, alignment, 1);
2644 size, alignment, 0, 1);
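Both drm_mm allocator calls grow one argument here. This tracks the Linux drm_mm API gaining a "color" parameter (used by later GEM code to express cache-coloring constraints between neighbouring nodes); this driver passes 0, meaning no coloring. The two call shapes with the parameters labelled (the argument names follow the Linux prototypes, an assumption here):

	/* (node, size, alignment, color, start, end, atomic) */
	obj->gtt_space = drm_mm_get_block_range_generic(free_space,
	    size, alignment, 0 /* color */, 0 /* start */,
	    dev_priv->mm.gtt_mappable_end /* end */, 1 /* atomic */);

	/* (node, size, alignment, color, atomic) */
	obj->gtt_space = drm_mm_get_block_generic(free_space,
	    size, alignment, 0 /* color */, 1 /* atomic */);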
2639 }
2640 if (obj->gtt_space == NULL) {
2641 ret = i915_gem_evict_something(dev, size, alignment,
2642 map_and_fenceable);
2643 if (ret != 0)
2644 return (ret);
2645 goto search_free;
2646 }

--- 119 unchanged lines hidden ---

2766 if (obj->gtt_space == NULL)
2767 return (0);
2768 if (obj->pin_count != 0) {
2769 DRM_ERROR("Attempting to unbind pinned buffer\n");
2770 return (-EINVAL);
2771 }
2772
2773 ret = i915_gem_object_finish_gpu(obj);
2645 }
2646 if (obj->gtt_space == NULL) {
2647 ret = i915_gem_evict_something(dev, size, alignment,
2648 map_and_fenceable);
2649 if (ret != 0)
2650 return (ret);
2651 goto search_free;
2652 }

--- 119 unchanged lines hidden ---

2772 if (obj->gtt_space == NULL)
2773 return (0);
2774 if (obj->pin_count != 0) {
2775 DRM_ERROR("Attempting to unbind pinned buffer\n");
2776 return (-EINVAL);
2777 }
2778
2779 ret = i915_gem_object_finish_gpu(obj);
2774 if (ret == -ERESTART || ret == -EINTR)
2780 if (ret == -ERESTARTSYS || ret == -EINTR)
2775 return (ret);
2776
2777 i915_gem_object_finish_gtt(obj);
2778
2779 if (ret == 0)
2780 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2781 return (ret);
2782
2783 i915_gem_object_finish_gtt(obj);
2784
2785 if (ret == 0)
2786 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2781 if (ret == -ERESTART || ret == -EINTR)
2787 if (ret == -ERESTARTSYS || ret == -EINTR)
2782 return (ret);
2783 if (ret != 0) {
2784 i915_gem_clflush_object(obj);
2785 obj->base.read_domains = obj->base.write_domain =
2786 I915_GEM_DOMAIN_CPU;
2787 }
2788
2789 ret = i915_gem_object_put_fence(obj);

--- 531 unchanged lines hidden ---

3321 if (!ring->irq_get(ring)) {
3322 mtx_unlock(&dev_priv->irq_lock);
3323 return (-ENODEV);
3324 }
3325
3326 flags = interruptible ? PCATCH : 0;
3327 while (!i915_seqno_passed(ring->get_seqno(ring), seqno)
3328 && !atomic_load_acq_int(&dev_priv->mm.wedged) &&
2788 return (ret);
2789 if (ret != 0) {
2790 i915_gem_clflush_object(obj);
2791 obj->base.read_domains = obj->base.write_domain =
2792 I915_GEM_DOMAIN_CPU;
2793 }
2794
2795 ret = i915_gem_object_put_fence(obj);

--- 531 unchanged lines hidden ---

3327 if (!ring->irq_get(ring)) {
3328 mtx_unlock(&dev_priv->irq_lock);
3329 return (-ENODEV);
3330 }
3331
3332 flags = interruptible ? PCATCH : 0;
3333 while (!i915_seqno_passed(ring->get_seqno(ring), seqno)
3334 && !atomic_load_acq_int(&dev_priv->mm.wedged) &&
3329 ret == 0)
3335 ret == 0) {
3330 ret = -msleep(ring, &dev_priv->irq_lock, flags, "915gwr", 0);
3336 ret = -msleep(ring, &dev_priv->irq_lock, flags, "915gwr", 0);
3337 if (ret == -ERESTART)
3338 ret = -ERESTARTSYS;
3339 }
3331 ring->irq_put(ring);
3332 mtx_unlock(&dev_priv->irq_lock);
3333
3334 CTR3(KTR_DRM, "request_wait_end %s %d %d", ring->name, seqno, ret);
3335
3336 return ret;
3337}
3338

--- 240 unchanged lines hidden ---

3579 int i;
3580
3581 if (list_empty(&ring->request_list))
3582 return;
3583
3584 seqno = ring->get_seqno(ring);
3585 CTR2(KTR_DRM, "retire_request_ring %s %d", ring->name, seqno);
3586
3340 ring->irq_put(ring);
3341 mtx_unlock(&dev_priv->irq_lock);
3342
3343 CTR3(KTR_DRM, "request_wait_end %s %d %d", ring->name, seqno, ret);
3344
3345 return ret;
3346}
3347

--- 240 unchanged lines hidden ---

3588 int i;
3589
3590 if (list_empty(&ring->request_list))
3591 return;
3592
3593 seqno = ring->get_seqno(ring);
3594 CTR2(KTR_DRM, "retire_request_ring %s %d", ring->name, seqno);
3595
3587 for (i = 0; i < DRM_ARRAY_SIZE(ring->sync_seqno); i++)
3596 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
3588 if (seqno >= ring->sync_seqno[i])
3589 ring->sync_seqno[i] = 0;
3590
3591 while (!list_empty(&ring->request_list)) {
3592 struct drm_i915_gem_request *request;
3593
3594 request = list_first_entry(&ring->request_list,
3595 struct drm_i915_gem_request,

--- 446 unchanged lines hidden ---

4042 if (dev_priv->mm.phys_objs[id - 1] != NULL || size == 0)
4043 return (0);
4044
4045 phys_obj = malloc(sizeof(struct drm_i915_gem_phys_object), DRM_I915_GEM,
4046 M_WAITOK | M_ZERO);
4047
4048 phys_obj->id = id;
4049
3597 if (seqno >= ring->sync_seqno[i])
3598 ring->sync_seqno[i] = 0;
3599
3600 while (!list_empty(&ring->request_list)) {
3601 struct drm_i915_gem_request *request;
3602
3603 request = list_first_entry(&ring->request_list,
3604 struct drm_i915_gem_request,

--- 446 unchanged lines hidden ---

4051 if (dev_priv->mm.phys_objs[id - 1] != NULL || size == 0)
4052 return (0);
4053
4054 phys_obj = malloc(sizeof(struct drm_i915_gem_phys_object), DRM_I915_GEM,
4055 M_WAITOK | M_ZERO);
4056
4057 phys_obj->id = id;
4058
4050 phys_obj->handle = drm_pci_alloc(dev, size, align, ~0);
4059 phys_obj->handle = drm_pci_alloc(dev, size, align, BUS_SPACE_MAXADDR);
4051 if (phys_obj->handle == NULL) {
4052 ret = -ENOMEM;
4053 goto free_obj;
4054 }
4055 pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
4056 size / PAGE_SIZE, PAT_WRITE_COMBINING);
4057
4058 dev_priv->mm.phys_objs[id - 1] = phys_obj;

--- 132 unchanged lines hidden ---

4191 vm_page_reference(m);
4192 vm_page_lock(m);
4193 vm_page_unwire(m, PQ_INACTIVE);
4194 vm_page_unlock(m);
4195 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
4196 }
4197 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4198
4060 if (phys_obj->handle == NULL) {
4061 ret = -ENOMEM;
4062 goto free_obj;
4063 }
4064 pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
4065 size / PAGE_SIZE, PAT_WRITE_COMBINING);
4066
4067 dev_priv->mm.phys_objs[id - 1] = phys_obj;
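The phys-object allocation now names its DMA bound: the last argument of drm_pci_alloc() is the highest bus address the buffer may occupy, and BUS_SPACE_MAXADDR states the platform's maximum bus address explicitly instead of the bare ~0, whose effective width depends on integer promotion. Condensed from the source:

	/* (device, size, alignment, highest acceptable bus address) */
	phys_obj->handle = drm_pci_alloc(dev, size, align, BUS_SPACE_MAXADDR);
	if (phys_obj->handle == NULL) {
		ret = -ENOMEM;
		goto free_obj;
	}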

--- 132 unchanged lines hidden ---

4200 vm_page_reference(m);
4201 vm_page_lock(m);
4202 vm_page_unwire(m, PQ_INACTIVE);
4203 vm_page_unlock(m);
4204 atomic_add_long(&i915_gem_wired_pages_cnt, -1);
4205 }
4206 VM_OBJECT_WUNLOCK(obj->base.vm_obj);
4207
4199 return (0);
4208 return (ret);
4200}
4201
4202static int
4203i915_gpu_is_active(struct drm_device *dev)
4204{
4205 drm_i915_private_t *dev_priv;
4206
4207 dev_priv = dev->dev_private;

--- 65 unchanged lines hidden ---
4209}
4210
4211static int
4212i915_gpu_is_active(struct drm_device *dev)
4213{
4214 drm_i915_private_t *dev_priv;
4215
4216 dev_priv = dev->dev_private;

--- 65 unchanged lines hidden ---