/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo.h>

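/*
 * Unwind a partially built reservation: walk @list backwards starting
 * at the predecessor of @entry and unlock every buffer reserved so far.
 */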
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					      struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

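/**
 * ttm_eu_backoff_reservation - undo a successful reservation of a list
 * of buffer objects
 * @ticket: ww_acquire_ctx from the reservation call, or NULL
 * @list: thread-private list of ttm_validate_buffer structs
 *
 * Moves each buffer to the tail of its LRU list, drops its reservation
 * lock and, if @ticket is non-NULL, finishes the acquire context.
 */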
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
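
/*
 * A minimal usage sketch (not part of this file): a driver reserves a
 * whole list up front and, if anything later fails, backs the whole
 * list off rather than unlocking buffers one by one. my_validate() is
 * a hypothetical driver callback; @list holds ttm_validate_buffer
 * entries.
 *
 *	struct ww_acquire_ctx ticket;
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ret = my_validate(&list);
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &list);
 *		return ret;
 *	}
 */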

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

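/**
 * ttm_eu_reserve_buffers - reserve a list of buffer objects for validation
 * @ticket: ww_acquire_ctx used for deadlock avoidance, or NULL
 * @list: thread-private list of ttm_validate_buffer structs
 * @intr: whether to wait interruptibly for reservations
 * @dups: optional list that duplicate entries are moved onto, or NULL
 *
 * Tries to reserve every buffer on @list, making sure each reservation
 * object has room for at least max(num_shared, 1) fences. On -EDEADLK
 * the contended buffer is re-acquired on the ww mutex slowpath and the
 * remaining buffers are reserved again.
 *
 * Returns 0 on success. On error (e.g. -ERESTARTSYS when @intr and a
 * signal arrives), buffers locked earlier in the walk have been
 * unlocked again and @ticket, if any, has been finished.
 */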
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		unsigned int num_fences;

		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		/* -EALREADY means the BO is a duplicate of one we already
		 * hold; park it on the dups list and step the iterator
		 * back so the loop continues with the next entry.
		 */
		if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;

			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		num_fences = max(entry->num_shared, 1u);
		if (!ret) {
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);
		if (ret == -EDEADLK)
			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);

		if (!ret)
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);

		if (unlikely(ret != 0)) {
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* Move this item to the front of the list; continuing
		 * the iteration from here then re-reserves every other
		 * buffer without any extra bookkeeping.
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
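
/*
 * A minimal sketch of the duplicates path (not part of this file): if
 * the same BO may appear on @list more than once, pass a @dups list so
 * reservation does not fail with -EALREADY. Entries moved to @dups
 * share a reservation with their first occurrence on @list and are
 * fenced and unlocked through that occurrence, not separately.
 *
 *	LIST_HEAD(dups);
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, &dups);
 *	if (ret)
 *		return ret;
 *	...
 *	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
 */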
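/**
 * ttm_eu_fence_buffer_objects - fence all buffer objects on a list
 * @ticket: ww_acquire_ctx from the reservation call, or NULL
 * @list: thread-private list of ttm_validate_buffer structs
 * @fence: fence to add to each buffer's reservation object
 *
 * Should be called once command submission is complete: adds @fence to
 * every buffer (as a read fence when num_shared is non-zero, otherwise
 * as a write fence), moves the buffers to the tail of their LRU lists,
 * unreserves them and, if @ticket is non-NULL, finishes the acquire
 * context.
 */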
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
				   DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
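
/*
 * A minimal sketch (not part of this file) of how num_shared selects
 * the fence usage above: a buffer the GPU only reads can take a shared
 * (read) fence, while a buffer it writes wants an exclusive (write)
 * fence. tex_bo and dst_bo are hypothetical.
 *
 *	struct ttm_validate_buffer tex = { .bo = tex_bo, .num_shared = 1 };
 *	struct ttm_validate_buffer dst = { .bo = dst_bo, .num_shared = 0 };
 *
 *	list_add(&tex.head, &list);
 *	list_add(&dst.head, &list);
 */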