/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "display/intel_frontbuffer.h"

#include "i915_config.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

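/*
 * Asynchronous clflush: the dma_fence_work provides the fence and worker
 * machinery, and @obj is the object whose backing pages will be flushed.
 * The object is pinned and referenced for the lifetime of the work.
 */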
struct clflush {
	struct dma_fence_work base;
	struct drm_i915_gem_object *obj;
};

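/*
 * Flush all CPU cache lines covering the object's backing store, then
 * inform frontbuffer tracking that any CPU writes are now visible.
 */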
static void __do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}

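/* Worker callback: perform the flush once all awaited fences have signaled. */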
static void clflush_work(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	__do_clflush(clflush->obj);
}

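/* Release callback: drop the page pin and reference taken at creation. */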
static void clflush_release(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	i915_gem_object_unpin_pages(clflush->obj);
	i915_gem_object_put(clflush->obj);
}

static const struct dma_fence_work_ops clflush_ops = {
	.name = "clflush",
	.work = clflush_work,
	.release = clflush_release,
};

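/*
 * Build the asynchronous flush work: pin the object's pages so they cannot
 * be reaped while the flush is pending and hold a reference on the object,
 * both released again in clflush_release(). Returns NULL on failure so
 * that the caller can fall back to a synchronous flush.
 */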
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
	struct clflush *clflush;

	GEM_BUG_ON(!obj->cache_dirty);

	clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (!clflush)
		return NULL;

	if (__i915_gem_object_get_pages(obj) < 0) {
		kfree(clflush);
		return NULL;
	}

	dma_fence_work_init(&clflush->base, &clflush_ops);
	clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

	return clflush;
}

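/**
 * i915_gem_clflush_object - flush the CPU cache of an object's backing pages
 * @obj: the GEM object to flush; the caller must hold the object lock
 * @flags: I915_CLFLUSH_SYNC to flush synchronously rather than from a worker,
 *	   I915_CLFLUSH_FORCE to flush even if the object is coherent for reads
 *
 * Returns true if the object needed flushing (performed immediately, queued
 * behind a fence, or deferred until the pages are first acquired), false if
 * no flush is required.
 *
 * A minimal usage sketch, assuming the caller has not already locked @obj:
 *
 *	i915_gem_object_lock(obj, NULL);
 *	i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
 *	i915_gem_object_unlock(obj);
 */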
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct clflush *clflush;

	assert_object_held(obj);

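	/*
	 * clflush is unnecessary on discrete platforms: objects there are
	 * never expected to have dirty CPU cache lines, so a flagged one
	 * would indicate a driver bug.
	 */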
	if (IS_DGFX(i915)) {
		WARN_ON_ONCE(obj->cache_dirty);
		return false;
	}

	/*
	 * Stolen memory is always coherent with the GPU, as it is either
	 * explicitly marked as write-combined by the system or the system
	 * itself is cache-coherent. Similarly, we only access struct pages
	 * through the CPU cache, so anything without struct pages backing
	 * it we consider to be always coherent and in no need of clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/*
	 * If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

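	/*
	 * Prefer to flush asynchronously: queue the flush behind all fences
	 * already attached to the object and publish the flush itself as a
	 * kernel fence, so that subsequent work on the object waits for it.
	 * Fall back to an immediate, synchronous flush if a synchronous
	 * flush was explicitly requested or the worker cannot be set up.
	 */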
	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC) &&
	    dma_resv_reserve_fences(obj->base.resv, 1) == 0)
		clflush = clflush_work_create(obj);
	if (clflush) {
		i915_sw_fence_await_reservation(&clflush->base.chain,
						obj->base.resv, true,
						i915_fence_timeout(i915),
						I915_FENCE_GFP);
		dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_work_commit(&clflush->base);
		/*
		 * We must have successfully populated the pages (since we
		 * are holding a pin on them as per the flush worker) to
		 * reach this point, which must mean we have already done
		 * the required flush-on-acquire, hence resetting
		 * cache_dirty here should be safe.
		 */
		obj->cache_dirty = false;
	} else if (obj->mm.pages) {
		__do_clflush(obj);
		obj->cache_dirty = false;
	} else {
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	return true;
}