/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "intel_memory_region.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ww.h"
#include "i915_vma_types.h"

enum intel_region_id;

#define obj_to_i915(obj__) to_i915((obj__)->base.dev)

static inline bool i915_gem_object_size_2big(u64 size)
{
	struct drm_i915_gem_object *obj;

	if (overflows_type(size, obj->base.size))
		return true;

	return false;
}

unsigned int i915_gem_get_pat_index(struct drm_i915_private *i915,
				    enum i915_cache_level level);
bool i915_gem_object_has_cache_level(const struct drm_i915_gem_object *obj,
				     enum i915_cache_level lvl);
void i915_gem_init__objects(struct drm_i915_private *i915);

void i915_objects_module_exit(void);
int i915_objects_module_init(void);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key,
			  unsigned alloc_flags);

void __i915_gem_object_fini(struct drm_i915_gem_object *obj);

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;

void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args);
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages);
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
				    struct sg_table *pages);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}
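
/*
 * A minimal usage sketch (hypothetical caller): resolve a userspace handle
 * to an object, then drop the reference the lookup acquired once done.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, handle);
 *	if (!obj)
 *		return -ENOENT;
 *	... use obj ...
 *	i915_gem_object_put(obj);
 */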

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

/*
 * If more than one potential simultaneous locker, assert held.
 */
static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
{
	/*
	 * Note mm list lookup is protected by
	 * kref_get_unless_zero().
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    kref_read(&obj->base.refcount) > 0)
		assert_object_held(obj);
}

static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww) {
		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_link, &ww->obj_list);
	}
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK) {
		i915_gem_object_get(obj);
		ww->contended = obj;
	}

	return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj,
					   struct i915_gem_ww_ctx *ww)
{
	if (!ww)
		return dma_resv_trylock(obj->base.resv);
	else
		return ww_mutex_trylock(&obj->base.resv->lock, &ww->ctx);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	if (obj->ops->adjust_lru)
		obj->ops->adjust_lru(obj);

	dma_resv_unlock(obj->base.resv);
}
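
/*
 * A minimal sketch of the ww locking dance, assuming the helpers from
 * i915_gem_ww.h (i915_gem_ww_ctx_init/backoff/fini): on -EDEADLK the
 * contended object is stashed in @ww, and the backoff drops all other
 * locks and relocks it before the transaction is retried.
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 *retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err) {
 *		... all locked objects stay locked until the ctx is finished ...
 *	}
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */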

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline bool
i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_PROTECTED;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);

bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

/**
 * __i915_gem_object_page_iter_get_sg - find the scatterlist entry that
 * contains a given page offset, via an i915_gem_object_page_iter
 * @obj: i915 GEM buffer object
 * @iter: i915 GEM buffer object page iterator
 * @n: page offset
 * @offset: returned offset of page @n within the returned scatterlist entry
 *
 * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
 *          Takes and releases the RCU lock to search the radix_tree of
 *          i915_gem_object_page_iter.
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Prefer the wrapper macro i915_gem_object_page_iter_get_sg().
 */
struct scatterlist *
__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
				   struct i915_gem_object_page_iter *iter,
				   pgoff_t n,
				   unsigned int *offset);

/**
 * i915_gem_object_page_iter_get_sg - wrapper macro for
 * __i915_gem_object_page_iter_get_sg()
 * @obj: i915 GEM buffer object
 * @it: i915 GEM buffer object page iterator
 * @n: page offset
 * @offset: returned offset of page @n within the returned scatterlist entry
 *
 * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
 *          Takes and releases the RCU lock to search the radix_tree of
 *          i915_gem_object_page_iter.
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * To avoid truncation of the page offset, the macro asserts that @n is
 * castable to pgoff_t before calling __i915_gem_object_page_iter_get_sg().
 */
#define i915_gem_object_page_iter_get_sg(obj, it, n, offset) ({	\
	static_assert(castable_to_type(n, pgoff_t));		\
	__i915_gem_object_page_iter_get_sg(obj, it, n, offset);	\
})

/**
 * __i915_gem_object_get_sg - find the scatterlist entry that contains a
 * given page offset in a drm_i915_gem_object's shmem backing store
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: returned offset of page @n within the returned scatterlist entry
 *
 * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
 * i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Prefer the wrapper macro i915_gem_object_get_sg().
 * See also __i915_gem_object_page_iter_get_sg()
 */
static inline struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj, pgoff_t n,
			 unsigned int *offset)
{
	return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_page, n, offset);
}

/**
 * i915_gem_object_get_sg - wrapper macro for __i915_gem_object_get_sg()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: returned offset of page @n within the returned scatterlist entry
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * To avoid truncation of the page offset, the macro asserts that @n is
 * castable to pgoff_t before calling __i915_gem_object_get_sg().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_sg(obj, n, offset) ({	\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_sg(obj, n, offset);	\
})

/**
 * __i915_gem_object_get_sg_dma - find the scatterlist entry that contains a
 * given page offset in a drm_i915_gem_object's DMA mapped scatterlist
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: returned offset of page @n within the returned scatterlist entry
 *
 * It uses drm_i915_gem_object's internal DMA mapped scatterlist lookup function
 * as i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Prefer the wrapper macro i915_gem_object_get_sg_dma().
 * See also __i915_gem_object_page_iter_get_sg()
 */
static inline struct scatterlist *
__i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj, pgoff_t n,
			     unsigned int *offset)
{
	return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_dma_page, n, offset);
}

/**
 * i915_gem_object_get_sg_dma - wrapper macro for __i915_gem_object_get_sg_dma()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: returned offset of page @n within the returned scatterlist entry
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * To avoid truncation of the page offset, the macro asserts that @n is
 * castable to pgoff_t before calling __i915_gem_object_get_sg_dma().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_sg_dma(obj, n, offset) ({	\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_sg_dma(obj, n, offset);	\
})

/**
 * __i915_gem_object_get_page - helper to find the target page with a page offset
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
 * i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg()
 * internally.
 *
 * Returns:
 * The target page pointer.
 *
 * Prefer the wrapper macro i915_gem_object_get_page().
 * See also __i915_gem_object_page_iter_get_sg()
 */
struct vm_page *
__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_page - wrapper macro for __i915_gem_object_get_page
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The target page pointer.
 *
 * To avoid truncation of the page offset, the macro asserts that @n is
 * castable to pgoff_t before calling __i915_gem_object_get_page().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_page(obj, n) ({		\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_page(obj, n);		\
})

/**
 * __i915_gem_object_get_dirty_page - helper to find the target page with a page
 * offset
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * It works like i915_gem_object_get_page(), but it marks the returned page dirty.
 *
 * Returns:
 * The target page pointer.
 *
 * Prefer the wrapper macro i915_gem_object_get_dirty_page().
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
 */
struct vm_page *
__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_dirty_page - wrapper macro for __i915_gem_object_get_dirty_page
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The target page pointer.
 *
 * To avoid truncation of the page offset, the macro asserts that @n is
 * castable to pgoff_t before calling __i915_gem_object_get_dirty_page().
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
 */
#define i915_gem_object_get_dirty_page(obj, n) ({	\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_dirty_page(obj, n);	\
})

/**
 * __i915_gem_object_get_dma_address_len - helper to get the bus address of a
 * targeted DMA mapped scatterlist entry from an i915 GEM buffer object, and
 * its length
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @len: returned length of the DMA mapping remaining in the scatterlist
 *       entry, starting from page @n
 *
 * Returns:
 * The bus address of the targeted DMA mapped scatterlist entry at page @n
 *
 * Prefer the wrapper macro i915_gem_object_get_dma_address_len().
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
 */
dma_addr_t
__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, pgoff_t n,
				      unsigned int *len);

/**
 * i915_gem_object_get_dma_address_len - wrapper macro for
 * __i915_gem_object_get_dma_address_len
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @len: returned length of the DMA mapping remaining in the scatterlist
 *       entry, starting from page @n
 *
 * Returns:
 * The bus address of the targeted DMA mapped scatterlist entry at page @n
 *
 * To avoid truncation of the page offset, the macro asserts that @n is
 * castable to pgoff_t before calling
 * __i915_gem_object_get_dma_address_len().
 * See also __i915_gem_object_page_iter_get_sg() and
 * __i915_gem_object_get_dma_address_len()
 */
#define i915_gem_object_get_dma_address_len(obj, n, len) ({	\
	static_assert(castable_to_type(n, pgoff_t));		\
	__i915_gem_object_get_dma_address_len(obj, n, len);	\
})

/**
 * __i915_gem_object_get_dma_address - helper to get the bus address of a
 * targeted DMA mapped scatterlist entry from an i915 GEM buffer object
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The bus address of the targeted DMA mapped scatterlist entry at page @n
 *
 * Prefer the wrapper macro i915_gem_object_get_dma_address().
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
 */
dma_addr_t
__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_dma_address - wrapper macro for
 * __i915_gem_object_get_dma_address
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The bus address of the targeted DMA mapped scatterlist entry at page @n
 *
 * To avoid truncation of the page offset, the macro asserts that @n is
 * castable to pgoff_t before calling __i915_gem_object_get_dma_address().
 * See also __i915_gem_object_page_iter_get_sg() and
 * __i915_gem_object_get_dma_address()
 */
#define i915_gem_object_get_dma_address(obj, n) ({	\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_dma_address(obj, n);	\
})
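
/*
 * A minimal sketch (hypothetical caller, backing pages already pinned):
 * resolve a page offset to its CPU page and its DMA bus address.
 *
 *	struct vm_page *page = i915_gem_object_get_page(obj, n);
 *	dma_addr_t addr = i915_gem_object_get_dma_address(obj, n);
 */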

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);
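
/*
 * A minimal pin/unpin sketch (hypothetical caller): the object lock must be
 * held to populate the backing store, but the page pin itself may outlive
 * the lock.
 *
 *	err = i915_gem_object_lock_interruptible(obj, NULL);
 *	if (err)
 *		return err;
 *	err = i915_gem_object_pin_pages(obj);
 *	i915_gem_object_unlock(obj);
 *	if (err)
 *		return err;
 *	... access the pages, e.g. via i915_gem_object_get_page() ...
 *	i915_gem_object_unpin_pages(obj);
 */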

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
						    enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}
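
/*
 * A minimal mapping sketch (hypothetical caller): pin and map the object,
 * write through the CPU mapping, flush for the GPU, then drop the mapping pin.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */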

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
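
/*
 * A minimal CPU-access sketch (hypothetical caller, object lock held):
 * prepare_write reports which clflushes the access needs, and
 * finish_access drops the page pin taken by the prepare call.
 *
 *	unsigned int needs_clflush;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *	... write the pages, honouring CLFLUSH_BEFORE / CLFLUSH_AFTER ...
 *	i915_gem_object_finish_access(obj);
 */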

int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr);
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj);

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_set_pat_index(struct drm_i915_gem_object *obj,
				   unsigned int pat_index);
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
				     u32 alignment,
				     const struct i915_gtt_view *view,
				     unsigned int flags);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (i915_gem_cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr);

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

void __i915_gem_free_object_rcu(struct rcu_head *head);

void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj);

void __i915_gem_free_object(struct drm_i915_gem_object *obj);

bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);

bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);

int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id);
int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			      struct i915_gem_ww_ctx *ww,
			      enum intel_region_id id,
			      unsigned int flags);

bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id);

int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
				   unsigned int flags);

bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type);

bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj);

#ifdef __linux__
int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment);
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup);
#else
int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment,
			 struct drm_i915_gem_object *obj);
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup,
			 struct drm_i915_gem_object *obj);
#endif
void __shmem_writeback(size_t size, struct address_space *mapping);

#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
	return obj->userptr.notifier.mm;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }

#endif

/**
 * i915_gem_object_get_frontbuffer - Get the object's frontbuffer
 * @obj: The object whose frontbuffer to get.
 *
 * Get a pointer to the object's frontbuffer if one exists. Note that an RCU
 * mechanism is used to handle e.g. an ongoing removal of the frontbuffer
 * pointer.
 *
 * Return: pointer to the object's frontbuffer if one exists, NULL otherwise
 */
static inline struct intel_frontbuffer *
i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj)
{
	struct intel_frontbuffer *front;

	if (likely(!rcu_access_pointer(obj->frontbuffer)))
		return NULL;

	rcu_read_lock();
	do {
		front = rcu_dereference(obj->frontbuffer);
		if (!front)
			break;

		if (unlikely(!kref_get_unless_zero(&front->ref)))
			continue;

		if (likely(front == rcu_access_pointer(obj->frontbuffer)))
			break;

		intel_frontbuffer_put(front);
	} while (1);
	rcu_read_unlock();

	return front;
}

/**
 * i915_gem_object_set_frontbuffer - Set the object's frontbuffer
 * @obj: The object whose frontbuffer to set.
 * @front: The frontbuffer to set
 *
 * Set the object's frontbuffer pointer. If a frontbuffer is already set for
 * the object, keep it and return its pointer to the caller. Note that an RCU
 * mechanism is used to handle e.g. an ongoing removal of the frontbuffer
 * pointer. This function is protected by i915->display.fb_tracking.lock.
 *
 * Return: pointer to the frontbuffer which was set.
 */
static inline struct intel_frontbuffer *
i915_gem_object_set_frontbuffer(struct drm_i915_gem_object *obj,
				struct intel_frontbuffer *front)
{
	struct intel_frontbuffer *cur = front;

	if (!front) {
		RCU_INIT_POINTER(obj->frontbuffer, NULL);
	} else if (rcu_access_pointer(obj->frontbuffer)) {
		cur = rcu_dereference_protected(obj->frontbuffer, true);
		kref_get(&cur->ref);
	} else {
		drm_gem_object_get(intel_bo_to_drm_bo(obj));
		rcu_assign_pointer(obj->frontbuffer, front);
	}

	return cur;
}

#endif