/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_VMA_H__
#define __I915_VMA_H__

#include <linux/io-mapping.h>
#include <linux/rbtree.h>

#include <drm/drm_mm.h>

#include "gt/intel_ggtt_fencing.h"
#include "gem/i915_gem_object.h"

#include "i915_gem_gtt.h"

#include "i915_active.h"
#include "i915_request.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"

struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_gtt_view *view);

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
#define I915_VMA_RELEASE_MAP BIT(0)

static inline bool i915_vma_is_active(const struct i915_vma *vma)
{
	return !i915_active_is_idle(&vma->active);
}

/* do not reserve memory to prevent deadlocks */
#define __EXEC_OBJECT_NO_RESERVE BIT(31)
#define __EXEC_OBJECT_NO_REQUEST_AWAIT BIT(30)

int __must_check _i915_vma_move_to_active(struct i915_vma *vma,
					  struct i915_request *rq,
					  struct dma_fence *fence,
					  unsigned int flags);
static inline int __must_check
i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq,
			unsigned int flags)
{
	return _i915_vma_move_to_active(vma, rq, &rq->fence, flags);
}
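
/*
 * A minimal usage sketch (illustrative, not lifted verbatim from the
 * driver): while constructing a request, each vma the request will
 * access is marked active, with EXEC_OBJECT_WRITE set for writes:
 *
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	if (err)
 *		return err;
 */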

#ifdef __linux__
#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter)
#else
#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags)
#endif

static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_is_dpt(const struct i915_vma *vma)
{
	return i915_is_dpt(vma->vm);
}

static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
}

static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	set_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_unset_ggtt_write(struct i915_vma *vma)
{
	return test_and_clear_bit(I915_VMA_GGTT_WRITE_BIT,
				  __i915_vma_flags(vma));
}

void i915_vma_flush_writes(struct i915_vma *vma);

static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_set_userfault(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	return test_and_set_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}

static inline void i915_vma_unset_userfault(struct i915_vma *vma)
{
	return clear_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_is_closed(const struct i915_vma *vma)
{
	return !list_empty(&vma->closed_link);
}

/* Internal use only. */
static inline u64 __i915_vma_size(const struct i915_vma *vma)
{
	return vma->node.size - 2 * vma->guard;
}

/**
 * i915_vma_size - Obtain the va range size of the vma
 * @vma: The vma
 *
 * GPU virtual address space may be allocated with padding. This
 * function returns the effective virtual address range size
 * with padding subtracted.
 *
 * Return: The effective virtual address range size.
 */
static inline u64 i915_vma_size(const struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	return __i915_vma_size(vma);
}

/* Internal use only. */
static inline u64 __i915_vma_offset(const struct i915_vma *vma)
{
	/* The actual start of the vma->pages is after the guard pages. */
	return vma->node.start + vma->guard;
}

/**
 * i915_vma_offset - Obtain the va offset of the vma
 * @vma: The vma
 *
 * GPU virtual address space may be allocated with padding. This
 * function returns the effective virtual address offset the gpu
 * should use to access the bound data.
 *
 * Return: The effective virtual address offset.
 */
static inline u64 i915_vma_offset(const struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	return __i915_vma_offset(vma);
}
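
/*
 * Putting the two helpers above together, a bound vma with guard
 * padding occupies its drm_mm node like so (illustrative sketch):
 *
 *	node.start                                  node.start + node.size
 *	|<- guard ->|<------ i915_vma_size() ------>|<- guard ->|
 *	            ^
 *	            i915_vma_offset()
 *
 * i.e. i915_vma_offset() == node.start + guard and
 * i915_vma_size() == node.size - 2 * guard.
 */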

static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(upper_32_bits(i915_vma_offset(vma)));
	GEM_BUG_ON(upper_32_bits(i915_vma_offset(vma) +
				 i915_vma_size(vma) - 1));
	return lower_32_bits(i915_vma_offset(vma));
}

static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
{
	return i915_vm_to_ggtt(vma->vm)->pin_bias;
}

static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
{
	i915_gem_object_get(vma->obj);
	return vma;
}

static inline struct i915_vma *i915_vma_tryget(struct i915_vma *vma)
{
	if (likely(kref_get_unless_zero(&vma->obj->base.refcount)))
		return vma;

	return NULL;
}

static inline void i915_vma_put(struct i915_vma *vma)
{
	i915_gem_object_put(vma->obj);
}

static inline long
i915_vma_compare(struct i915_vma *vma,
		 struct i915_address_space *vm,
		 const struct i915_gtt_view *view)
{
	ptrdiff_t cmp;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));

	cmp = ptrdiff(vma->vm, vm);
	if (cmp)
		return cmp;

	BUILD_BUG_ON(I915_GTT_VIEW_NORMAL != 0);
	cmp = vma->gtt_view.type;
	if (!view)
		return cmp;

	cmp -= view->type;
	if (cmp)
		return cmp;

	assert_i915_gem_gtt_types();

	/* gtt_view.type also encodes its size so that we both distinguish
	 * different views using it as a "type" and also use a compact (no
	 * accessing of uninitialised padding bytes) memcmp without storing
	 * an extra parameter or adding more code.
	 *
	 * To ensure that the memcmp is valid for all branches of the union,
	 * even though the code looks like it is just comparing one branch,
	 * we assert above that all branches have the same address, and that
	 * each branch has a unique type/size.
	 */
	BUILD_BUG_ON(I915_GTT_VIEW_NORMAL >= I915_GTT_VIEW_PARTIAL);
	BUILD_BUG_ON(I915_GTT_VIEW_PARTIAL >= I915_GTT_VIEW_ROTATED);
	BUILD_BUG_ON(I915_GTT_VIEW_ROTATED >= I915_GTT_VIEW_REMAPPED);
	BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
		     offsetof(typeof(*view), partial));
	BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
		     offsetof(typeof(*view), remapped));
	return memcmp(&vma->gtt_view.partial, &view->partial, view->type);
}
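
/*
 * i915_vma_compare() is written to serve as the comparator for a sorted
 * lookup over an object's vmas. A rough sketch of such a walk (field
 * names are illustrative, not a definitive implementation):
 *
 *	struct rb_node *rb = obj->vma.tree.rb_node;
 *
 *	while (rb) {
 *		struct i915_vma *pos = rb_entry(rb, struct i915_vma, obj_node);
 *		long cmp = i915_vma_compare(pos, vm, view);
 *
 *		if (cmp == 0)
 *			return pos;
 *		rb = cmp < 0 ? rb->rb_right : rb->rb_left;
 *	}
 */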

struct i915_vma_work *i915_vma_work(void);
int i915_vma_bind(struct i915_vma *vma,
		  unsigned int pat_index,
		  u32 flags,
		  struct i915_vma_work *work,
		  struct i915_vma_resource *vma_res);

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color);
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb);
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm);
int __must_check i915_vma_unbind_unlocked(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);

void i915_vma_destroy_locked(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);

#define assert_vma_held(vma) dma_resv_assert_held((vma)->obj->base.resv)

static inline void i915_vma_lock(struct i915_vma *vma)
{
	dma_resv_lock(vma->obj->base.resv, NULL);
}

static inline void i915_vma_unlock(struct i915_vma *vma)
{
	dma_resv_unlock(vma->obj->base.resv);
}

int __must_check
i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		u64 size, u64 alignment, u64 flags);

static inline int __must_check
i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (!err)
		err = i915_vma_pin_ww(vma, &ww, size, alignment, flags);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return err;
}

int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags);

static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
	return atomic_read(&vma->flags) & I915_VMA_PIN_MASK;
}

static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
{
	return i915_vma_pin_count(vma);
}

static inline void __i915_vma_pin(struct i915_vma *vma)
{
	atomic_inc(&vma->flags);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
}

static inline void __i915_vma_unpin(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	atomic_dec(&vma->flags);
}

static inline void i915_vma_unpin(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	__i915_vma_unpin(vma);
}

static inline bool i915_vma_is_bound(const struct i915_vma *vma,
				     unsigned int where)
{
	return atomic_read(&vma->flags) & where;
}
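
/*
 * Condensed pin/use/unpin sketch (the flags, e.g. PIN_GLOBAL here, and
 * the error handling depend entirely on the caller):
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *
 *	... use the binding, e.g. via i915_vma_offset(vma) ...
 *
 *	i915_vma_unpin(vma);
 */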

static inline bool i915_node_color_differs(const struct drm_mm_node *node,
					   unsigned long color)
{
	return drm_mm_node_allocated(node) && node->color != color;
}

/**
 * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
 * @vma: VMA to iomap
 *
 * The passed in VMA has to be pinned in the global GTT mappable region.
 * An extra pinning of the VMA is acquired for the returned iomapping,
 * the caller must call i915_vma_unpin_iomap() to relinquish the pinning
 * after the iomapping is no longer required.
 *
 * Returns a valid iomapped pointer or ERR_PTR() on error.
 */
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);

/**
 * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap
 * @vma: VMA to unpin
 *
 * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
 *
 * This function is only valid to be called on a VMA previously
 * iomapped by the caller with i915_vma_pin_iomap().
 */
void i915_vma_unpin_iomap(struct i915_vma *vma);

/**
 * i915_vma_pin_fence - pin fencing state
 * @vma: vma to pin fencing for
 *
 * This pins the fencing state (whether tiled or untiled) to make sure the
 * vma (and its object) is ready to be used as a scanout target. Fencing
 * state must be synchronized first by calling i915_vma_get_fence().
 *
 * The resulting fence pin reference must be released again with
 * i915_vma_unpin_fence().
 *
 * Returns:
 *
 * 0 on success, a negative error code on failure.
 */
int __must_check i915_vma_pin_fence(struct i915_vma *vma);
void i915_vma_revoke_fence(struct i915_vma *vma);

int __i915_vma_pin_fence(struct i915_vma *vma);

static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
	GEM_BUG_ON(atomic_read(&vma->fence->pin_count) <= 0);
	atomic_dec(&vma->fence->pin_count);
}

/**
 * i915_vma_unpin_fence - unpin fencing state
 * @vma: vma to unpin fencing for
 *
 * This releases the fence pin reference acquired through
 * i915_vma_pin_fence. It will handle both objects with and without an
 * attached fence correctly, callers do not need to distinguish this.
 */
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
	if (vma->fence)
		__i915_vma_unpin_fence(vma);
}

static inline int i915_vma_fence_id(const struct i915_vma *vma)
{
	return vma->fence ? vma->fence->id : -1;
}

void i915_vma_parked(struct intel_gt *gt);

static inline bool i915_vma_is_scanout(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
}

static inline void i915_vma_mark_scanout(struct i915_vma *vma)
{
	set_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
}

static inline void i915_vma_clear_scanout(struct i915_vma *vma)
{
	clear_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
}

void i915_ggtt_clear_scanout(struct drm_i915_gem_object *obj);

#define for_each_until(cond) if (cond) break; else

/**
 * for_each_ggtt_vma - Iterate over the GGTT VMA belonging to an object.
 * @V: the #i915_vma iterator
 * @OBJ: the #drm_i915_gem_object
 *
 * GGTT VMA are placed at the beginning of the object's vma.list, see
 * vma_create(), so we can stop our walk as soon as we see a ppGTT VMA,
 * or when the list is empty.
 */
#define for_each_ggtt_vma(V, OBJ) \
	list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \
		for_each_until(!i915_vma_is_ggtt(V))
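
/*
 * Example walk (sketch): visiting every GGTT binding of an object,
 * e.g. to drop its scanout marking (locking requirements are the
 * caller's responsibility):
 *
 *	struct i915_vma *vma;
 *
 *	for_each_ggtt_vma(vma, obj)
 *		i915_vma_clear_scanout(vma);
 */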

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
void i915_vma_make_shrinkable(struct i915_vma *vma);
void i915_vma_make_purgeable(struct i915_vma *vma);

int i915_vma_wait_for_bind(struct i915_vma *vma);

static inline int i915_vma_sync(struct i915_vma *vma)
{
	/* Wait for the asynchronous bindings and pending GPU reads */
	return i915_active_wait(&vma->active);
}

/**
 * i915_vma_get_current_resource - Get the current resource of the vma
 * @vma: The vma to get the current resource from.
 *
 * It's illegal to call this function if the vma is not bound.
 *
 * Return: A refcounted pointer to the current vma resource
 * of the vma, assuming the vma is bound.
 */
static inline struct i915_vma_resource *
i915_vma_get_current_resource(struct i915_vma *vma)
{
	return i915_vma_resource_get(vma->resource);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
void i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
				     struct i915_vma *vma);
#endif

void i915_vma_module_exit(void);
int i915_vma_module_init(void);

I915_SELFTEST_DECLARE(int i915_vma_get_pages(struct i915_vma *vma));
I915_SELFTEST_DECLARE(void i915_vma_put_pages(struct i915_vma *vma));

#endif