/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
#include "drm_mm.h"
#include "drm_global.h"
#include "linux/workqueue.h"
#include "linux/fs.h"
#include "linux/spinlock.h"

struct ttm_backend;

struct ttm_backend_func {
	/**
	 * struct ttm_backend_func member populate
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 * @num_pages: Number of pages to populate.
	 * @pages: Array of pointers to ttm pages.
	 * @dummy_read_page: Page to be used instead of NULL pages in the
	 * array @pages.
	 *
	 * Populate the backend with ttm pages. Depending on the backend,
	 * it may or may not copy the @pages array.
	 */
	int (*populate) (struct ttm_backend *backend,
			 unsigned long num_pages, struct page **pages,
			 struct page *dummy_read_page);
	/**
	 * struct ttm_backend_func member clear
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 *
	 * This is an "unpopulate" function. Release all resources
	 * allocated with populate.
	 */
	void (*clear) (struct ttm_backend *backend);

	/**
	 * struct ttm_backend_func member bind
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture and system page sizes.
	 */
	int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);

	/**
	 * struct ttm_backend_func member unbind
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture and system page sizes.
	 */
	int (*unbind) (struct ttm_backend *backend);

	/**
	 * struct ttm_backend_func member destroy
	 *
	 * @backend: Pointer to a struct ttm_backend.
	 *
	 * Destroy the backend.
	 */
	void (*destroy) (struct ttm_backend *backend);
};

/**
 * struct ttm_backend
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @flags: For driver use.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 *
 */

struct ttm_backend {
	struct ttm_bo_device *bdev;
	uint32_t flags;
	struct ttm_backend_func *func;
};

#define TTM_PAGE_FLAG_USER            (1 << 1)
#define TTM_PAGE_FLAG_USER_DIRTY      (1 << 2)
#define TTM_PAGE_FLAG_WRITE           (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
#define TTM_PAGE_FLAG_DMA32           (1 << 7)

enum ttm_caching_state {
	tt_uncached,
	tt_wc,
	tt_cached
};

/**
 * struct ttm_tt
 *
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @first_himem_page: Himem pages are put last in the page array, which
 * enables us to run caching attribute changes on only the first part
 * of the page array containing lomem pages. This is the index of the
 * first himem page.
 * @last_lomem_page: Index of the last lomem page in the page array.
 * @page_flags: Page flags as identified by the TTM_PAGE_FLAG_XX flags.
 * @num_pages: Number of pages in the page array.
 * @glob: Pointer to the current struct ttm_bo_global.
 * @be: Pointer to the ttm backend.
 * @tsk: The task for user ttm.
 * @start: virtual address for user ttm.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
	struct page *dummy_read_page;
	struct page **pages;
	long first_himem_page;
	long last_lomem_page;
	uint32_t page_flags;
	unsigned long num_pages;
	struct ttm_bo_global *glob;
	struct ttm_backend *be;
	struct task_struct *tsk;
	unsigned long start;
	struct file *swap_storage;
	enum ttm_caching_state caching_state;
	enum {
		tt_bound,
		tt_unbound,
		tt_unpopulated,
	} state;
};

#define TTM_MEMTYPE_FLAG_FIXED    (1 << 0)	/* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1)	/* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA      (1 << 3)	/* Can't map aperture */


struct ttm_mem_type_manager {

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	unsigned long gpu_offset;
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;

	/*
	 * Protected by the bdev->lru_lock.
	 * TODO: Consider one lru_lock per ttm_mem_type_manager.
	 * Plays ill with list removal, though.
	 */

	struct drm_mm manager;
	struct list_head lru;
};
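/*
 * Illustrative sketch only (not part of this header's API): how a driver
 * might wrap struct ttm_backend and fill in a struct ttm_backend_func
 * table. All my_-prefixed names are hypothetical.
 *
 *	struct my_backend {
 *		struct ttm_backend backend;
 *		struct page **pages;		// handed over by populate()
 *		unsigned long num_pages;
 *	};
 *
 *	static int my_populate(struct ttm_backend *backend,
 *			       unsigned long num_pages, struct page **pages,
 *			       struct page *dummy_read_page)
 *	{
 *		struct my_backend *mb =
 *			container_of(backend, struct my_backend, backend);
 *
 *		// Keep a reference to the page array; TTM owns the pages.
 *		mb->pages = pages;
 *		mb->num_pages = num_pages;
 *		return 0;
 *	}
 *
 *	static struct ttm_backend_func my_backend_func = {
 *		.populate = my_populate,
 *		.clear = my_clear,	// undo my_populate()
 *		.bind = my_bind,	// map pages at the bo_mem location
 *		.unbind = my_unbind,
 *		.destroy = my_destroy,	// free the wrapper
 *	};
 */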
/**
 * struct ttm_bo_driver
 *
 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 * @sync_obj_signaled: See ttm_fence_api.h
 * @sync_obj_wait: See ttm_fence_api.h
 * @sync_obj_flush: See ttm_fence_api.h
 * @sync_obj_unref: See ttm_fence_api.h
 * @sync_obj_ref: See ttm_fence_api.h
 */

struct ttm_bo_driver {
	/**
	 * struct ttm_bo_driver member create_ttm_backend_entry
	 *
	 * @bdev: The buffer object device.
	 *
	 * Create a driver-specific struct ttm_backend.
	 */

	struct ttm_backend *(*create_ttm_backend_entry)
	 (struct ttm_bo_device *bdev);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */

	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
			      struct ttm_mem_type_manager *man);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags.
	 */

	void (*evict_flags) (struct ttm_buffer_object *bo,
			     struct ttm_placement *placement);
	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @interruptible: Use interruptible sleeps if possible when sleeping.
	 * @no_wait_reserve: Return immediately rather than sleeping if other
	 * buffers are busy.
	 * @no_wait_gpu: Return immediately rather than sleeping if the GPU is
	 * busy.
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move) (struct ttm_buffer_object *bo,
		     bool evict, bool interruptible,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access) (struct ttm_buffer_object *bo,
			      struct file *filp);

	/**
	 * In case a driver writer dislikes the TTM fence objects,
	 * the driver writer can replace those with sync objects of
	 * his / her own. If it turns out that no driver writer is
	 * using these, I suggest we remove these hooks and plug in
	 * fences directly. The bo driver needs the following functionality:
	 * See the corresponding functions in the fence object API
	 * documentation.
	 */

	bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
	int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
			      bool lazy, bool interruptible);
	int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
	void (*sync_obj_unref) (void **sync_obj);
	void *(*sync_obj_ref) (void *sync_obj);

	/*
	 * Hook to notify the driver about a move, so it can do
	 * tiling things.
	 */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *new_mem);
	/*
	 * Notify the driver that we are taking a fault on this BO
	 * and have reserved it.
	 */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * Notify the driver that we're about to swap out this bo.
	 */
	void (*swap_notify) (struct ttm_buffer_object *bo);

	/**
	 * Driver callback for when io memory is mapped (for bo_move_memcpy,
	 * for instance). TTM takes care to call io_mem_free whenever the
	 * mapping is no longer in use; io_mem_reserve and io_mem_free calls
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
};
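/*
 * Illustrative sketch only (hypothetical my_-prefixed names): a driver
 * typically provides a statically initialized struct ttm_bo_driver and
 * leaves optional hooks NULL, e.g.:
 *
 *	static struct ttm_bo_driver my_bo_driver = {
 *		.create_ttm_backend_entry = my_create_backend,
 *		.invalidate_caches = my_invalidate_caches,
 *		.init_mem_type = my_init_mem_type,
 *		.evict_flags = my_evict_flags,
 *		.move = my_move,	// NULL falls back to memcpy moves
 *		.verify_access = my_verify_access,
 *		.sync_obj_signaled = my_fence_signaled,
 *		.sync_obj_wait = my_fence_wait,
 *		.sync_obj_flush = my_fence_flush,
 *		.sync_obj_unref = my_fence_unref,
 *		.sync_obj_ref = my_fence_ref,
 *		.io_mem_reserve = my_io_mem_reserve,
 *		.io_mem_free = my_io_mem_free,
 *	};
 */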
/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
	struct drm_global_reference ref;
	struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
 * used by a buffer object. This is excluding page arrays and backing pages.
 * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {

	/**
	 * Constant after init.
	 */

	struct kobject kobj;
	struct ttm_mem_global *mem_glob;
	struct page *dummy_read_page;
	struct ttm_mem_shrink shrink;
	size_t ttm_bo_extra_size;
	size_t ttm_bo_size;
	struct mutex device_list_mutex;
	spinlock_t lru_lock;

	/**
	 * Protected by device_list_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru;

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
};


#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING  0	/* Buffer object is moving and needs
					   idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX 1
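/*
 * Illustrative sketch only: typical driver-side setup of the global bo
 * state via the drm_global machinery declared in drm_global.h (the
 * my_-prefixed names and the surrounding driver struct are hypothetical):
 *
 *	static int my_ttm_global_init(struct my_device *mdev)
 *	{
 *		struct drm_global_reference *global_ref =
 *			&mdev->bo_global_ref.ref;
 *
 *		global_ref->global_type = DRM_GLOBAL_TTM_BO;
 *		global_ref->size = sizeof(struct ttm_bo_global);
 *		global_ref->init = &ttm_bo_global_init;
 *		global_ref->release = &ttm_bo_global_release;
 *		return drm_global_item_ref(global_ref);
 *	}
 *
 * The matching teardown calls drm_global_item_unref(&mdev->bo_global_ref.ref).
 * Note that a struct ttm_mem_global reference (DRM_GLOBAL_TTM_MEM) must be
 * set up first and placed in bo_global_ref.mem_glob.
 */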
/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @addr_space_mm: Range manager for the device address space.
 * @lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
 * If a GPU lockup has been detected, this is forced to 0.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 *
 */

struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_global *glob;
	struct ttm_bo_driver *driver;
	rwlock_t vm_lock;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
	/*
	 * Protected by the vm lock.
	 */
	struct rb_root addr_space_rb;
	struct drm_mm addr_space_mm;

	/*
	 * Protected by the global:lru lock.
	 */
	struct list_head ddestroy;

	/*
	 * Protected by load / firstopen / lastclose / unload sync.
	 */

	bool nice_mode;
	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */

	struct delayed_work wq;

	bool need_dma32;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	/* Replace exactly the bits in @mask with the corresponding bits of @new. */
	*old ^= (*old ^ new) & mask;
	return *old;
}

/**
 * ttm_tt_create
 *
 * @bdev: pointer to a struct ttm_bo_device:
 * @size: Size of the data that needs backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * NULL: Out of memory.
 */
extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
				    unsigned long size,
				    uint32_t page_flags,
				    struct page *dummy_read_page);

/**
 * ttm_tt_set_user:
 *
 * @ttm: The struct ttm_tt to populate.
 * @tsk: A struct task_struct for which @start is a valid user-space address.
 * @start: A valid user-space address.
 * @num_pages: Size in pages of the user memory area.
 *
 * Populate a struct ttm_tt with a user-space memory area after first pinning
 * the pages backing it.
 * Returns:
 * !0: Error.
 */

extern int ttm_tt_set_user(struct ttm_tt *ttm,
			   struct task_struct *tsk,
			   unsigned long start, unsigned long num_pages);

/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem.
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_populate:
 *
 * @ttm: The struct ttm_tt to contain the backing pages.
 *
 * Add backing pages to all of @ttm.
 */
extern int ttm_tt_populate(struct ttm_tt *ttm);

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy a struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);
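/*
 * Illustrative sketch only: the expected ttm_tt lifecycle, assuming a
 * hypothetical bdev, bo_mem and dummy_read_page already set up:
 *
 *	struct ttm_tt *ttm;
 *
 *	ttm = ttm_tt_create(bdev, size, 0, dummy_read_page);
 *	if (unlikely(ttm == NULL))
 *		return -ENOMEM;
 *
 *	ret = ttm_tt_bind(ttm, bo_mem);	// populates on demand, then binds
 *	if (unlikely(ret != 0)) {
 *		ttm_tt_destroy(ttm);
 *		return ret;
 *	}
 *	...
 *	ttm_tt_destroy(ttm);		// unbinds, unpopulates and frees
 */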
/**
 * ttm_tt_get_page:
 *
 * @ttm: The struct ttm_tt.
 * @index: Index of the desired page.
 *
 * Return a pointer to the struct page backing @ttm at page
 * index @index. If the page is unpopulated, one will be allocated to
 * populate that index.
 *
 * Returns:
 * NULL on OOM.
 */
extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);

/**
 * ttm_tt_cache_flush:
 *
 * @pages: An array of pointers to struct page to flush.
 * @num_pages: Number of pages to flush.
 *
 * Flush the data of the indicated pages from the cpu caches.
 * This is used when changing caching attributes of the pages from
 * cache-coherent.
 */
extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change the caching policy of any default kernel mappings
 * of the pages backing @ttm. If changing from cached to uncached or
 * write-combined, all CPU caches will first be flushed to make sure the data
 * of the pages hit RAM. This function may be very costly as it involves
 * global TLB and cache flushes and potential page splitting / combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
			  struct file *persistant_swap_storage);

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
			       struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible when sleeping.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle
 * buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no wait was requested).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement,
			    struct ttm_mem_reg *mem,
			    bool interruptible,
			    bool no_wait_reserve, bool no_wait_gpu);

/**
 * ttm_bo_wait_cpu
 *
 * @bo: Pointer to a struct ttm_buffer_object.
 * @no_wait: Don't sleep while waiting.
 *
 * Wait until a buffer object is no longer sync'ed for CPU access.
 * Returns:
 * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */

extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
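/*
 * Illustrative sketch only: how a caller might drive ttm_bo_mem_space()
 * and react to its documented return values (error handling abbreviated;
 * bo and placement are assumed to exist):
 *
 *	struct ttm_mem_reg mem;
 *
 *	mem.num_pages = bo->num_pages;
 *	mem.size = mem.num_pages << PAGE_SHIFT;
 *	mem.page_alignment = bo->mem.page_alignment;
 *
 *	ret = ttm_bo_mem_space(bo, &placement, &mem, true, false, false);
 *	if (ret == -ERESTARTSYS)
 *		return ret;	// propagate the signal to user-space
 *	if (unlikely(ret != 0))
 *		return ret;	// -ENOMEM / -EBUSY: no space found
 *	// On success, @mem describes the new placement; hand it to a move
 *	// function such as ttm_bo_move_ttm() or ttm_bo_move_memcpy().
 */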
/**
 * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 * @bus_base: On return the base of the PCI region.
 * @bus_offset: On return the byte offset into the PCI region.
 * @bus_size: On return the byte size of the buffer object or zero if
 * the buffer object memory is not accessible through a PCI region.
 *
 * Returns:
 * -EINVAL if the buffer object is currently not mappable.
 * 0 otherwise.
 */

extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
			     struct ttm_mem_reg *mem,
			     unsigned long *bus_base,
			     unsigned long *bus_offset,
			     unsigned long *bus_size);

extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem);
extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem);

extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 * @need_dma32: Set to true if the device cannot address pages above 4 GB,
 * so that DMA32 pages are used for the TT backing store.
 *
 * Initializes a struct ttm_bo_device:
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
			      struct ttm_bo_global *glob,
			      struct ttm_bo_driver *driver,
			      uint64_t file_page_offset, bool need_dma32);

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
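/*
 * Illustrative sketch only: typical driver initialization order, assuming
 * the my_-prefixed names and a driver-defined DRM_FILE_PAGE_OFFSET exist
 * elsewhere:
 *
 *	ret = my_ttm_global_init(mdev);		// see the sketch above
 *	if (ret)
 *		return ret;
 *
 *	ret = ttm_bo_device_init(&mdev->bdev,
 *				 mdev->bo_global_ref.ref.object,
 *				 &my_bo_driver,
 *				 DRM_FILE_PAGE_OFFSET,
 *				 mdev->need_dma32);
 *	if (ret) {
 *		DRM_ERROR("failed initializing bo driver (%d)\n", ret);
 *		return ret;
 *	}
 *	// Memory type managers (VRAM, TT) are then set up via the driver's
 *	// init_mem_type() hook, invoked through ttm_bo_init_mm().
 */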
/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_sequence: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 * @sequence: Validation sequence number, used if @use_sequence == 1.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted.) The following algorithm prevents such deadlocks from
 * occurring:
 * 1) Buffers are reserved with the lru spinlock held. Upon successful
 * reservation they are removed from the lru list. This stops a reserved
 * buffer from being evicted. However the lru spinlock is released between
 * the time a buffer is selected for eviction and the time it is reserved.
 * Therefore a check is made when a buffer is reserved for eviction, that it
 * is still the first buffer in the lru list, before it is removed from the
 * list. @check_lru == 1 forces this check. If it fails, the function returns
 * -EINVAL, and the caller should then choose a new buffer to evict and repeat
 * the procedure.
 * 2) Processes attempting to reserve multiple buffers other than for eviction
 * (typically execbuf) should first obtain a unique 32-bit validation sequence
 * number and call this function with @use_sequence == 1 and @sequence == the
 * unique sequence number. If upon call of this function the buffer object is
 * already reserved, the validation sequence is checked against the validation
 * sequence of the process currently reserving the buffer, and if the current
 * validation sequence is greater than that of the process holding the
 * reservation, the function returns -EAGAIN. Otherwise it sleeps waiting for
 * the buffer to become unreserved, after which it retries reserving.
 * The caller should, when receiving an -EAGAIN error,
 * release all its buffer reservations, wait for @bo to become unreserved, and
 * then rerun the validation with the same validation sequence. This procedure
 * will always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EAGAIN: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again (only if use_sequence == 1).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence);

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);

/**
 * ttm_bo_wait_unreserved
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 *
 * Wait for a struct ttm_buffer_object to become unreserved.
 * This is typically used in the execbuf code to relax cpu-usage during
 * a potential deadlock backoff.
 */
extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
				  bool interruptible);
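/*
 * Illustrative sketch only: the deadlock-avoidance pattern described above,
 * as an execbuf-style caller might implement it. my_next_validation_sequence()
 * and my_backoff_reservations() are hypothetical helpers; the latter
 * unreserves everything reserved so far:
 *
 *	uint32_t seq = my_next_validation_sequence();
 *
 * retry:
 *	list_for_each_entry(entry, &val_list, head) {
 *		ret = ttm_bo_reserve(entry->bo, true, false, true, seq);
 *		if (ret == -EAGAIN) {
 *			// A process with a lower sequence holds a buffer we
 *			// need: back off all reservations taken so far, wait,
 *			// then rerun the whole loop with the SAME sequence.
 *			my_backoff_reservations(&val_list);
 *			ret = ttm_bo_wait_unreserved(entry->bo, true);
 *			if (unlikely(ret != 0))
 *				return ret;
 *			goto retry;
 *		}
 *		if (unlikely(ret != 0))
 *			return ret;	// e.g. -ERESTARTSYS
 *	}
 */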
/*
 * ttm_bo_util.c
 */

/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
			   bool evict, bool no_wait_reserve,
			   bool no_wait_gpu, struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
			      bool evict, bool no_wait_reserve,
			      bool no_wait_gpu, struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when moving is complete.
 * @sync_obj_arg: An argument to pass to the sync object idle / wait
 * functions.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */

extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
				     void *sync_obj,
				     void *sync_obj_arg,
				     bool evict, bool no_wait_reserve,
				     bool no_wait_gpu,
				     struct ttm_mem_reg *new_mem);
/**
 * ttm_io_prot
 *
 * @caching_flags: Caching flags for the mapping.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */
extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);

#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define TTM_HAS_AGP
#include <linux/agp_backend.h>

/**
 * ttm_agp_backend_init
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the Linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
						struct agp_bridge_data *bridge);
#endif

#endif