/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/drm2/ttm/ttm_page_alloc.c 247835 2013-03-05 09:49:34Z kib $");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define VM_ALLOC_DMA32	VM_ALLOC_RESERVED1

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(vm_page_t))
#define SMALL_ALLOCATION	16
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL	1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access; the shrinker may
 * take it from the vm_lowmem event handler.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @ttm_page_alloc_flags: Flags used when allocating new pages.
 * @npages: Number of pages in pool.
 * @name: Pool name, for statistics.
 * @nfrees: Statistics on how many pages have been freed from this pool.
 * @nrefills: Statistics on how many times this pool has been refilled.
 */
struct ttm_page_pool {
	struct mtx	lock;
	bool		fill_lock;
	bool		dma32;
	struct pglist	list;
	int		ttm_page_alloc_flags;
	unsigned	npages;
	char		*name;
	unsigned long	nfrees;
	unsigned long	nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4
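/*
 * Pool manager overview: ttm_page_alloc_init() below creates a single
 * manager with NUM_POOLS pools (write-combined, uncached, and their DMA32
 * variants) and registers a vm_lowmem event handler so the pools shrink
 * under memory pressure; ttm_page_alloc_fini() deregisters the handler and
 * drains all pooled pages.
 */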
/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @kobj_ref: Reference count for the manager; the last release frees it.
 * @lowmem_handler: Registration of the shrink callback on the vm_lowmem
 * event.
 * @options: Limits for the pools.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	unsigned int kobj_ref;
	eventhandler_tag lowmem_handler;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
		} ;
	};
};

MALLOC_DEFINE(M_TTM_POOLMGR, "ttm_poolmgr", "TTM Pool Manager");

static void
ttm_vm_page_free(vm_page_t m)
{

	KASSERT(m->object == NULL, ("ttm page %p is owned", m));
	KASSERT(m->wire_count == 1, ("ttm lost wire %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m));
	KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
	m->flags &= ~PG_FICTITIOUS;
	m->oflags |= VPO_UNMANAGED;
	vm_page_unwire(m, 0);
	vm_page_free(m);
}

static vm_memattr_t
ttm_caching_state_to_vm(enum ttm_caching_state cstate)
{

	switch (cstate) {
	case tt_uncached:
		return (VM_MEMATTR_UNCACHEABLE);
	case tt_wc:
		return (VM_MEMATTR_WRITE_COMBINING);
	case tt_cached:
		return (VM_MEMATTR_WRITE_BACK);
	}
	panic("caching state %d\n", cstate);
}

static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
{

	free(m, M_TTM_POOLMGR);
}

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_pool_store(struct ttm_pool_manager *m,
		struct attribute *attr, const char *buffer, size_t size)
{
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
		struct attribute *attr, char *buffer)
{
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
#endif

static struct ttm_pool_manager *_manager;

static int set_pages_array_wb(vm_page_t *pages, int addrinarray)
{
	vm_page_t m;
	int i;

	for (i = 0; i < addrinarray; i++) {
		m = pages[i];
#ifdef TTM_HAS_AGP
		unmap_page_from_agp(m);
#endif
		pmap_page_set_memattr(m, VM_MEMATTR_WRITE_BACK);
	}
	return 0;
}

static int set_pages_array_wc(vm_page_t *pages, int addrinarray)
{
	vm_page_t m;
	int i;

	for (i = 0; i < addrinarray; i++) {
		m = pages[i];
#ifdef TTM_HAS_AGP
		map_page_into_agp(pages[i]);
#endif
		pmap_page_set_memattr(m, VM_MEMATTR_WRITE_COMBINING);
	}
	return 0;
}

static int set_pages_array_uc(vm_page_t *pages, int addrinarray)
{
	vm_page_t m;
	int i;

	for (i = 0; i < addrinarray; i++) {
		m = pages[i];
#ifdef TTM_HAS_AGP
		map_page_into_agp(pages[i]);
#endif
		pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);
	}
	return 0;
}
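/*
 * The set_pages_array_*() helpers keep the int return of their Linux
 * counterparts, but pmap_page_set_memattr() has no failure path on FreeBSD,
 * so they always return 0.  The error branches in the callers below cannot
 * currently trigger; they are kept to stay close to the Linux code.
 */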
/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}
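/*
 * Caching state to pool index (matching the union layout in struct
 * ttm_pool_manager):
 *	wc         -> 0		uc         -> 1
 *	wc + DMA32 -> 2		uc + DMA32 -> 3
 * Write-back (tt_cached) pages are never pooled; ttm_get_pool() returns
 * NULL for them and the callers fall back to plain page alloc/free.
 */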
/* set memory back to wb and free the pages. */
static void ttm_pages_put(vm_page_t *pages, unsigned npages)
{
	unsigned i;

	/* Our VM handles vm memattr automatically on the page free. */
	if (set_pages_array_wb(pages, npages))
		printf("[TTM] Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		ttm_vm_page_free(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	vm_page_t p, p1;
	vm_page_t *pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;
	unsigned i;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = malloc(npages_to_free * sizeof(vm_page_t),
	    M_TEMP, M_WAITOK | M_ZERO);

restart:
	mtx_lock(&pool->lock);

	TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, pageq, p1) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			for (i = 0; i < freed_pages; i++)
				TAILQ_REMOVE(&pool->list, pages_to_free[i],
				    pageq);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			mtx_unlock(&pool->lock);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * the code following the loop runs with the pool
			 * lock held, while we have already dropped it.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		for (i = 0; i < freed_pages; i++)
			TAILQ_REMOVE(&pool->list, pages_to_free[i], pageq);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	mtx_unlock(&pool->lock);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	free(pages_to_free, M_TEMP);
	return nr_free;
}

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
	unsigned i;
	int total = 0;
	for (i = 0; i < NUM_POOLS; ++i)
		total += _manager->pools[i].npages;

	return total;
}
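/*
 * ttm_page_pool_free() returns the number of pages it was asked to free but
 * could not (the pool ran dry first).  The shrink callback below relies on
 * this to carry the remaining deficit over to the next pool in its
 * round-robin sweep.
 */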
/**
 * Callback for the VM to request that the pools reduce the number of pages
 * held.
 */
static int ttm_pool_mm_shrink(void *arg)
{
	static unsigned int start_pool = 0;
	unsigned i;
	unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
	struct ttm_page_pool *pool;
	int shrink_pages = 100; /* XXXKIB */

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
	}
	/* return estimated number of unused pages in pool */
	return ttm_pool_get_num_unused_pages();
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{

	manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
	    ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{

	EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
}

static int ttm_set_pages_caching(vm_page_t *pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			printf("[TTM] Failed to set %d pages to uc!\n",
			    cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			printf("[TTM] Failed to set %d pages to wc!\n",
			    cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change caching state.  Pages whose caching
 * state was already changed successfully stay on the @pages list and end up
 * in the pool.
 */
static void ttm_handle_caching_state_failure(struct pglist *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		vm_page_t *failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		TAILQ_REMOVE(pages, failed_pages[i], pageq);
		ttm_vm_page_free(failed_pages[i]);
	}
}
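/*
 * Pages whose caching state could not be changed are freed outright rather
 * than pooled: the pool's contract is that every page on its list already
 * has the pool's caching state, so a page with an unexpected memory
 * attribute must never be recycled.
 */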
/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on number
 * of pages returned in pages array.
 */
static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	vm_page_t *caching_array;
	vm_page_t p;
	int r = 0;
	unsigned i, cpages, aflags;
	unsigned max_cpages = min(count,
	    (unsigned)(PAGE_SIZE/sizeof(vm_page_t)));

	aflags = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
	    ((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
	    VM_ALLOC_ZERO : 0);

	/* allocate array for page caching change */
	caching_array = malloc(max_cpages * sizeof(vm_page_t), M_TEMP,
	    M_WAITOK | M_ZERO);

	for (i = 0, cpages = 0; i < count; ++i) {
		p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
		    (ttm_alloc_flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
		    VM_MAX_ADDRESS, PAGE_SIZE, 0,
		    ttm_caching_state_to_vm(cstate));
		if (!p) {
			printf("[TTM] Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
				    cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
					    ttm_flags, cstate,
					    caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p->oflags &= ~VPO_UNMANAGED;
		p->flags |= PG_FICTITIOUS;

#ifdef CONFIG_HIGHMEM /* KIB: nop */
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
				    cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
					    ttm_flags, cstate,
					    caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		TAILQ_INSERT_HEAD(pages, p, pageq);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
			    ttm_flags, cstate,
			    caching_array, cpages);
	}
out:
	free(caching_array, M_TEMP);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
    int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	vm_page_t p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If pool doesn't have enough pages for the allocation new pages are
	 * allocated from outside of pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct pglist new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		mtx_unlock(&pool->lock);

		TAILQ_INIT(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
		    ttm_flags, cstate, alloc_size);
		mtx_lock(&pool->lock);

		if (!r) {
			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			printf("[TTM] Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them in the pool;
			 * count only the newly allocated pages. */
			TAILQ_FOREACH(p, &new_pages, pageq) {
				++cpages;
			}
			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}
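/*
 * Note that fill_lock is a plain flag, not a lock: it is only read and
 * written with pool->lock held, but it stays set across the window where
 * ttm_page_pool_fill_locked() drops pool->lock to call
 * ttm_alloc_new_pages(), which is what actually prevents concurrent or
 * recursive refills of the same pool.
 */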
/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
		struct pglist *pages,
		int ttm_flags,
		enum ttm_caching_state cstate,
		unsigned count)
{
	vm_page_t p;
	unsigned i;

	mtx_lock(&pool->lock);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		TAILQ_CONCAT(pages, &pool->list, pageq);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	for (i = 0; i < count; i++) {
		p = TAILQ_FIRST(&pool->list);
		TAILQ_REMOVE(&pool->list, p, pageq);
		TAILQ_INSERT_TAIL(pages, p, pageq);
	}
	pool->npages -= count;
	count = 0;
out:
	mtx_unlock(&pool->lock);
	return count;
}

/* Put all pages on the pages list into the correct pool to await reuse */
static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
		enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				ttm_vm_page_free(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	mtx_lock(&pool->lock);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	mtx_unlock(&pool->lock);
	if (npages)
		ttm_page_pool_free(pool, npages);
}
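/*
 * ttm_put_pages() trims the pool after dropping pool->lock on purpose:
 * ttm_page_pool_free() retakes the lock itself and, more importantly,
 * switching page attributes back to write-back is slow, so holding the pool
 * lock across the trim would stall concurrent allocations.
 */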
/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
		enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct pglist plist;
	vm_page_t p = NULL;
	int gfp_flags, aflags;
	unsigned count;
	int r;

	aflags = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
	    ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);

	/* No pool for cached pages */
	if (pool == NULL) {
		for (r = 0; r < npages; ++r) {
			p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
			    (flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
			    VM_MAX_ADDRESS, PAGE_SIZE,
			    0, ttm_caching_state_to_vm(cstate));
			if (!p) {
				printf("[TTM] Unable to allocate page\n");
				return -ENOMEM;
			}
			p->oflags &= ~VPO_UNMANAGED;
			p->flags |= PG_FICTITIOUS;
			pages[r] = p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags = flags | pool->ttm_page_alloc_flags;

	/* First we take pages from the pool */
	TAILQ_INIT(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	TAILQ_FOREACH(p, &plist, pageq) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		TAILQ_FOREACH(p, &plist, pageq) {
			pmap_zero_page(p);
		}
	}

	/* If the pool didn't have enough pages, allocate new ones. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 */
		TAILQ_INIT(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
		    npages);
		TAILQ_FOREACH(p, &plist, pageq) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages in the list, put them back
			 * in the pool. */
			printf("[TTM] Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
    char *name)
{
	mtx_init(&pool->lock, "ttmpool", NULL, MTX_DEF);
	pool->fill_lock = false;
	TAILQ_INIT(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->ttm_page_alloc_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{

	if (_manager != NULL)
		printf("[TTM] manager != NULL\n");
	printf("[TTM] Initializing pool allocator\n");

	_manager = malloc(sizeof(*_manager), M_TTM_POOLMGR, M_WAITOK | M_ZERO);

	ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
	ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
	    TTM_PAGE_FLAG_DMA32, "wc dma");
	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
	    TTM_PAGE_FLAG_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	refcount_init(&_manager->kobj_ref, 1);
	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	printf("[TTM] Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	if (refcount_release(&_manager->kobj_ref))
		ttm_pool_kobj_release(_manager);
	_manager = NULL;
}

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
		    ttm->page_flags,
		    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
		    false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
			    ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
			    ttm->page_flags,
			    ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}
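/*
 * Typical lifecycle, for reference: a TTM backend calls ttm_pool_populate()
 * to take a ttm_tt from tt_unpopulated to tt_unbound, allocating and
 * accounting every page, and ttm_pool_unpopulate() to release the pages
 * back to the pools.  Both tolerate repetition: populate returns early
 * unless the state is tt_unpopulated, and unpopulate skips slots whose page
 * pointer is already NULL (ttm_put_pages clears each slot it consumes).
 */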
#if 0
/* XXXKIB sysctl */
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
#endif