Lines Matching defs:zspage

16  * struct page(s) to form a zspage.
19 * page->private: points to zspage
20 * page->index: links together all component pages of a zspage
23 * page->page_type: first object offset in a subpage of zspage
37 * zspage->lock
192 /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
201 * For every zspage, zspage->freeobj gives the head of this list.
243 struct zspage {
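The body of struct zspage is elided above, but its fields can be reconstructed from the accessors in this listing. A sketch follows; every field name is grounded in the lines above, while the bitfield widths and the field order are assumptions:

struct zspage {
	unsigned int huge:1;		/* set/read via SetZsHugePage()/ZsHugePage() */
	unsigned int fullness:4;	/* fullness group, see insert/remove_zspage() */
	unsigned int class:9;		/* index into pool->size_class[], see zspage_class() */
	unsigned int magic:8;		/* checked against ZSPAGE_MAGIC in get_zspage() */
	unsigned int inuse;		/* live objects, see get/mod_zspage_inuse() */
	unsigned int freeobj;		/* free-list head, see get/set_freeobj() */
	struct page *first_page;	/* head of the component page chain */
	struct list_head list;		/* link on a class fullness list */
	struct zs_pool *pool;		/* owning pool, set in alloc_zspage() */
	rwlock_t lock;			/* taken by migrate_read/write_lock() */
};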
266 static void SetZsHugePage(struct zspage *zspage)
268 zspage->huge = 1;
271 static bool ZsHugePage(struct zspage *zspage)
273 return zspage->huge;
276 static void migrate_lock_init(struct zspage *zspage);
277 static void migrate_read_lock(struct zspage *zspage);
278 static void migrate_read_unlock(struct zspage *zspage);
279 static void migrate_write_lock(struct zspage *zspage);
280 static void migrate_write_unlock(struct zspage *zspage);
285 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
289 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
299 pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
327 static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
333 static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
335 kmem_cache_free(pool->zspage_cachep, zspage);
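Only the kmem_cache_create() and kmem_cache_free() halves of the zspage cache appear above; a plausible counterpart for cache_alloc_zspage() at line 327 is the sketch below (kmem_cache_zalloc() and the flag handling are assumptions):

static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
{
	/* zeroed allocation, so magic, inuse and freeobj start out clean */
	return kmem_cache_zalloc(pool->zspage_cachep, flags);
}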
423 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
434 static inline int get_zspage_inuse(struct zspage *zspage)
436 return zspage->inuse;
440 static inline void mod_zspage_inuse(struct zspage *zspage, int val)
442 zspage->inuse += val;
445 static inline struct page *get_first_page(struct zspage *zspage)
447 struct page *first_page = zspage->first_page;
463 static inline unsigned int get_freeobj(struct zspage *zspage)
465 return zspage->freeobj;
468 static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
470 zspage->freeobj = obj;
474 struct zspage *zspage)
476 return pool->size_class[zspage->class];
481 * class maintains a list of zspages where each zspage is divided
640 static int get_fullness_group(struct size_class *class, struct zspage *zspage)
644 inuse = get_zspage_inuse(zspage);
664 * have. This function inserts the given zspage into the freelist
668 struct zspage *zspage,
672 list_add(&zspage->list, &class->fullness_list[fullness]);
673 zspage->fullness = fullness;
677 * This function removes the given zspage from the freelist identified
680 static void remove_zspage(struct size_class *class, struct zspage *zspage)
682 int fullness = zspage->fullness;
686 list_del_init(&zspage->list);
699 static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
703 newfg = get_fullness_group(class, zspage);
704 if (newfg == zspage->fullness)
707 remove_zspage(class, zspage);
708 insert_zspage(class, zspage, newfg);
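The fragments at lines 699-708 are nearly the whole function; filling in the control flow gives this sketch (the local and the early return are assumptions):

static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
{
	int newfg;

	newfg = get_fullness_group(class, zspage);
	if (newfg == zspage->fullness)
		return newfg;	/* already on the right fullness list */

	remove_zspage(class, zspage);
	insert_zspage(class, zspage, newfg);
	return newfg;
}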
713 static struct zspage *get_zspage(struct page *page)
715 struct zspage *zspage = (struct zspage *)page_private(page);
717 BUG_ON(zspage->magic != ZSPAGE_MAGIC);
718 return zspage;
723 struct zspage *zspage = get_zspage(page);
725 if (unlikely(ZsHugePage(zspage)))
734 * @page: page of the zspage that the object resides in
751 * @page: page of the zspage that the object resides in
773 struct zspage *zspage = get_zspage(page);
775 if (unlikely(ZsHugePage(zspage))) {
798 static int trylock_zspage(struct zspage *zspage)
802 for (cursor = get_first_page(zspage); cursor != NULL; cursor =
812 for (cursor = get_first_page(zspage); cursor != fail; cursor =
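The two loops at lines 802 and 812 suggest a try-lock-all-or-roll-back pattern over the component pages. A sketch, assuming a get_next_page() helper (defined elsewhere in the file, following the page->index chain described at line 20):

static int trylock_zspage(struct zspage *zspage)
{
	struct page *cursor, *fail;

	for (cursor = get_first_page(zspage); cursor != NULL;
			cursor = get_next_page(cursor)) {
		if (!trylock_page(cursor)) {
			fail = cursor;	/* first page we could not lock */
			goto unlock;
		}
	}
	return 1;	/* every component page is now locked */
unlock:
	/* roll back: unlock everything taken before the failure */
	for (cursor = get_first_page(zspage); cursor != fail;
			cursor = get_next_page(cursor))
		unlock_page(cursor);
	return 0;
}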
820 struct zspage *zspage)
826 VM_BUG_ON(get_zspage_inuse(zspage));
827 VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0);
829 next = page = get_first_page(zspage);
840 cache_free_zspage(pool, zspage);
847 struct zspage *zspage)
849 VM_BUG_ON(get_zspage_inuse(zspage));
850 VM_BUG_ON(list_empty(&zspage->list));
857 if (!trylock_zspage(zspage)) {
862 remove_zspage(class, zspage);
863 __free_zspage(pool, class, zspage);
866 /* Initialize a newly allocated zspage */
867 static void init_zspage(struct size_class *class, struct zspage *zspage)
871 struct page *page = get_first_page(zspage);
908 set_freeobj(zspage, 0);
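init_zspage() threads every free slot into a singly linked list whose links live inside the slots themselves; set_freeobj(zspage, 0) at line 908 then makes object 0 the head. A condensed sketch of that encoding, using the link_free/OBJ_TAG_BITS scheme visible at lines 1301 and 1406 (the loop structure, get_next_page() and kmap_local_page() are assumptions):

	unsigned int freeobj = 1;
	unsigned long off = 0;
	struct page *page = get_first_page(zspage);

	while (page) {
		void *vaddr = kmap_local_page(page);
		struct link_free *link = (struct link_free *)vaddr +
					 off / sizeof(*link);

		/* each free slot stores the index of the next free slot */
		while ((off += class->size) < PAGE_SIZE) {
			link->next = freeobj++ << OBJ_TAG_BITS;
			link += class->size / sizeof(*link);
		}

		if (get_next_page(page))	/* slot straddles into next page */
			link->next = freeobj++ << OBJ_TAG_BITS;
		else				/* last slot terminates the list */
			link->next = -1UL << OBJ_TAG_BITS;

		kunmap_local(vaddr);
		page = get_next_page(page);
		off %= PAGE_SIZE;
	}

	set_freeobj(zspage, 0);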
911 static void create_page_chain(struct size_class *class, struct zspage *zspage,
922 * 2. each sub-page points to the zspage using page->private
929 set_page_private(page, (unsigned long)zspage);
932 zspage->first_page = page;
936 SetZsHugePage(zspage);
945 * Allocate a zspage for the given size class
947 static struct zspage *alloc_zspage(struct zs_pool *pool,
953 struct zspage *zspage = cache_alloc_zspage(pool, gfp);
955 if (!zspage)
958 zspage->magic = ZSPAGE_MAGIC;
959 migrate_lock_init(zspage);
970 cache_free_zspage(pool, zspage);
978 create_page_chain(class, zspage, pages);
979 init_zspage(class, zspage);
980 zspage->pool = pool;
981 zspage->class = class->index;
983 return zspage;
986 static struct zspage *find_get_zspage(struct size_class *class)
989 struct zspage *zspage;
992 zspage = list_first_entry_or_null(&class->fullness_list[i],
993 struct zspage, list);
994 if (zspage)
998 return zspage;
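Only the loop body survives at lines 992-994. Presumably the loop scans the fullness groups from nearly full down toward empty, so allocations top up the fullest zspage that still has room; the ZS_INUSE_RATIO_99 bound below is an assumption (ZS_INUSE_RATIO_0 appears at line 827):

static struct zspage *find_get_zspage(struct size_class *class)
{
	struct zspage *zspage;
	int i;

	for (i = ZS_INUSE_RATIO_99; i >= ZS_INUSE_RATIO_0; i--) {
		zspage = list_first_entry_or_null(&class->fullness_list[i],
						  struct zspage, list);
		if (zspage)
			break;
	}

	return zspage;	/* NULL if every group is empty */
}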
1108 static bool zspage_full(struct size_class *class, struct zspage *zspage)
1110 return get_zspage_inuse(zspage) == class->objs_per_zspage;
1113 static bool zspage_empty(struct zspage *zspage)
1115 return get_zspage_inuse(zspage) == 0;
1163 struct zspage *zspage;
1180 /* Guarantees that we can safely get the zspage from the handle */
1184 zspage = get_zspage(page);
1187 * migration cannot move any zpages in this zspage. Here, pool->lock
1189 * zs_unmap_object API so delegate the locking from class to zspage
1192 migrate_read_lock(zspage);
1195 class = zspage_class(pool, zspage);
1215 if (likely(!ZsHugePage(zspage)))
1224 struct zspage *zspage;
1234 zspage = get_zspage(page);
1235 class = zspage_class(pool, zspage);
1252 migrate_read_unlock(zspage);
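The fragments from line 1163 onward show the read-side discipline: resolve the zspage from the handle under pool->lock, take migrate_read_lock() so migration cannot tear the zspage apart while it is mapped, and drop that lock again in zs_unmap_object() (line 1252). From the caller's side, the zs_map_object()/zs_unmap_object() pair is used like this (the buffer and length names are illustrative):

	/* map the object read-only, copy it out, and unmap promptly */
	void *src = zs_map_object(pool, handle, ZS_MM_RO);

	memcpy(buf, src, len);
	zs_unmap_object(pool, handle);

Per the comment at line 423, an object that straddles two component pages is presented through a per-cpu VM mapping area, so the mapping is short-lived and the caller must not sleep between map and unmap.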
1262 * or bigger size will be stored in zspage consisting of a single physical
1276 struct zspage *zspage, unsigned long handle)
1287 class = pool->size_class[zspage->class];
1289 obj = get_freeobj(zspage);
1294 m_page = get_first_page(zspage);
1301 set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
1302 if (likely(!ZsHugePage(zspage)))
1307 zspage->first_page->index = handle;
1310 mod_zspage_inuse(zspage, 1);
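Stitching the fragments at lines 1276-1310 together: obj_malloc() pops the head of the free list, walks the page chain to the page that holds the slot, records the handle, and bumps the in-use counter. A condensed sketch (the locals plus get_next_page(), kmap_local_page() and the link_free handle field are assumptions or helpers not in this listing):

	class = pool->size_class[zspage->class];
	obj = get_freeobj(zspage);		/* index of first free slot */

	offset = obj * class->size;
	m_page = get_first_page(zspage);
	for (i = 0; i < offset >> PAGE_SHIFT; i++)
		m_page = get_next_page(m_page);	/* walk to the slot's page */

	vaddr = kmap_local_page(m_page);
	link = (struct link_free *)vaddr + offset_in_page(offset) / sizeof(*link);
	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);	/* pop the head */

	if (likely(!ZsHugePage(zspage)))
		link->handle = handle;			/* handle in the slot header */
	else
		zspage->first_page->index = handle;	/* huge: use page->index (line 1307) */

	kunmap_local(vaddr);
	mod_zspage_inuse(zspage, 1);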
1333 struct zspage *zspage;
1351 zspage = find_get_zspage(class);
1352 if (likely(zspage)) {
1353 obj = obj_malloc(pool, zspage, handle);
1354 /* Now move the zspage to another fullness group, if required */
1355 fix_fullness_group(class, zspage);
1364 zspage = alloc_zspage(pool, class, gfp);
1365 if (!zspage) {
1371 obj = obj_malloc(pool, zspage, handle);
1372 newfg = get_fullness_group(class, zspage);
1373 insert_zspage(class, zspage, newfg);
1379 /* The zspage is now fully set up, so mark it as movable */
1380 SetZsPageMovable(pool, zspage);
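Lines 1333-1380 outline the two paths of zs_malloc(): reuse a partially filled zspage when one exists, otherwise build and insert a fresh one. A condensed sketch with the pool locking, statistics and handle-recording plumbing elided:

	zspage = find_get_zspage(class);
	if (likely(zspage)) {
		/* fast path: an existing zspage still has room */
		obj = obj_malloc(pool, zspage, handle);
		/* the allocation may push it into another fullness group */
		fix_fullness_group(class, zspage);
	} else {
		/* slow path: no partially filled zspage is available */
		zspage = alloc_zspage(pool, class, gfp);
		if (!zspage)
			return (unsigned long)ERR_PTR(-ENOMEM);

		obj = obj_malloc(pool, zspage, handle);
		newfg = get_fullness_group(class, zspage);
		insert_zspage(class, zspage, newfg);

		/* zspage is fully set up now, so mark it movable (line 1380) */
		SetZsPageMovable(pool, zspage);
	}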
1391 struct zspage *zspage;
1399 zspage = get_zspage(f_page);
1404 /* Insert this object into the containing zspage's freelist */
1405 if (likely(!ZsHugePage(zspage)))
1406 link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
1409 set_freeobj(zspage, f_objidx);
1412 mod_zspage_inuse(zspage, -1);
1417 struct zspage *zspage;
1433 zspage = get_zspage(f_page);
1434 class = zspage_class(pool, zspage);
1439 fullness = fix_fullness_group(class, zspage);
1441 free_zspage(pool, class, zspage);
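obj_free() (lines 1391-1412) undoes obj_malloc() by pushing the slot back onto the free list, and zs_free() (lines 1417-1441) then reclassifies the zspage, tearing it down once the last object is gone. A sketch of the tail of zs_free(), where the obj_free() name and signature are assumptions:

	obj_free(class->size, obj);
	fullness = fix_fullness_group(class, zspage);
	if (fullness == ZS_INUSE_RATIO_0)
		/* the last object was freed: release the whole zspage */
		free_zspage(pool, class, zspage);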
1519 * Find allocated object in zspage from index object and
1548 static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
1549 struct zspage *dst_zspage)
1584 static struct zspage *isolate_src_zspage(struct size_class *class)
1586 struct zspage *zspage;
1590 zspage = list_first_entry_or_null(&class->fullness_list[fg],
1591 struct zspage, list);
1592 if (zspage) {
1593 remove_zspage(class, zspage);
1594 return zspage;
1598 return zspage;
1601 static struct zspage *isolate_dst_zspage(struct size_class *class)
1603 struct zspage *zspage;
1607 zspage = list_first_entry_or_null(&class->fullness_list[fg],
1608 struct zspage, list);
1609 if (zspage) {
1610 remove_zspage(class, zspage);
1611 return zspage;
1615 return zspage;
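The two isolate functions are textually identical in these fragments; what differs is the direction in which they scan the fullness groups, which the fragments do not show. A sketch of the source side, with the loop bounds as assumptions:

static struct zspage *isolate_src_zspage(struct size_class *class)
{
	struct zspage *zspage;
	int fg;

	/* prefer the sparsest zspages: cheapest to drain completely */
	for (fg = ZS_INUSE_RATIO_10; fg <= ZS_INUSE_RATIO_99; fg++) {
		zspage = list_first_entry_or_null(&class->fullness_list[fg],
						  struct zspage, list);
		if (zspage) {
			remove_zspage(class, zspage);
			return zspage;
		}
	}

	return zspage;
}

isolate_dst_zspage() presumably runs the same loop in the opposite direction, preferring nearly-full destinations so that compaction concentrates objects into as few zspages as possible.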
1619 * putback_zspage - add @zspage into the right class's fullness list
1621 * @zspage: target page
1623 * Return @zspage's fullness status
1625 static int putback_zspage(struct size_class *class, struct zspage *zspage)
1629 fullness = get_fullness_group(class, zspage);
1630 insert_zspage(class, zspage, fullness);
1637 * To prevent zspage destruction during migration, zspage freeing should
1638 * hold the locks of all pages in the zspage.
1640 static void lock_zspage(struct zspage *zspage)
1648 * may no longer belong to the zspage. This means that we may wait for
1653 migrate_read_lock(zspage);
1654 page = get_first_page(zspage);
1658 migrate_read_unlock(zspage);
1669 migrate_read_unlock(zspage);
1672 migrate_read_lock(zspage);
1675 migrate_read_unlock(zspage);
1679 static void migrate_lock_init(struct zspage *zspage)
1681 rwlock_init(&zspage->lock);
1684 static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock)
1686 read_lock(&zspage->lock);
1689 static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
1691 read_unlock(&zspage->lock);
1694 static void migrate_write_lock(struct zspage *zspage)
1696 write_lock(&zspage->lock);
1699 static void migrate_write_unlock(struct zspage *zspage)
1701 write_unlock(&zspage->lock);
1708 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
1715 page = get_first_page(zspage);
1724 create_page_chain(class, zspage, pages);
1726 if (unlikely(ZsHugePage(zspage)))
1734 * The page is locked, so the zspage cannot be destroyed. For details, look at
1747 struct zspage *zspage;
1766 zspage = get_zspage(page);
1767 pool = zspage->pool;
1774 class = zspage_class(pool, zspage);
1777 migrate_write_lock(zspage);
1783 * Here, no user can access any object in the zspage, so it is safe to move.
1802 replace_sub_page(class, zspage, newpage, page);
1804 * Since we have completed the data copy and set up the new zspage structure,
1808 migrate_write_unlock(zspage);
1834 * The caller should hold the page lock of all pages in the zspage.
1835 * Here, we cannot use zspage metadata.
1841 struct zspage *zspage, *tmp;
1857 list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
1858 list_del(&zspage->list);
1859 lock_zspage(zspage);
1862 class = zspage_class(pool, zspage);
1863 __free_zspage(pool, class, zspage);
1883 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
1885 struct page *page = get_first_page(zspage);
1920 struct zspage *src_zspage = NULL;
1921 struct zspage *dst_zspage = NULL;
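These two declarations open the compaction loop. Combining the pieces in this listing (isolate, migrate, putback, free-on-empty), its shape is plausibly as follows, with locking and the termination checks elided:

	while ((src_zspage = isolate_src_zspage(class))) {
		if (!dst_zspage) {
			dst_zspage = isolate_dst_zspage(class);
			if (!dst_zspage)
				break;	/* no destination with spare room */
		}

		/* move objects out of the sparse zspage into the full one */
		migrate_zspage(pool, src_zspage, dst_zspage);

		if (putback_zspage(class, src_zspage) == ZS_INUSE_RATIO_0)
			free_zspage(pool, class, src_zspage);	/* fully drained */
		src_zspage = NULL;

		if (zspage_full(class, dst_zspage)) {
			putback_zspage(class, dst_zspage);
			dst_zspage = NULL;	/* pick a fresh destination */
		}
	}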