Lines matching refs:pcp (mm/page_alloc.c; each result line is prefixed with its source line number)

62 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
109 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
563 if (pcp_allowed_order(order)) /* Via pcp? */
1189 struct per_cpu_pages *pcp,
1201 count = min(pcp->count, count);
1217 list = &pcp->lists[pindex];
1228 /* must delete to avoid corrupting pcp list */
1231 pcp->count -= nr_pages;
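
The free_pcppages_bulk() fragments above show the request being clamped to pcp->count, pages being unlinked from the selected per-index list before they are handed back, and pcp->count being decremented afterwards. A minimal user-space sketch of that shape, assuming a simplified stand-in struct and singly linked lists rather than the kernel's types:

/* Bounded bulk free from one per-cpu list (simplified model). */
#include <stddef.h>

#define NR_PCP_LISTS 4                  /* assumption: arbitrary small number */

struct page_model { struct page_model *next; };

struct pcp_bulk_model {
        int count;                      /* total pages across all lists */
        struct page_model *lists[NR_PCP_LISTS];
};

/* Free up to 'count' pages from list 'pindex'; never more than pcp->count. */
static int model_free_pcppages_bulk(struct pcp_bulk_model *pcp, int count, int pindex)
{
        int freed = 0;

        if (count > pcp->count)         /* count = min(pcp->count, count) */
                count = pcp->count;

        while (freed < count && pcp->lists[pindex]) {
                struct page_model *page = pcp->lists[pindex];

                pcp->lists[pindex] = page->next;  /* unlink first, keep the list sane */
                page->next = NULL;
                /* ...hand 'page' back to the buddy free lists here... */
                freed++;
        }
        pcp->count -= freed;            /* mirrors pcp->count -= nr_pages */
        return freed;
}
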
2164 int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
2169 high_min = READ_ONCE(pcp->high_min);
2170 batch = READ_ONCE(pcp->batch);
2172 * Decrease pcp->high periodically to try to free possible
2174 * control latency. This caps pcp->high decrement too.
2176 if (pcp->high > high_min) {
2177 pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
2178 pcp->high - (pcp->high >> 3), high_min);
2179 if (pcp->high > high_min)
2183 to_drain = pcp->count - pcp->high;
2185 spin_lock(&pcp->lock);
2186 free_pcppages_bulk(zone, to_drain, pcp, 0);
2187 spin_unlock(&pcp->lock);
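
The decay_pcp_high() fragments above contain the whole decay formula. A minimal sketch of it, assuming a simplified struct and a stand-in value for CONFIG_PCP_BATCH_SCALE_MAX: shave roughly 1/8 off pcp->high per call, never below high_min and never so far that more than one scaled batch would have to be drained at once; whatever then sits above high is drained under pcp->lock.

#define SCALE_MAX 5     /* stand-in for CONFIG_PCP_BATCH_SCALE_MAX */

struct pcp_decay_model {
        int count, high, high_min, batch;
};

static int max3i(int a, int b, int c)
{
        int m = a > b ? a : b;
        return m > c ? m : c;
}

/* Returns how many pages the caller should drain back to the buddy lists. */
static int model_decay_pcp_high(struct pcp_decay_model *pcp)
{
        if (pcp->high > pcp->high_min)
                pcp->high = max3i(pcp->count - (pcp->batch << SCALE_MAX),
                                  pcp->high - (pcp->high >> 3),
                                  pcp->high_min);

        return pcp->count > pcp->high ? pcp->count - pcp->high : 0;
}
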
2200 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2204 batch = READ_ONCE(pcp->batch);
2205 to_drain = min(pcp->count, batch);
2207 spin_lock(&pcp->lock);
2208 free_pcppages_bulk(zone, to_drain, pcp, 0);
2209 spin_unlock(&pcp->lock);
2219 struct per_cpu_pages *pcp;
2221 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2222 if (pcp->count) {
2223 spin_lock(&pcp->lock);
2224 free_pcppages_bulk(zone, pcp->count, pcp, 0);
2225 spin_unlock(&pcp->lock);
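
Two drain flavours are visible above: drain_zone_pages() trims at most one batch per call (to_drain = min(pcp->count, batch)), while the per-zone drain empties the whole per-cpu cache. A sketch on a simplified model, with locking and the real free path elided and model_free_bulk() standing in for free_pcppages_bulk():

struct pcp_drain_model {
        int count, batch;
};

static void model_free_bulk(struct pcp_drain_model *pcp, int nr)
{
        pcp->count -= nr;               /* pages return to the buddy free lists */
}

/* drain_zone_pages(): gentle periodic trim, one batch at most. */
static void model_drain_zone_pages(struct pcp_drain_model *pcp)
{
        int to_drain = pcp->count < pcp->batch ? pcp->count : pcp->batch;

        if (to_drain > 0)
                model_free_bulk(pcp, to_drain);
}

/* Full drain: flush everything this CPU has cached for the zone. */
static void model_drain_pages_zone(struct pcp_drain_model *pcp)
{
        if (pcp->count)
                model_free_bulk(pcp, pcp->count);
}
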
2260 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
2292 struct per_cpu_pages *pcp;
2298 * The pcp.count check is racy, some callers need a
2303 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2304 if (pcp->count)
2308 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
2309 if (pcp->count) {
2355 static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high)
2361 return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX);
2367 /* Leave at least pcp->batch pages on the list */
2375 batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free);
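
The nr_pcp_free() fragments above size a bulk free: when a high-order freeing burst was detected (free_high), free as much as the per-call cap allows; otherwise free a run sized by the recent free streak, clamped so at least one batch stays cached. A sketch of that logic; the high < batch guard for the disabled/boot pageset case is an assumption not shown in the fragments:

#define SCALE_MAX 5     /* stand-in for CONFIG_PCP_BATCH_SCALE_MAX */

static int model_nr_pcp_free(int count, int free_count, int batch, int high,
                             int free_high)
{
        int min_nr_free, max_nr_free;

        if (free_high) {
                int cap = batch << SCALE_MAX;
                return count < cap ? count : cap;   /* min(pcp->count, cap) */
        }

        if (high < batch)       /* assumed: pcp disabled or boot pageset */
                return 1;

        min_nr_free = batch;    /* leave at least pcp->batch pages on the list */
        max_nr_free = high - batch;

        /* Longer free streaks => bigger bulk frees, fewer zone-lock round trips. */
        if (free_count < min_nr_free)
                return min_nr_free;
        if (free_count > max_nr_free)
                return max_nr_free;
        return free_count;
}
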
2380 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
2385 high_min = READ_ONCE(pcp->high_min);
2386 high_max = READ_ONCE(pcp->high_max);
2387 high = pcp->high = clamp(pcp->high, high_min, high_max);
2393 pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
2400 * stored on pcp lists
2403 int free_count = max_t(int, pcp->free_count, batch);
2405 pcp->high = max(high - free_count, high_min);
2406 return min(batch << 2, pcp->high);
2413 int free_count = max_t(int, pcp->free_count, batch);
2415 pcp->high = max(high - free_count, high_min);
2416 high = max(pcp->count, high_min);
2417 } else if (pcp->count >= high) {
2418 int need_high = pcp->free_count + batch;
2420 /* pcp->high should be large enough to hold batch freed pages */
2421 if (pcp->high < need_high)
2422 pcp->high = clamp(need_high, high_min, high_max);
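
The nr_pcp_high() fragments above are the auto-tuning of the per-cpu high target. A simplified sketch of the visible behaviour, assuming the two zone-pressure conditions are passed in as plain flags and with the exact return values simplified: clamp high into [high_min, high_max], shrink it hard after a high-order freeing burst, shrink it gently while the zone is under pressure, and grow it when this CPU keeps freeing past the current target.

#define SCALE_MAX 5     /* stand-in for CONFIG_PCP_BATCH_SCALE_MAX */

struct pcp_high_model {
        int count, high, high_min, high_max, batch, free_count;
};

static int imax(int a, int b) { return a > b ? a : b; }
static int imin(int a, int b) { return a < b ? a : b; }
static int iclamp(int v, int lo, int hi) { return imax(lo, imin(v, hi)); }

/* Returns the threshold the caller compares pcp->count against. */
static int model_nr_pcp_high(struct pcp_high_model *pcp, int free_high,
                             int reclaim_active, int below_high_wmark)
{
        int high = pcp->high = iclamp(pcp->high, pcp->high_min, pcp->high_max);
        int free_count = imax(pcp->free_count, pcp->batch);

        if (free_high) {
                /* High-order freeing burst: pull high down hard, drain now. */
                pcp->high = imax(high - (pcp->batch << SCALE_MAX), pcp->high_min);
                return 0;
        }
        if (reclaim_active) {
                /* Reclaim is running: cache far less on this CPU. */
                pcp->high = imax(high - free_count, pcp->high_min);
                return imin(pcp->batch << 2, pcp->high);
        }
        if (below_high_wmark) {
                /* Zone short on free pages: shrink the target as well. */
                pcp->high = imax(high - free_count, pcp->high_min);
                return imax(pcp->count, pcp->high_min);
        }
        if (pcp->count >= high) {
                /* Freeing faster than the target: leave room for one more batch. */
                int need_high = pcp->free_count + pcp->batch;

                if (pcp->high < need_high)
                        pcp->high = iclamp(need_high, pcp->high_min, pcp->high_max);
        }
        return high;
}
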
2428 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
2441 pcp->alloc_factor >>= 1;
2444 list_add(&page->pcp_list, &pcp->lists[pindex]);
2445 pcp->count += 1 << order;
2447 batch = READ_ONCE(pcp->batch);
2455 free_high = (pcp->free_count >= batch &&
2456 (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) &&
2457 (!(pcp->flags & PCPF_FREE_HIGH_BATCH) ||
2458 pcp->count >= READ_ONCE(batch)));
2459 pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER;
2460 } else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) {
2461 pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER;
2463 if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX))
2464 pcp->free_count += (1 << order);
2465 high = nr_pcp_high(pcp, zone, batch, free_high);
2466 if (pcp->count >= high) {
2467 free_pcppages_bulk(zone, nr_pcp_free(pcp, batch, high, free_high),
2468 pcp, pindex);
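
free_unref_page_commit() above ties those pieces together: cache the freed page on the per-cpu list, track a streak of high-order frees, then trim if the list grew past the high target. A sketch of that flow on a simplified model; the flag bits, the COSTLY_ORDER value and the inlined stand-ins for nr_pcp_high()/nr_pcp_free()/free_pcppages_bulk() are assumptions:

#define PREV_FREE_HIGH_ORDER 0x1        /* stand-in for PCPF_PREV_FREE_HIGH_ORDER */
#define FREE_HIGH_BATCH      0x2        /* stand-in for PCPF_FREE_HIGH_BATCH */
#define SCALE_MAX            5          /* stand-in for CONFIG_PCP_BATCH_SCALE_MAX */
#define COSTLY_ORDER         3          /* stand-in for PAGE_ALLOC_COSTLY_ORDER */

struct pcp_commit_model {
        int count, high, batch, free_count;
        unsigned int flags;
};

static void model_free_commit(struct pcp_commit_model *pcp, unsigned int order)
{
        int high, nr, free_high = 0;

        pcp->count += 1 << order;       /* the page was list_add()ed to pcp->lists[] */

        if (order && order <= COSTLY_ORDER) {
                /* A sustained run of high-order frees marks a freeing burst. */
                free_high = (pcp->free_count >= pcp->batch &&
                             (pcp->flags & PREV_FREE_HIGH_ORDER) &&
                             (!(pcp->flags & FREE_HIGH_BATCH) ||
                              pcp->count >= pcp->batch));
                pcp->flags |= PREV_FREE_HIGH_ORDER;
        } else if (pcp->flags & PREV_FREE_HIGH_ORDER) {
                pcp->flags &= ~PREV_FREE_HIGH_ORDER;
        }

        if (pcp->free_count < (pcp->batch << SCALE_MAX))
                pcp->free_count += 1 << order;

        high = pcp->high;               /* nr_pcp_high() retunes this in the kernel */
        if (pcp->count >= high) {
                /* nr_pcp_free() sizes the trim, free_pcppages_bulk() performs it. */
                nr = free_high ? pcp->count : pcp->batch;
                if (nr > pcp->count)
                        nr = pcp->count;
                pcp->count -= nr;
        }
}
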
2477 * Free a pcp page
2482 struct per_cpu_pages *pcp;
2491 * We only track unmovable, reclaimable and movable on pcp lists.
2508 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2509 if (pcp) {
2510 free_unref_page_commit(zone, pcp, page, pcpmigratetype, order);
2511 pcp_spin_unlock(pcp);
2524 struct per_cpu_pages *pcp = NULL;
2565 /* Different zone requires a different pcp lock */
2567 if (pcp) {
2568 pcp_spin_unlock(pcp);
2577 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2578 if (unlikely(!pcp)) {
2591 * to the MIGRATE_MOVABLE pcp list.
2597 free_unref_page_commit(zone, pcp, &folio->page, migratetype,
2601 if (pcp) {
2602 pcp_spin_unlock(pcp);
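
The batched folio free above juggles per-zone pcp locks: keep one zone's lock across consecutive pages, drop and re-take it when the zone changes, and fall back to a slower path when the trylock fails. A user-space sketch of that pattern, with pthread mutexes standing in for the per-cpu spinlock and the zone/folio types reduced to stubs:

#include <pthread.h>
#include <stddef.h>

struct zone_model {
        pthread_mutex_t pcp_lock;       /* stand-in for the pcp spinlock */
};

struct folio_model {
        struct zone_model *zone;
};

static void model_free_folio_batch(struct folio_model **folios, size_t n)
{
        struct zone_model *locked_zone = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
                struct zone_model *zone = folios[i]->zone;

                if (zone != locked_zone) {
                        if (locked_zone)        /* different zone, different pcp lock */
                                pthread_mutex_unlock(&locked_zone->pcp_lock);
                        locked_zone = NULL;
                        if (pthread_mutex_trylock(&zone->pcp_lock) != 0) {
                                /* Contended: free this folio via the slow path. */
                                continue;
                        }
                        locked_zone = zone;
                }
                /* ...free_unref_page_commit() equivalent runs here... */
        }
        if (locked_zone)
                pthread_mutex_unlock(&locked_zone->pcp_lock);
}
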
2761 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order)
2766 base_batch = READ_ONCE(pcp->batch);
2767 high_min = READ_ONCE(pcp->high_min);
2768 high_max = READ_ONCE(pcp->high_max);
2769 high = pcp->high = clamp(pcp->high, high_min, high_max);
2778 batch = (base_batch << pcp->alloc_factor);
2781 * If we had larger pcp->high, we could avoid to allocate from
2785 high = pcp->high = min(high + batch, high_max);
2788 max_nr_alloc = max(high - pcp->count - base_batch, base_batch);
2794 pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX)
2795 pcp->alloc_factor++;
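
nr_pcp_alloc() above sizes a refill from the buddy lists: order-0 refills start at one batch and double on consecutive allocations (alloc_factor), capped so the list cannot overshoot pcp->high, while high itself is nudged upward as long as there is headroom. A sketch of the order-0 path; the zone-watermark test and order > 0 handling are left out:

#define SCALE_MAX 5     /* stand-in for CONFIG_PCP_BATCH_SCALE_MAX */

struct pcp_alloc_model {
        int count, high, high_min, high_max, batch;
        unsigned int alloc_factor;
};

static int model_nr_pcp_alloc(struct pcp_alloc_model *pcp)
{
        int high, batch, max_nr_alloc;

        high = pcp->high;
        if (high < pcp->high_min)
                high = pcp->high_min;
        if (high > pcp->high_max)
                high = pcp->high_max;

        batch = pcp->batch << pcp->alloc_factor;        /* doubles per refill */

        /* Grow the target one scaled batch at a time while headroom exists. */
        high = high + batch < pcp->high_max ? high + batch : pcp->high_max;
        pcp->high = high;

        /* Never refill past high, but always fetch at least one batch. */
        max_nr_alloc = high - pcp->count - pcp->batch;
        if (max_nr_alloc < pcp->batch)
                max_nr_alloc = pcp->batch;

        if (batch <= max_nr_alloc && pcp->alloc_factor < SCALE_MAX)
                pcp->alloc_factor++;    /* allow an even bigger refill next time */

        return batch < max_nr_alloc ? batch : max_nr_alloc;
}
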
2816 struct per_cpu_pages *pcp,
2823 int batch = nr_pcp_alloc(pcp, zone, order);
2830 pcp->count += alloced << order;
2837 pcp->count -= 1 << order;
2848 struct per_cpu_pages *pcp;
2855 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2856 if (!pcp) {
2866 pcp->free_count >>= 1;
2867 list = &pcp->lists[order_to_pindex(migratetype, order)];
2868 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
2869 pcp_spin_unlock(pcp);
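
The allocation fast path above takes the pcp lock with a trylock, halves free_count (an allocation interrupts a freeing streak), picks the list for this order and migratetype, and pops a page, refilling one batch from the buddy allocator first if the list is empty. A sketch of the pop-or-refill step; the trylock, the pindex selection and the 'refill' callback (standing in for the buddy-side bulk remove) are simplified assumptions:

#include <stddef.h>

struct page_model { struct page_model *next; };

struct pcp_list_model {
        int count, free_count;
        struct page_model *list;        /* one of pcp->lists[order_to_pindex(...)] */
};

static struct page_model *
model_rmqueue_pcplist(struct pcp_list_model *pcp, unsigned int order, int batch,
                      struct page_model *(*refill)(int nr))
{
        struct page_model *page;

        pcp->free_count >>= 1;          /* an allocation halves the free streak */

        if (!pcp->list) {
                /* refill() is assumed to return a full batch chained via ->next,
                 * or NULL if the buddy lists are exhausted. */
                pcp->list = refill(batch);
                if (!pcp->list)
                        return NULL;
                pcp->count += batch << order;
        }

        page = pcp->list;               /* pop the head page */
        pcp->list = page->next;
        pcp->count -= 1 << order;
        return page;
}
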
3248 * watermark. If so, we will decrease pcp->high and free
4396 struct per_cpu_pages *pcp;
4476 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
4477 if (!pcp)
4481 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
4491 pcp, pcp_list);
4495 pcp_spin_unlock(pcp);
4510 pcp_spin_unlock(pcp);
5235 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
5426 * By default, the high value of the pcp is based on the zone
5446 * prematurely due to pages stored on pcp lists.
5466 * pcp->high and pcp->batch values are related and generally batch is lower
5467 * than high. They are also related to pcp->count such that count is lower
5473 * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max
5475 * fully trust only the pcp->count field on the local CPU with interrupts
5482 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min,
5485 WRITE_ONCE(pcp->batch, batch);
5486 WRITE_ONCE(pcp->high_min, high_min);
5487 WRITE_ONCE(pcp->high_max, high_max);
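
The comment block and pageset_update() above describe the publication rule: batch, high_min and high_max are written with WRITE_ONCE() so that readers on other CPUs, which use READ_ONCE() without holding the pcp lock, never see a torn value; only pcp->count is fully trusted, and only locally. A user-space analogue of that pattern, with C11 relaxed atomics playing the role of WRITE_ONCE/READ_ONCE:

#include <stdatomic.h>

struct pcp_knobs_model {
        _Atomic int batch;
        _Atomic int high_min;
        _Atomic int high_max;
};

static void model_pageset_update(struct pcp_knobs_model *pcp,
                                 int high_min, int high_max, int batch)
{
        /* Plain value updates, but tear-free and visible to concurrent readers. */
        atomic_store_explicit(&pcp->batch, batch, memory_order_relaxed);
        atomic_store_explicit(&pcp->high_min, high_min, memory_order_relaxed);
        atomic_store_explicit(&pcp->high_max, high_max, memory_order_relaxed);
}

static int model_read_batch(struct pcp_knobs_model *pcp)
{
        return atomic_load_explicit(&pcp->batch, memory_order_relaxed);
}
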
5490 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
5494 memset(pcp, 0, sizeof(*pcp));
5497 spin_lock_init(&pcp->lock);
5499 INIT_LIST_HEAD(&pcp->lists[pindex]);
5507 pcp->high_min = BOOT_PAGESET_HIGH;
5508 pcp->high_max = BOOT_PAGESET_HIGH;
5509 pcp->batch = BOOT_PAGESET_BATCH;
5510 pcp->free_count = 0;
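
per_cpu_pages_init() above zeroes the structure, initialises the lock and every per-order/migratetype list head, and installs conservative boot-time defaults so the pageset works before the real high/batch values are computed. A sketch of that setup; NR_PCP_LISTS, the boot values and the pthread mutex are stand-ins for the kernel's definitions:

#include <pthread.h>
#include <string.h>

#define NR_PCP_LISTS   12       /* assumption: one list per order/migratetype bucket */
#define BOOT_PCP_HIGH   0       /* stand-in for BOOT_PAGESET_HIGH */
#define BOOT_PCP_BATCH  1       /* stand-in for BOOT_PAGESET_BATCH */

struct list_head_model { struct list_head_model *next, *prev; };

struct pcp_init_model {
        pthread_mutex_t lock;
        int count, high, high_min, high_max, batch, free_count;
        struct list_head_model lists[NR_PCP_LISTS];
};

static void model_per_cpu_pages_init(struct pcp_init_model *pcp)
{
        int i;

        memset(pcp, 0, sizeof(*pcp));
        pthread_mutex_init(&pcp->lock, NULL);
        for (i = 0; i < NR_PCP_LISTS; i++) {
                pcp->lists[i].next = &pcp->lists[i];    /* empty circular list */
                pcp->lists[i].prev = &pcp->lists[i];
        }

        /* Boot defaults: tiny batch, no caching until properly sized. */
        pcp->high_min = BOOT_PCP_HIGH;
        pcp->high_max = BOOT_PCP_HIGH;
        pcp->batch    = BOOT_PCP_BATCH;
        pcp->free_count = 0;
}
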
5516 struct per_cpu_pages *pcp;
5520 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5521 pageset_update(pcp, high_min, high_max, batch);
5571 struct per_cpu_pages *pcp;
5574 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5576 per_cpu_pages_init(pcp, pzstats);
5595 struct per_cpu_pages *pcp;
5598 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5601 * If data cache slice of CPU is large enough, "pcp->batch"
5607 spin_lock(&pcp->lock);
5608 if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch)
5609 pcp->flags |= PCPF_FREE_HIGH_BATCH;
5611 pcp->flags &= ~PCPF_FREE_HIGH_BATCH;
5612 spin_unlock(&pcp->lock);
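
The cacheinfo-driven tuning above flips PCPF_FREE_HIGH_BATCH per CPU: if a CPU's share of the data cache can hold more than three batches of pages, keep caching aggressively even during high-order freeing bursts; otherwise clear the flag. A sketch of that check; the flag value and the page-size handling are simplified assumptions:

#define MODEL_PAGE_SHIFT 12             /* 4 KiB pages assumed */
#define FREE_HIGH_BATCH  0x2            /* stand-in for PCPF_FREE_HIGH_BATCH */

struct pcp_cache_model {
        unsigned int batch;
        unsigned int flags;
};

static void model_update_cacheinfo(struct pcp_cache_model *pcp,
                                   unsigned long per_cpu_data_slice_size)
{
        /* Done under pcp->lock in the kernel; the listed condition is
         * (slice_size >> PAGE_SHIFT) > 3 * pcp->batch. */
        if ((per_cpu_data_slice_size >> MODEL_PAGE_SHIFT) > 3u * pcp->batch)
                pcp->flags |= FREE_HIGH_BATCH;
        else
                pcp->flags &= ~FREE_HIGH_BATCH;
}
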
5765 "mm/page_alloc:pcp",
6109 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
6127 /* Sanity checking to avoid pcp imbalance */