Lines matching defs:pool in net/core/page_pool.c

38 #define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
40 #define recycle_stat_inc(pool, __stat) \
42 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
46 #define recycle_stat_add(pool, __stat, val) \
48 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
67 * page_pool_get_stats() - fetch page pool stats
68 * @pool: pool from which page was allocated
77 bool page_pool_get_stats(const struct page_pool *pool,
86 stats->alloc_stats.fast += pool->alloc_stats.fast;
87 stats->alloc_stats.slow += pool->alloc_stats.slow;
88 stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
89 stats->alloc_stats.empty += pool->alloc_stats.empty;
90 stats->alloc_stats.refill += pool->alloc_stats.refill;
91 stats->alloc_stats.waive += pool->alloc_stats.waive;
95 per_cpu_ptr(pool->recycle_stats, cpu);
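A minimal usage sketch for the stats API listed above (not part of the listing): it assumes CONFIG_PAGE_POOL_STATS=y and the declarations from <net/page_pool/helpers.h>; the per-ring struct and function names are hypothetical.

#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

/* Hypothetical per-RX-ring container, for illustration only. */
struct my_rx_ring {
	struct page_pool *page_pool;
};

static void my_drv_collect_pp_stats(struct my_rx_ring *rings, int nr_rings)
{
	struct page_pool_stats stats = { };
	int i;

	/* page_pool_get_stats() accumulates one pool's alloc and recycle
	 * counters into @stats, so calling it once per ring sums them up.
	 */
	for (i = 0; i < nr_rings; i++)
		page_pool_get_stats(rings[i].page_pool, &stats);

	pr_info("pp alloc fast=%llu slow=%llu empty=%llu\n",
		stats.alloc_stats.fast, stats.alloc_stats.slow,
		stats.alloc_stats.empty);
}
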
148 #define alloc_stat_inc(pool, __stat)
149 #define recycle_stat_inc(pool, __stat)
150 #define recycle_stat_add(pool, __stat, val)
153 static bool page_pool_producer_lock(struct page_pool *pool)
154 __acquires(&pool->ring.producer_lock)
159 spin_lock(&pool->ring.producer_lock);
161 spin_lock_bh(&pool->ring.producer_lock);
166 static void page_pool_producer_unlock(struct page_pool *pool,
168 __releases(&pool->ring.producer_lock)
171 spin_unlock(&pool->ring.producer_lock);
173 spin_unlock_bh(&pool->ring.producer_lock);
176 static int page_pool_init(struct page_pool *pool,
182 memcpy(&pool->p, &params->fast, sizeof(pool->p));
183 memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
185 pool->cpuid = cpuid;
188 if (pool->p.flags & ~(PP_FLAG_ALL))
191 if (pool->p.pool_size)
192 ring_qsize = pool->p.pool_size;
202 if (pool->p.flags & PP_FLAG_DMA_MAP) {
203 if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
204 (pool->p.dma_dir != DMA_BIDIRECTIONAL))
208 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
212 if (!(pool->p.flags & PP_FLAG_DMA_MAP))
215 if (!pool->p.max_len)
218 /* pool->p.offset has to be set according to the address
223 pool->has_init_callback = !!pool->slow.init_callback;
226 if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL)) {
227 pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
228 if (!pool->recycle_stats)
231 /* For system page pool instance we use a singular stats object
233 * (also percpu) page pool instance.
235 pool->recycle_stats = &pp_system_recycle_stats;
239 if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
241 if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL))
242 free_percpu(pool->recycle_stats);
247 atomic_set(&pool->pages_state_release_cnt, 0);
250 refcount_set(&pool->user_cnt, 1);
252 if (pool->p.flags & PP_FLAG_DMA_MAP)
253 get_device(pool->p.dev);
258 static void page_pool_uninit(struct page_pool *pool)
260 ptr_ring_cleanup(&pool->ring, NULL);
262 if (pool->p.flags & PP_FLAG_DMA_MAP)
263 put_device(pool->p.dev);
266 if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL))
267 free_percpu(pool->recycle_stats);
272 * page_pool_create_percpu() - create a page pool for a given cpu.
279 struct page_pool *pool;
282 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
283 if (!pool)
286 err = page_pool_init(pool, params, cpuid);
290 err = page_pool_list(pool);
294 return pool;
297 page_pool_uninit(pool);
300 kfree(pool);
306 * page_pool_create() - create a page pool
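An illustrative creation sketch for the constructors documented above, assuming the page_pool_params layout from <net/page_pool/types.h>; the device pointer and pool size are placeholders.

#include <net/page_pool/types.h>
#include <linux/dma-direction.h>
#include <linux/numa.h>

static struct page_pool *my_drv_create_pool(struct device *dev)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,		/* single pages */
		.pool_size	= 1024,		/* ptr_ring size, placeholder */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,		/* required for PP_FLAG_DMA_MAP */
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,	/* sync length for DMA_SYNC_DEV */
		.offset		= 0,		/* rx data offset within the page */
	};

	/* Returns an ERR_PTR() on failure, as page_pool_init() errors
	 * propagate out of page_pool_create().
	 */
	return page_pool_create(&pp);
}
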
315 static void page_pool_return_page(struct page_pool *pool, struct page *page);
318 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
320 struct ptr_ring *r = &pool->ring;
326 alloc_stat_inc(pool, empty);
334 pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
336 /* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
347 pool->alloc.cache[pool->alloc.count++] = page;
354 page_pool_return_page(pool, page);
355 alloc_stat_inc(pool, waive);
359 } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
362 if (likely(pool->alloc.count > 0)) {
363 page = pool->alloc.cache[--pool->alloc.count];
364 alloc_stat_inc(pool, refill);
371 static struct page *__page_pool_get_cached(struct page_pool *pool)
376 if (likely(pool->alloc.count)) {
378 page = pool->alloc.cache[--pool->alloc.count];
379 alloc_stat_inc(pool, fast);
381 page = page_pool_refill_alloc_cache(pool);
387 static void page_pool_dma_sync_for_device(const struct page_pool *pool,
393 dma_sync_size = min(dma_sync_size, pool->p.max_len);
394 dma_sync_single_range_for_device(pool->p.dev, dma_addr,
395 pool->p.offset, dma_sync_size,
396 pool->p.dma_dir);
399 static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
406 * This mapping is kept for lifetime of page, until leaving pool.
408 dma = dma_map_page_attrs(pool->p.dev, page, 0,
409 (PAGE_SIZE << pool->p.order),
410 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC |
412 if (dma_mapping_error(pool->p.dev, dma))
418 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
419 page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
425 dma_unmap_page_attrs(pool->p.dev, dma,
426 PAGE_SIZE << pool->p.order, pool->p.dma_dir,
431 static void page_pool_set_pp_info(struct page_pool *pool,
434 page->pp = pool;
444 if (pool->has_init_callback)
445 pool->slow.init_callback(page, pool->slow.init_arg);
454 static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
460 page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
464 if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
465 unlikely(!page_pool_dma_map(pool, page))) {
470 alloc_stat_inc(pool, slow_high_order);
471 page_pool_set_pp_info(pool, page);
474 pool->pages_state_hold_cnt++;
475 trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
481 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
485 unsigned int pp_flags = pool->p.flags;
486 unsigned int pp_order = pool->p.order;
492 return __page_pool_alloc_page_order(pool, gfp);
495 if (unlikely(pool->alloc.count > 0))
496 return pool->alloc.cache[--pool->alloc.count];
499 memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
501 nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
502 pool->alloc.cache);
510 page = pool->alloc.cache[i];
512 unlikely(!page_pool_dma_map(pool, page))) {
517 page_pool_set_pp_info(pool, page);
518 pool->alloc.cache[pool->alloc.count++] = page;
520 pool->pages_state_hold_cnt++;
521 trace_page_pool_state_hold(pool, page,
522 pool->pages_state_hold_cnt);
526 if (likely(pool->alloc.count > 0)) {
527 page = pool->alloc.cache[--pool->alloc.count];
528 alloc_stat_inc(pool, slow);
540 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
545 page = __page_pool_get_cached(pool);
550 page = __page_pool_alloc_pages_slow(pool, gfp);
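A hypothetical allocation/recycle pair built on page_pool_alloc_pages() above; page_pool_put_full_page() is the helpers.h wrapper around the put path that appears further down in the listing. Function names here are placeholders.

#include <net/page_pool/helpers.h>

static struct page *my_drv_get_rx_page(struct page_pool *pool)
{
	/* Served from pool->alloc.cache or the ptr_ring when possible,
	 * falling back to __page_pool_alloc_pages_slow().
	 */
	return page_pool_alloc_pages(pool, GFP_ATOMIC | __GFP_NOWARN);
}

static void my_drv_done_with_page(struct page_pool *pool, struct page *page)
{
	/* The helper passes dma_sync_size = -1, i.e. sync up to
	 * pool->p.max_len; allow_direct=false since this is assumed to
	 * run outside the pool's NAPI context.
	 */
	page_pool_put_full_page(pool, page, false);
}
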
561 s32 page_pool_inflight(const struct page_pool *pool, bool strict)
563 u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
564 u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
570 trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
581 void __page_pool_release_page_dma(struct page_pool *pool, struct page *page)
585 if (!(pool->p.flags & PP_FLAG_DMA_MAP))
593 /* When page is unmapped, it cannot be returned to our pool */
594 dma_unmap_page_attrs(pool->p.dev, dma,
595 PAGE_SIZE << pool->p.order, pool->p.dma_dir,
605 void page_pool_return_page(struct page_pool *pool, struct page *page)
609 __page_pool_release_page_dma(pool, page);
613 /* This may be the last page returned, releasing the pool, so
614 * it is not safe to reference pool afterwards.
616 count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
617 trace_page_pool_state_release(pool, page, count);
620 /* An optimization would be to call __free_pages(page, pool->p.order)
626 static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
631 ret = ptr_ring_produce(&pool->ring, page);
633 ret = ptr_ring_produce_bh(&pool->ring, page);
636 recycle_stat_inc(pool, ring);
649 struct page_pool *pool)
651 if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
652 recycle_stat_inc(pool, cache_full);
657 pool->alloc.cache[pool->alloc.count++] = page;
658 recycle_stat_inc(pool, cached);
669 * the configured size min(dma_sync_size, pool->max_len).
674 __page_pool_put_page(struct page_pool *pool, struct page *page,
691 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
692 page_pool_dma_sync_for_device(pool, page,
695 if (allow_direct && page_pool_recycle_in_cache(page, pool))
714 recycle_stat_inc(pool, released_refcnt);
715 page_pool_return_page(pool, page);
720 static bool page_pool_napi_local(const struct page_pool *pool)
735 if (READ_ONCE(pool->cpuid) == cpuid)
738 napi = READ_ONCE(pool->p.napi);
743 void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
747 allow_direct = page_pool_napi_local(pool);
749 page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
750 if (page && !page_pool_recycle_in_ring(pool, page)) {
752 recycle_stat_inc(pool, ring_full);
753 page_pool_return_page(pool, page);
760 * @pool: pool from which pages were allocated
773 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
780 allow_direct = page_pool_napi_local(pool);
789 page = __page_pool_put_page(pool, page, -1, allow_direct);
799 in_softirq = page_pool_producer_lock(pool);
801 if (__ptr_ring_produce(&pool->ring, data[i])) {
803 recycle_stat_inc(pool, ring_full);
807 recycle_stat_add(pool, ring, i);
808 page_pool_producer_unlock(pool, in_softirq);
818 page_pool_return_page(pool, data[i]);
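A short sketch of the bulk interface above, which takes the ring producer lock once for a whole batch; all pages must come from the given pool, and the completion-loop shape and batch size are hypothetical.

#include <net/page_pool/types.h>

#define MY_BULK	16	/* placeholder batch size */

static void my_drv_tx_clean(struct page_pool *pool, struct page **done, int n)
{
	void *bulk[MY_BULK];
	int i, cnt = 0;

	for (i = 0; i < n; i++) {
		bulk[cnt++] = done[i];
		if (cnt == MY_BULK) {
			page_pool_put_page_bulk(pool, bulk, cnt);
			cnt = 0;
		}
	}
	if (cnt)
		page_pool_put_page_bulk(pool, bulk, cnt);
}
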
822 static struct page *page_pool_drain_frag(struct page_pool *pool,
825 long drain_count = BIAS_MAX - pool->frag_users;
832 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
833 page_pool_dma_sync_for_device(pool, page, -1);
838 page_pool_return_page(pool, page);
842 static void page_pool_free_frag(struct page_pool *pool)
844 long drain_count = BIAS_MAX - pool->frag_users;
845 struct page *page = pool->frag_page;
847 pool->frag_page = NULL;
852 page_pool_return_page(pool, page);
855 struct page *page_pool_alloc_frag(struct page_pool *pool,
859 unsigned int max_size = PAGE_SIZE << pool->p.order;
860 struct page *page = pool->frag_page;
866 *offset = pool->frag_offset;
869 page = page_pool_drain_frag(pool, page);
871 alloc_stat_inc(pool, fast);
877 page = page_pool_alloc_pages(pool, gfp);
879 pool->frag_page = NULL;
883 pool->frag_page = page;
886 pool->frag_users = 1;
888 pool->frag_offset = size;
893 pool->frag_users++;
894 pool->frag_offset = *offset + size;
895 alloc_stat_inc(pool, fast);
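A fragment-allocation sketch against page_pool_alloc_frag() above; the requested size has to fit within PAGE_SIZE << pool->p.order, and the function name is a placeholder.

#include <net/page_pool/helpers.h>
#include <linux/mm.h>

/* Carve a sub-page buffer out of a (possibly shared) pool page. */
static void *my_drv_alloc_buf(struct page_pool *pool, unsigned int size)
{
	unsigned int offset;
	struct page *page;

	page = page_pool_alloc_frag(pool, &offset, size, GFP_ATOMIC);
	if (!page)
		return NULL;

	/* The pool->frag_offset bookkeeping above hands out successive
	 * chunks of the same page until it is exhausted.
	 */
	return page_address(page) + offset;
}
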
900 static void page_pool_empty_ring(struct page_pool *pool)
905 while ((page = ptr_ring_consume_bh(&pool->ring))) {
911 page_pool_return_page(pool, page);
915 static void __page_pool_destroy(struct page_pool *pool)
917 if (pool->disconnect)
918 pool->disconnect(pool);
920 page_pool_unlist(pool);
921 page_pool_uninit(pool);
922 kfree(pool);
925 static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
929 if (pool->destroy_cnt)
936 while (pool->alloc.count) {
937 page = pool->alloc.cache[--pool->alloc.count];
938 page_pool_return_page(pool, page);
942 static void page_pool_scrub(struct page_pool *pool)
944 page_pool_empty_alloc_cache_once(pool);
945 pool->destroy_cnt++;
950 page_pool_empty_ring(pool);
953 static int page_pool_release(struct page_pool *pool)
957 page_pool_scrub(pool);
958 inflight = page_pool_inflight(pool, true);
960 __page_pool_destroy(pool);
968 struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
972 inflight = page_pool_release(pool);
977 netdev = READ_ONCE(pool->slow.netdev);
978 if (time_after_eq(jiffies, pool->defer_warn) &&
980 int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;
982 pr_warn("%s() stalled pool shutdown: id %u, %d inflight %d sec\n",
983 __func__, pool->user.id, inflight, sec);
984 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
988 schedule_delayed_work(&pool->release_dw, DEFER_TIME);
991 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
994 refcount_inc(&pool->user_cnt);
995 pool->disconnect = disconnect;
996 pool->xdp_mem_id = mem->id;
999 static void page_pool_disable_direct_recycling(struct page_pool *pool)
1001 /* Disable direct recycling based on pool->cpuid.
1004 WRITE_ONCE(pool->cpuid, -1);
1006 if (!pool->p.napi)
1010 * pool and NAPI are unlinked when NAPI is disabled.
1012 WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state) ||
1013 READ_ONCE(pool->p.napi->list_owner) != -1);
1015 WRITE_ONCE(pool->p.napi, NULL);
1018 void page_pool_destroy(struct page_pool *pool)
1020 if (!pool)
1023 if (!page_pool_put(pool))
1026 page_pool_disable_direct_recycling(pool);
1027 page_pool_free_frag(pool);
1029 if (!page_pool_release(pool))
1032 page_pool_detached(pool);
1033 pool->defer_start = jiffies;
1034 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
1036 INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
1037 schedule_delayed_work(&pool->release_dw, DEFER_TIME);
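A hedged teardown sketch: page_pool_destroy() above either frees the pool right away or arms the deferred-release work while pages are still in flight, so a driver normally needs only the single call. The wrapper name is hypothetical.

#include <net/page_pool/types.h>

static void my_drv_free_rx_ring(struct page_pool **pool)
{
	/* Unregister any XDP memory model tied to this pool first;
	 * otherwise page_pool_put() still sees an outstanding user and
	 * the actual destroy is deferred until that reference drops.
	 */
	page_pool_destroy(*pool);	/* NULL-safe, may defer the free */
	*pool = NULL;
}
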
1042 void page_pool_update_nid(struct page_pool *pool, int new_nid)
1046 trace_page_pool_update_nid(pool, new_nid);
1047 pool->p.nid = new_nid;
1049 /* Flush pool alloc cache, as refill will check NUMA node */
1050 while (pool->alloc.count) {
1051 page = pool->alloc.cache[--pool->alloc.count];
1052 page_pool_return_page(pool, page);