Searched refs:pools (Results 1 - 25 of 35) sorted by last modified time


/linux-master/kernel/dma/
swiotlb.c
91 .pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
310 list_add_rcu(&pool->node, &mem->pools);
618 * Allocate from the atomic pools if memory is encrypted and
781 list_for_each_entry_rcu(pool, &mem->pools, node) {
1125 * swiotlb_search_area() - search one memory area in all pools
1135 * Search one memory area in all pools for a sequence of slots that match the
1150 list_for_each_entry_rcu(pool, &mem->pools, node) {
1240 * Second, the load from mem->pools must be ordered before the same
1331 list_for_each_entry_rcu(pool, &mem->pools, nod
[all...]
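
The swiotlb lines above all follow one pattern: walk mem->pools under RCU. A minimal sketch of that lookup, assuming the start/end fields of struct io_tlb_pool, with my_find_pool() as a hypothetical name:

    /* Find the IO TLB pool covering a physical address; mirrors the
     * list_for_each_entry_rcu() walks quoted above. */
    static struct io_tlb_pool *my_find_pool(struct io_tlb_mem *mem,
                                            phys_addr_t paddr)
    {
            struct io_tlb_pool *pool;

            rcu_read_lock();
            list_for_each_entry_rcu(pool, &mem->pools, node) {
                    if (paddr >= pool->start && paddr < pool->end) {
                            rcu_read_unlock();
                            return pool;
                    }
            }
            rcu_read_unlock();
            return NULL;
    }
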
/linux-master/kernel/
workqueue.c
20 * automatically managed. There are two worker pools for each CPU (one for
22 * pools for workqueues which are not bound to any specific CPU - the
23 * number of these backing pools is dynamic.
105 NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */
224 int refcnt; /* PL: refcnt for unbound pools */
433 static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
469 /* to raise softirq for the BH worker pools on other CPUs */
473 /* the BH worker pools */
477 /* the per-cpu worker pools */
481 static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
5354 struct worker_pool __percpu *pools; local
[all...]
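
The workqueue comments describe fixed per-CPU worker pools plus dynamic unbound ones; the "pools" local at line 5354 is a __percpu pointer. A sketch of that per-CPU allocation pattern, with my_pool/my_pools as hypothetical names:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/errno.h>

    struct my_pool { int cpu; };                    /* hypothetical */
    static struct my_pool __percpu *my_pools;

    static int my_pools_init(void)
    {
            int cpu;

            my_pools = alloc_percpu(struct my_pool);
            if (!my_pools)
                    return -ENOMEM;
            /* Each CPU gets its own instance, reached via per_cpu_ptr(). */
            for_each_possible_cpu(cpu)
                    per_cpu_ptr(my_pools, cpu)->cpu = cpu;
            return 0;
    }
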
/linux-master/drivers/md/
dm.c
3165 void dm_free_md_mempools(struct dm_md_mempools *pools) argument
3167 if (!pools)
3170 bioset_exit(&pools->bs);
3171 bioset_exit(&pools->io_bs);
3173 kfree(pools);
dm-thin.c
522 * A global list of pools that uses a struct mapped_device as a key.
526 struct list_head pools; member in struct:dm_thin_pool_table
532 INIT_LIST_HEAD(&dm_thin_pool_table.pools);
543 list_add(&pool->list, &dm_thin_pool_table.pools);
558 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
574 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
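
dm-thin keeps a flat, mutex-protected list of pools and looks entries up by their mapped_device key, as the two list_for_each_entry() lines show. A sketch of such a lookup, with the pool_md field name assumed:

    /* Return the pool bound to this mapped_device, or NULL. Callers
     * must hold the pool-table mutex, as dm-thin does. */
    static struct pool *my_pool_table_lookup(struct mapped_device *md)
    {
            struct pool *tmp;

            list_for_each_entry(tmp, &dm_thin_pool_table.pools, list)
                    if (tmp->pool_md == md)
                            return tmp;
            return NULL;
    }
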
dm-table.c
1024 struct dm_md_mempools *pools; local
1031 pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
1032 if (!pools)
1053 if (bioset_init(&pools->io_bs, pool_size, io_front_pad,
1057 bioset_integrity_create(&pools->io_bs, pool_size))
1060 if (bioset_init(&pools->bs, pool_size, front_pad, 0))
1063 bioset_integrity_create(&pools->bs, pool_size))
1066 t->mempools = pools;
1070 dm_free_md_mempools(pools);
[all...]
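
dm-table's shape here is allocate, initialize each bioset, and unwind through dm_free_md_mempools() on any failure. A condensed sketch using the variables from the snippet (pad and flag values elided, integrity steps trimmed):

    pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
    if (!pools)
            return -ENOMEM;

    if (bioset_init(&pools->io_bs, pool_size, io_front_pad, 0))
            goto out_free_pools;
    if (bioset_init(&pools->bs, pool_size, front_pad, 0))
            goto out_free_pools;

    t->mempools = pools;
    return 0;

    out_free_pools:
    /* Safe on partly initialized pools: the struct was zeroed and
     * bioset_exit() tolerates an uninitialized bio_set. */
    dm_free_md_mempools(pools);
    return -ENOMEM;
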
dm.h
224 void dm_free_md_mempools(struct dm_md_mempools *pools);
/linux-master/arch/powerpc/kernel/
iommu.c
100 * The hash is important to spread CPUs across all the pools. For example,
102 * with 4 pools all primary threads would map to the same pool.
252 pool = &(tbl->pools[pool_nr]);
280 pool = &(tbl->pools[0]);
299 /* Now try scanning all the other pools */
302 pool = &tbl->pools[pool_nr];
429 p = &tbl->pools[pool_nr];
755 p = &tbl->pools[i];
1122 spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
1132 spin_unlock(&tbl->pools[
[all...]
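
The iommu.c comment at line 100 is about pool selection: POWER8 primary SMT threads are CPUs 0, 8, 16, ..., so a naive modulo over 4 pools would put every primary thread in pool 0. Illustrative arithmetic only, not the exact kernel hash:

    /* naive:  cpu % 4               -> 0, 0, 0, 0 for cpus 0, 8, 16, 24
     * hashed: (cpu ^ (cpu >> 3)) % 4 -> 0, 1, 2, 3 (spread across pools) */
    static unsigned int naive_pool(unsigned int cpu)  { return cpu % 4; }
    static unsigned int hashed_pool(unsigned int cpu) { return (cpu ^ (cpu >> 3)) % 4; }
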
/linux-master/drivers/soc/fsl/qbman/
qman.c
1763 void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools) argument
1768 pools &= p->config->pools;
1769 p->sdqcr |= pools;
/linux-master/include/linux/
swiotlb.h
109 * @nslabs: Total number of IO TLB slabs in all pools.
113 * @can_grow: %true if more pools can be allocated dynamically.
116 * @pools: List of IO TLB memory pool descriptors (if dynamic).
123 * @transient_nslabs: The total number of slots in all transient pools that
136 struct list_head pools; member in struct:io_tlb_mem
185 * dev->dma_uses_io_tlb here and mem->pools in swiotlb_find_pool().
/linux-master/tools/net/ynl/samples/
page-pool.c
60 struct netdev_page_pool_get_list *pools; local
76 pools = netdev_page_pool_get_dump(ys);
77 if (!pools)
80 ynl_dump_foreach(pools, pp) {
87 netdev_page_pool_get_list_free(pools);
124 printf("page pools: %u (zombies: %u)\n",
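
The ynl sample's flow is: open a YNL socket for the netdev family, dump all page pools, iterate, free. A condensed sketch of that flow (error handling trimmed; the id/ifindex field names are assumed from the generated netdev bindings):

    #include <stdio.h>
    #include <ynl.h>
    #include "netdev-user.h"

    int main(void)
    {
            struct netdev_page_pool_get_list *pools;
            struct ynl_error yerr;
            struct ynl_sock *ys;

            ys = ynl_sock_create(&ynl_netdev_family, &yerr);
            if (!ys)
                    return 1;

            pools = netdev_page_pool_get_dump(ys);
            if (pools) {
                    ynl_dump_foreach(pools, pp)
                            printf("pool %llu on ifindex %u\n",
                                   (unsigned long long)pp->id, pp->ifindex);
                    netdev_page_pool_get_list_free(pools);
            }

            ynl_sock_destroy(ys);
            return 0;
    }
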
/linux-master/drivers/net/ethernet/wangxun/libwx/
wx_type.h
745 u64 pools; member in struct:wx_mac_addr
wx_hw.c
599 * @pools: VMDq "set" or "pool" index
604 static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools, argument
620 wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
622 wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
797 wx->mac_table[i].pools,
811 wx->mac_table[0].pools = 1ULL;
814 wx->mac_table[0].pools,
830 wx->mac_table[i].pools = 0;
846 if (wx->mac_table[i].pools != (1ULL << pool)) {
848 wx->mac_table[i].pools |
[all...]
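
wx_set_rar() stores a 64-bit pool-membership mask in two 32-bit registers, as lines 620 and 622 show. The split is plain masking and shifting:

    /* Program a 64-bit pool bitmap into the low/high VM registers. */
    u32 vm_lo = (u32)(pools & 0xFFFFFFFF);
    u32 vm_hi = (u32)(pools >> 32);

    wr32(wx, WX_PSR_MAC_SWC_VM_L, vm_lo);
    wr32(wx, WX_PSR_MAC_SWC_VM_H, vm_hi);
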
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
pool.c
25 if (!xsk->pools) {
26 xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,
27 sizeof(*xsk->pools), GFP_KERNEL);
28 if (unlikely(!xsk->pools))
41 kfree(xsk->pools);
42 xsk->pools = NULL;
54 xsk->pools[ix] = pool;
60 xsk->pools[ix] = NULL;
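
The mlx5 XSK code lazily allocates one xsk_buff_pool pointer per channel, then stores or clears entries by channel index. A sketch of the pattern from the snippet (refcounting omitted; my_xsk_get_pools is a hypothetical name):

    static int my_xsk_get_pools(struct mlx5e_xsk *xsk)
    {
            if (!xsk->pools) {
                    xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,
                                         sizeof(*xsk->pools), GFP_KERNEL);
                    if (unlikely(!xsk->pools))
                            return -ENOMEM;
            }
            return 0;
    }
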
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
en.h
855 /* XSK buffer pools are stored separately from channels,
861 struct xsk_buff_pool **pools; member in struct:mlx5e_xsk
/linux-master/drivers/net/ethernet/freescale/dpaa2/
dpaa2-switch.c
2718 dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;
2719 dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;
2720 dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;
dpaa2-eth.c
1821 * pools.
4369 pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
4370 pools_params.pools[0].backup_pool = 0;
4371 pools_params.pools[0].buffer_size = priv->rx_buf_size;
dpni.h
24 * DPNI_MAX_DPBP - Maximum number of buffer pools per DPNI
99 * struct dpni_pools_cfg - Structure representing buffer pools configuration
103 * @pools: Array of buffer pools parameters; The number of valid entries
105 * @pools.dpbp_id: DPBP object ID
106 * @pools.priority: Priority mask that indicates TC's used with this buffer.
108 * @pools.buffer_size: Buffer size
109 * @pools.backup_pool: Backup pool
119 } pools[DPNI_MAX_DPBP]; member in struct:dpni_pools_cfg
dpni.c
150 * dpni_set_pools() - Set buffer pools configuration
154 * @cfg: Buffer pools configuration
179 cpu_to_le16(cfg->pools[i].dpbp_id);
181 cfg->pools[i].priority_mask;
183 cpu_to_le16(cfg->pools[i].buffer_size);
185 DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
dpaa2-xsk.c
162 pools_params->pools[curr_bp].priority_mask |= (1 << j);
163 if (!pools_params->pools[curr_bp].priority_mask)
166 pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid;
167 pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size;
168 pools_params->pools[curr_bp++].backup_pool = 0;
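
Across dpaa2-switch, dpaa2-eth and dpaa2-xsk the shape is the same: fill one or more entries of struct dpni_pools_cfg (or its dpsw counterpart) and hand it to the firmware call. A minimal single-pool configuration mirroring the dpaa2-eth lines above (the num_dpbp count field is assumed from the dpni.h docs):

    struct dpni_pools_cfg pools_params = { 0 };

    pools_params.num_dpbp = 1;
    pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
    pools_params.pools[0].backup_pool = 0;
    pools_params.pools[0].buffer_size = priv->rx_buf_size;

    err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
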
/linux-master/mm/
dmapool.c
60 struct list_head pools; member in struct:dma_pool
80 list_for_each_entry(pool, &dev->dma_pools, pools) {
92 static DEVICE_ATTR_RO(pools);
210 * Given one of these pools, dma_pool_alloc()
267 INIT_LIST_HEAD(&retval->pools);
280 list_add(&retval->pools, &dev->dma_pools);
288 list_del(&retval->pools);
370 list_del(&pool->pools);
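
dmapool.c is the allocator behind the dma_pool_* API that the comment at line 210 refers to. Typical driver usage, with the name, size and alignment values as placeholders:

    struct dma_pool *pool;
    dma_addr_t dma;
    void *vaddr;

    pool = dma_pool_create("my_desc", dev, 64, 8, 0);
    if (!pool)
            return -ENOMEM;

    vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
    if (!vaddr) {
            dma_pool_destroy(pool);
            return -ENOMEM;
    }

    /* ... give "dma" to the device, use "vaddr" from the CPU ... */

    dma_pool_free(pool, vaddr, dma);
    dma_pool_destroy(pool);
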
/linux-master/arch/sparc/kernel/
pci_sun4v.c
718 pool = &(iommu->pools[pool_nr]);
/linux-master/drivers/soc/ti/
knav_qmss_queue.c
815 /* Region maintains a sorted (by region offset) list of pools
820 node = &region->pools;
821 list_for_each_entry(iter, &region->pools, region_inst) {
835 list_add_tail(&pool->list, &kdev->pools);
1037 list_add(&pool->region_inst, &region->pools);
1121 INIT_LIST_HEAD(&region->pools);
1359 list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
1781 INIT_LIST_HEAD(&kdev->pools);
1827 queue_pools = of_get_child_by_name(node, "queue-pools");
1829 dev_err(dev, "queue-pools no
[all...]
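
Lines 815-821 keep each region's pool list sorted by offset: scan for the first pool with a larger offset and insert before it. A sketch of that insertion, with the region_offset field name assumed:

    struct list_head *node = &region->pools;
    struct knav_pool *iter;

    list_for_each_entry(iter, &region->pools, region_inst) {
            if (pool->region_offset < iter->region_offset) {
                    node = &iter->region_inst;
                    break;
            }
    }
    /* list_add_tail() on a non-head node inserts before it. */
    list_add_tail(&pool->region_inst, node);
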
/linux-master/arch/powerpc/include/asm/
iommu.h
108 struct iommu_pool pools[IOMMU_NR_POOLS]; member in struct:iommu_table
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_arg.c
28 struct dr_arg_pool *pools[DR_ARG_CHUNK_SIZE_MAX]; member in struct:mlx5dr_arg_mgr
201 arg_obj = dr_arg_pool_get_arg_obj(mgr->pools[size]);
226 dr_arg_pool_put_arg_obj(mgr->pools[arg_obj->log_chunk_size], arg_obj);
245 pool_mgr->pools[i] = dr_arg_pool_create(dmn, i);
246 if (!pool_mgr->pools[i])
254 dr_arg_pool_destroy(pool_mgr->pools[i]);
262 struct dr_arg_pool **pools; local
268 pools = mgr->pools;
270 dr_arg_pool_destroy(pools[
[all...]
/linux-master/include/soc/fsl/
qman.h
940 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
943 * (SDQCR). The requested pools are limited to those the portal has dequeue
946 void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
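
qman_p_static_dequeue_add() ORs pool-channel bits into the portal's SDQCR, and qman.c line 1768 masks the request against the pools the portal actually has dequeue access to. Example enabling static dequeue from pool channels 2 and 3:

    /* Bits outside p->config->pools are silently dropped. */
    qman_p_static_dequeue_add(p, QM_SDQCR_CHANNELS_POOL(2) |
                                 QM_SDQCR_CHANNELS_POOL(3));
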
