Searched refs:buckets (Results 1 - 25 of 100) sorted by path


/linux-master/block/
blk-stat.c
86 for (bucket = 0; bucket < cb->buckets; bucket++)
93 for (bucket = 0; bucket < cb->buckets; bucket++) {
105 unsigned int buckets, void *data)
113 cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
119 cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
130 cb->buckets = buckets;
147 for (bucket = 0; bucket < cb->buckets; bucket++)
103 blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *), int (*bucket_fn)(const struct request *), unsigned int buckets, void *data) argument
blk-stat.h
16 * buckets by @bucket_fn and added to a per-cpu buffer, @cpu_stat. When the
31 * @cpu_stat: Per-cpu statistics buckets.
43 * @buckets: Number of statistics buckets.
45 unsigned int buckets; member in struct:blk_stat_callback
48 * @stat: Array of statistics buckets.
79 * @buckets: Number of statistics buckets.
89 unsigned int buckets, void *data);
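The blk-stat.h comments above describe the mechanism: each completed request is classified into a bucket by @bucket_fn and accumulated into the per-cpu @cpu_stat buffer, which a timer callback later folds into @stat. Below is a minimal userspace sketch of that classify-then-accumulate pattern; the names (rq_stat, bucket_fn, account) are illustrative stand-ins, not the kernel API, and the per-cpu and timer machinery is elided.

#include <stdio.h>

#define NR_BUCKETS 4

/* Hypothetical stand-in for struct blk_rq_stat: a count plus total latency. */
struct rq_stat { unsigned long nr; unsigned long long total_ns; };

/* Classify a sample into a bucket; this plays the role of @bucket_fn.
 * A negative return would mean "ignore this sample". */
static int bucket_fn(unsigned long long lat_ns)
{
    if (lat_ns < 1000000ULL)  return 0;  /* < 1 ms  */
    if (lat_ns < 4000000ULL)  return 1;  /* < 4 ms  */
    if (lat_ns < 16000000ULL) return 2;  /* < 16 ms */
    return 3;
}

static void account(struct rq_stat *stat, unsigned long long lat_ns)
{
    int b = bucket_fn(lat_ns);

    if (b < 0)
        return;
    stat[b].nr++;
    stat[b].total_ns += lat_ns;
}

int main(void)
{
    struct rq_stat stat[NR_BUCKETS] = { 0 };
    unsigned long long samples[] = { 500000, 2000000, 2500000, 20000000 };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        account(stat, samples[i]);
    for (int b = 0; b < NR_BUCKETS; b++)
        printf("bucket %d: %lu samples\n", b, stat[b].nr);
    return 0;
}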
kyber-iosched.c
88 * Request latencies are recorded in a histogram with buckets defined relative
102 * The width of the latency histogram buckets is
107 * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
111 /* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
134 atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS]; member in struct:kyber_cpu_latency
214 unsigned int *buckets = kqd->latency_buckets[sched_domain][type]; local
215 atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
219 buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
230 unsigned int *buckets = kqd->latency_buckets[sched_domain][type]; local
234 samples += buckets[bucke
[all...]
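The kyber comments describe a latency histogram split into (1 << KYBER_LATENCY_SHIFT) "good" buckets covering latencies up to the target and an equal number of "bad" buckets beyond it. Here is a hedged sketch of one plausible latency-to-bucket mapping; the kernel's actual arithmetic differs in detail, but it likewise divides by a bucket width derived from target >> KYBER_LATENCY_SHIFT and clamps to the last bucket.

#include <stdio.h>

#define KYBER_LATENCY_SHIFT   2
#define KYBER_GOOD_BUCKETS    (1 << KYBER_LATENCY_SHIFT)  /* <= target latency */
#define KYBER_LATENCY_BUCKETS (KYBER_GOOD_BUCKETS << 1)   /* good + "bad" */

/* Map a measured latency to a histogram bucket. Buckets are sized in units
 * of target/2^KYBER_LATENCY_SHIFT, so the first half covers latencies up to
 * the target and the second ("bad") half covers everything beyond it. */
static unsigned int latency_bucket(unsigned long long latency_ns,
                                   unsigned long long target_ns)
{
    unsigned long long width = target_ns >> KYBER_LATENCY_SHIFT;
    unsigned long long b;

    if (!width)
        width = 1;
    b = latency_ns / width;
    return b >= KYBER_LATENCY_BUCKETS ? KYBER_LATENCY_BUCKETS - 1
                                      : (unsigned int)b;
}

int main(void)
{
    unsigned long long target = 2000000; /* 2 ms target latency */
    unsigned long long lat[] = { 100000, 1500000, 2100000, 9000000 };

    for (int i = 0; i < 4; i++)
        printf("%llu ns -> bucket %u\n", lat[i], latency_bucket(lat[i], target));
    return 0;
}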
/linux-master/drivers/gpu/drm/i915/gem/
i915_gem_execbuffer.c
313 struct hlist_head *buckets; /** ht for relocation handles */ member in struct:i915_execbuffer
363 eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
365 if (eb->buckets)
579 &eb->buckets[hash_32(entry->handle,
1076 head = &eb->buckets[hash_32(handle, eb->lut_size)];
1110 kfree(eb->buckets);
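The execbuffer snippets show a common kernel pattern: an hlist_head array whose length is a power of two (allocated as sizeof(struct hlist_head) << size) and indexed by hash_32(handle, lut_size). The sketch below reproduces that bucket selection in userspace with the multiplicative hash from include/linux/hash.h; treat it as an illustration of the indexing, not the i915 code itself.

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_32 0x61C88647u  /* same constant as include/linux/hash.h */

/* Userspace equivalent of the kernel's hash_32(): multiply by the golden
 * ratio constant and keep the top 'bits' bits as the bucket index. */
static uint32_t hash_32(uint32_t val, unsigned int bits)
{
    return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

int main(void)
{
    unsigned int lut_size = 6;  /* 2^6 = 64 buckets */
    uint32_t handles[] = { 1, 2, 42, 0xdeadbeef };

    for (int i = 0; i < 4; i++)
        printf("handle 0x%08x -> bucket %u\n", (unsigned)handles[i],
               hash_32(handles[i], lut_size));
    return 0;
}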
/linux-master/drivers/gpu/drm/radeon/
radeon_cs.c
75 /* Connect the sorted buckets in the output list. */
84 struct radeon_cs_buckets buckets; local
102 radeon_cs_buckets_init(&buckets);
188 radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
192 radeon_cs_buckets_get_list(&buckets, &p->validated);
/linux-master/drivers/infiniband/ulp/ipoib/
ipoib.h
298 struct ipoib_neigh __rcu **buckets; member in struct:ipoib_neigh_hash
ipoib_main.c
1313 for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
1359 struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1438 for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
1464 rcu_dereference_protected(htbl->buckets[hash_val],
1466 rcu_assign_pointer(htbl->buckets[hash_val], neigh);
1523 np = &htbl->buckets[hash_val];
1548 struct ipoib_neigh __rcu **buckets; local
1557 buckets = kvcalloc(size, sizeof(*buckets), GFP_KERNEL);
1558 if (!buckets) {
1581 struct ipoib_neigh __rcu **buckets = htbl->buckets; local
[all...]
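The ipoib snippets use RCU for lock-free reads of the neighbour hash: a new entry is linked to the current chain head and then published with rcu_assign_pointer(), while readers traverse via rcu_dereference_bh(). The userspace sketch below models only the publish/consume ordering with C11 release/acquire atomics; neigh_add/neigh_find are hypothetical names, writers are assumed serialized by a lock, and real RCU's deferred reclamation is omitted.

#include <stdatomic.h>
#include <stdio.h>

#define NR_BUCKETS 16

struct neigh {
    unsigned int key;
    struct neigh *next;  /* next entry in the hash chain */
};

/* Bucket heads; atomic so readers can traverse without taking a lock. */
static _Atomic(struct neigh *) buckets[NR_BUCKETS];

/* Publish a new entry at the head of its chain. The release store plays the
 * role of rcu_assign_pointer(): the node is fully initialized before it
 * becomes reachable through the bucket head. */
static void neigh_add(struct neigh *n)
{
    unsigned int b = n->key % NR_BUCKETS;

    n->next = atomic_load_explicit(&buckets[b], memory_order_relaxed);
    atomic_store_explicit(&buckets[b], n, memory_order_release);
}

/* Lookup walks the chain; the acquire load mirrors rcu_dereference(). */
static struct neigh *neigh_find(unsigned int key)
{
    struct neigh *n = atomic_load_explicit(&buckets[key % NR_BUCKETS],
                                           memory_order_acquire);

    for (; n; n = n->next)
        if (n->key == key)
            return n;
    return NULL;
}

int main(void)
{
    struct neigh n1 = { .key = 7 }, n2 = { .key = 23 };

    neigh_add(&n1);
    neigh_add(&n2);
    printf("found key 23: %s\n", neigh_find(23) ? "yes" : "no");
    return 0;
}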
/linux-master/drivers/md/bcache/
alloc.c
7 * Allocation in bcache is done in terms of buckets:
17 * of buckets on disk, with a pointer to them in the journal header.
25 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
26 * call prio_write(), and when prio_write() finishes we pull buckets off the
31 * smaller freelist, and buckets on that list are always ready to be used.
36 * There is another freelist, because sometimes we have buckets that we know
38 * priorities to be rewritten. These come from freed btree nodes and buckets
40 * them (because they were overwritten). That's the unused list - buckets on the
57 * buckets are ready.
59 * invalidate_buckets_(lru|fifo)() find buckets tha
[all...]
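The alloc.c comments describe a two-stage pipeline: invalidation fills free_inc with reclaimed buckets, prio_write() persists their priorities and generations, and only then do buckets move to the small free list that allocation pops from. A toy sketch of that staging follows, with invented fifo helpers and the prio_write() step reduced to a comment; it models the flow, not bcache's code.

#include <stdio.h>

#define FREE_INC_MAX 8
#define FREE_MAX     4

/* Toy fixed-size FIFO standing in for bcache's fifo type. */
struct fifo { unsigned buf[16]; unsigned head, tail; };

static int fifo_push(struct fifo *f, unsigned v, unsigned cap)
{
    if (f->tail - f->head == cap)
        return 0;
    f->buf[f->tail++ % 16] = v;
    return 1;
}

static int fifo_pop(struct fifo *f, unsigned *v)
{
    if (f->head == f->tail)
        return 0;
    *v = f->buf[f->head++ % 16];
    return 1;
}

int main(void)
{
    struct fifo free_inc = { { 0 }, 0, 0 }, freelist = { { 0 }, 0, 0 };
    unsigned b, moved = 0;

    /* Stage 1: invalidation fills free_inc with reclaimed buckets. */
    for (unsigned i = 0; i < FREE_INC_MAX; i++)
        fifo_push(&free_inc, 100 + i, FREE_INC_MAX);

    /* Stage 2: after the (elided) prio_write() has persisted the new
     * priorities and gens, buckets may move to the small free list. */
    while (moved < FREE_MAX && fifo_pop(&free_inc, &b)) {
        fifo_push(&freelist, b, FREE_MAX);
        moved++;
    }

    /* Allocation just pops a ready-to-use bucket off the free list. */
    if (fifo_pop(&freelist, &b))
        printf("allocated bucket %u\n", b);
    return 0;
}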
bcache.h
42 * To do this, we first divide the cache device up into buckets. A bucket is the
47 * it. The gens and priorities for all the buckets are stored contiguously and
48 * packed on disk (in a linked list of buckets - aside from the superblock, all
49 * of bcache's metadata is stored in buckets).
56 * The generation is used for invalidating buckets. Each pointer also has an 8
62 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
113 * (If buckets are really big we'll only use part of the bucket for a btree node
143 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
145 * allocation can reuse buckets sooner when they've been mostly overwritten.
434 * When allocating new buckets, prio_writ
458 struct bucket *buckets; member in struct:cache
[all...]
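The bcache.h comments explain the generation scheme: each bucket carries an 8-bit gen, each pointer into a bucket embeds the gen it was created with, and bumping a bucket's gen invalidates every outstanding pointer at once. A compact sketch of that staleness check (field and type names are illustrative, not bcache's):

#include <stdint.h>
#include <stdio.h>

struct bucket  { uint8_t gen; uint16_t prio; };
struct bkt_ptr { unsigned bucket; uint8_t gen; };  /* pointer + embedded gen */

/* A pointer is stale once its bucket has been invalidated (gen bumped). */
static int ptr_stale(const struct bucket *buckets, struct bkt_ptr p)
{
    return buckets[p.bucket].gen != p.gen;
}

int main(void)
{
    struct bucket buckets[4] = { { 0, 0 } };
    struct bkt_ptr p = { .bucket = 2, .gen = buckets[2].gen };

    printf("stale before invalidate: %d\n", ptr_stale(buckets, p)); /* 0 */
    buckets[2].gen++;  /* invalidate: every pointer into bucket 2 goes stale */
    printf("stale after invalidate:  %d\n", ptr_stale(buckets, p)); /* 1 */
    return 0;
}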
btree.c
6 * All allocation is done in buckets, which should match the erase block size
10 * bucket priority is increased on cache hit, and periodically all the buckets
83 * Add a sysfs tunable for the number of open data buckets
815 * Don't worry about the mca_rereserve buckets
1591 * would run out of the buckets (since no new bucket
1775 /* don't reclaim buckets to which writeback keys point */
1802 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1806 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
2132 * We need to put some unused buckets directly on the prio freelist in
2133 * order to get the allocator thread started - it needs freed buckets i
[all...]
super.c
104 err = "Too many journal buckets";
108 err = "Too many buckets";
112 err = "Not enough buckets";
147 err = "Journal buckets not sequential";
152 err = "Too many journal buckets";
568 * buckets in suboptimal order.
570 * On disk they're stored in a packed array, and in as many buckets are required
571 * to fit them all. The buckets we use to store them form a list; the journal
576 * of buckets to allocate from) the allocation code will invalidate some
577 * buckets, bu
[all...]
sysfs.c
1082 p[i] = ca->buckets[i].prio;
/linux-master/drivers/md/
dm-cache-policy-smq.c
578 unsigned int *buckets; member in struct:smq_hash_table
593 ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets)));
594 if (!ht->buckets)
598 ht->buckets[i] = INDEXER_NULL;
605 vfree(ht->buckets);
610 return to_entry(ht->es, ht->buckets[bucket]);
620 e->hash_next = ht->buckets[bucket];
621 ht->buckets[bucket] = to_index(ht->es, e);
653 ht->buckets[
[all...]
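smq's hash table chains entries by index into a preallocated entry space rather than by pointer, using INDEXER_NULL as the empty sentinel and to_index()/to_entry() to convert. A minimal sketch of that indexed-chain layout follows; the sentinel value and types are simplified here, and the design point is that indices stay valid across relocation of the entry array where raw pointers would not.

#include <stdio.h>

#define NR_BUCKETS   8
#define NR_ENTRIES   16
#define INDEXER_NULL ((unsigned)-1)  /* illustrative "no entry" sentinel */

struct entry {
    unsigned key;
    unsigned hash_next;  /* index of the next entry in the chain, not a pointer */
};

static struct entry es[NR_ENTRIES];   /* preallocated entry space */
static unsigned buckets[NR_BUCKETS];  /* each bucket holds an entry index */

static void ht_insert(unsigned idx)
{
    unsigned b = es[idx].key % NR_BUCKETS;

    es[idx].hash_next = buckets[b];  /* chain through indices */
    buckets[b] = idx;
}

static struct entry *ht_lookup(unsigned key)
{
    unsigned i = buckets[key % NR_BUCKETS];

    for (; i != INDEXER_NULL; i = es[i].hash_next)
        if (es[i].key == key)
            return &es[i];
    return NULL;
}

int main(void)
{
    for (unsigned b = 0; b < NR_BUCKETS; b++)
        buckets[b] = INDEXER_NULL;

    es[0].key = 42;
    ht_insert(0);
    printf("42 %s\n", ht_lookup(42) ? "found" : "missing");
    return 0;
}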
dm-region-hash.c
73 struct list_head *buckets; member in struct:dm_region_hash
179 * Calculate a suitable number of buckets for our hash
209 rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));
210 if (!rh->buckets) {
217 INIT_LIST_HEAD(rh->buckets + i);
231 vfree(rh->buckets);
247 list_for_each_entry_safe(reg, nreg, rh->buckets + h,
258 vfree(rh->buckets);
277 struct list_head *bucket = rh->buckets
[all...]
/linux-master/drivers/md/dm-vdo/
int-map.c
15 * are stored in a fixed array of buckets, with no dynamic allocation for collisions. Unlike linear
23 * that process fails (typically when the buckets are around 90% full), the table must be resized
26 * Unlike linear probing, the number of buckets that must be searched in the worst case has a fixed
60 #define NEIGHBORHOOD 255 /* the number of buckets in each neighborhood */
70 * it's crucial to keep the hop fields near the buckets that they use them so they'll tend to share
91 * bucket array, we allocate a few more buckets at the end of the array instead, which is why
99 /* @bucket_count: The number of buckets in the bucket array. */
101 /** @buckets: The array of hash buckets. */
102 struct bucket *buckets; member in struct:int_map
[all...]
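The int-map comments describe hopscotch hashing: every entry is stored within a fixed-size neighborhood of its home bucket, so a lookup scans a small bounded window of the flat bucket array instead of chasing separately allocated chain nodes. A simplified lookup sketch using hop offsets chained from the home bucket (field names are illustrative; vdo's real code also handles insertion by displacing entries into the neighborhood, which is omitted here):

#include <stdint.h>
#include <stdio.h>

#define NEIGHBORHOOD 255   /* max distance from an entry to its home bucket */
#define CAPACITY     1024  /* plus NEIGHBORHOOD - 1 overflow buckets at the end */

struct bucket {
    uint8_t  first_hop;  /* offset+1 of first entry in this neighborhood, 0 = none */
    uint8_t  next_hop;   /* offset+1 of the next entry sharing this home bucket */
    uint64_t key;
    void    *value;
};

static struct bucket buckets[CAPACITY + NEIGHBORHOOD - 1];

static void *int_map_get(uint64_t key)
{
    struct bucket *home = &buckets[key % CAPACITY];
    unsigned hop = home->first_hop;

    /* Walk the hop list: every entry lies within NEIGHBORHOOD of home, so
     * this only touches a small, contiguous region of the bucket array. */
    while (hop != 0) {
        struct bucket *b = &home[hop - 1];

        if (b->key == key && b->value != NULL)
            return b->value;
        hop = b->next_hop;
    }
    return NULL;
}

int main(void)
{
    static int payload = 7;
    struct bucket *home = &buckets[42 % CAPACITY];

    /* Hand-place one entry two slots from home to exercise the walk. */
    home->first_hop = 3;
    home[2].key = 42;
    home[2].value = &payload;

    printf("lookup(42) -> %d\n", *(int *)int_map_get(42));
    return 0;
}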
priority-table.c
21 * priority. The table is essentially an array of buckets.
33 * A priority table is an array of buckets, indexed by priority. New entries are added to the end
41 /* A bit vector flagging all buckets that are currently non-empty */
43 /* The array of all buckets, indexed by priority */
44 struct bucket buckets[]; member in struct:priority_table
69 struct bucket *bucket = &table->buckets[priority];
94 * Unlink the buckets from any entries still in the table so the entries won't be left with
116 list_del_init(&table->buckets[priority].queue);
134 list_move_tail(entry, &table->buckets[priority].queue);
162 /* All buckets ar
[all...]
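The priority-table comments describe an array of queue buckets indexed by priority plus a bit vector flagging the non-empty buckets, so dequeueing the highest priority is a find-highest-set-bit operation rather than a scan. A small sketch of that search, shrunk to 64 priorities and a single flag word (the kernel table uses list heads where this uses plain counters):

#include <stdint.h>
#include <stdio.h>

#define MAX_PRIORITY 64

static uint64_t search_vector;         /* bit p set => bucket p is non-empty */
static int      counts[MAX_PRIORITY];  /* stand-in for the per-bucket queues */

static void table_enqueue(unsigned priority)
{
    counts[priority]++;
    search_vector |= 1ULL << priority;  /* flag the bucket non-empty */
}

/* Dequeue from the highest non-empty priority bucket, or -1 if empty. */
static int table_dequeue(void)
{
    int p;

    if (search_vector == 0)
        return -1;
    p = 63 - __builtin_clzll(search_vector);  /* highest set bit */
    if (--counts[p] == 0)
        search_vector &= ~(1ULL << p);        /* bucket drained, clear flag */
    return p;
}

int main(void)
{
    table_enqueue(3);
    table_enqueue(10);
    table_enqueue(10);
    printf("%d %d %d %d\n", table_dequeue(), table_dequeue(),
           table_dequeue(), table_dequeue());  /* 10 10 3 -1 */
    return 0;
}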
/linux-master/drivers/md/persistent-data/
dm-transaction-manager.c
98 struct hlist_head buckets[DM_HASH_SIZE]; member in struct:dm_transaction_manager
112 hlist_for_each_entry(si, tm->buckets + bucket, hlist)
136 hlist_add_head(&si->hlist, tm->buckets + bucket);
150 bucket = tm->buckets + i;
179 INIT_HLIST_HEAD(tm->buckets + i);
/linux-master/drivers/media/v4l2-core/
v4l2-ctrls-core.c
1543 hdl->buckets = kvcalloc(hdl->nr_of_buckets, sizeof(hdl->buckets[0]),
1545 hdl->error = hdl->buckets ? 0 : -ENOMEM;
1558 if (hdl == NULL || hdl->buckets == NULL)
1579 kvfree(hdl->buckets);
1580 hdl->buckets = NULL;
1633 ref = hdl->buckets ? hdl->buckets[bucket] : NULL;
1730 new_ref->next = hdl->buckets[bucket];
1731 hdl->buckets[bucke
[all...]
/linux-master/drivers/message/fusion/
mptlan.c
89 atomic_t buckets_out; /* number of unused buckets on IOC */
103 int max_buckets_out; /* Max buckets to send to IOC */
459 any buckets it still has. */
503 dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
832 dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
838 dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
920 "IOC returned %d buckets, freeing them...\n", count));
950 /* dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
953 /**/ dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
1010 // dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returne
1151 u32 curr, buckets, count, max; local
[all...]
/linux-master/drivers/net/
amt.c
418 int i, buckets; local
420 buckets = amt->hash_buckets;
430 for (i = 0; i < buckets; i++) {
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/ipoib/
ipoib_vlan.c
45 struct hlist_head buckets[1 << MLX5I_MAX_LOG_PKEY_SUP]; member in struct:mlx5i_pkey_qpn_ht
71 static struct qpn_to_netdev *mlx5i_find_qpn_to_netdev_node(struct hlist_head *buckets, argument
74 struct hlist_head *h = &buckets[hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP)];
99 hlist_add_head(&new_node->hlist, &ht->buckets[key]);
112 node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn);
131 node = mlx5i_find_qpn_to_netdev_node(ipriv->qpn_htbl->buckets, qpn);
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/lag/
lag.c
207 for (j = 0; j < ldev->buckets; j++) {
208 idx = i * ldev->buckets + j;
279 ldev->buckets = 1;
307 * As we have ldev->buckets slots per port first assume the native
314 u8 buckets,
334 /* Use native mapping by default where each port's buckets
338 for (j = 0; j < buckets; j++) {
339 idx = i * buckets + j;
350 for (j = 0; j < buckets; j++) {
352 ports[disabled[i] * buckets
312 mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, u8 num_ports, u8 buckets, u8 *ports) argument
[all...]
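The lag snippets index a flat array of ports * buckets slots as idx = port * buckets + bucket; natively each port's buckets map to itself, and when a port is disabled its buckets are spread across the remaining ports. A hedged sketch of that inference follows, heavily simplified from what mlx5_infer_tx_affinity_mapping() actually does:

#include <stdio.h>

#define NUM_PORTS 2
#define BUCKETS   4

/* Fill ports[] with a destination port (1-based) per (port, bucket) slot. */
static void infer_tx_affinity(const int port_up[NUM_PORTS],
                              unsigned char ports[NUM_PORTS * BUCKETS])
{
    int enabled[NUM_PORTS], n_enabled = 0;

    for (int i = 0; i < NUM_PORTS; i++)
        if (port_up[i])
            enabled[n_enabled++] = i;

    for (int i = 0; i < NUM_PORTS; i++) {
        for (int j = 0; j < BUCKETS; j++) {
            int idx = i * BUCKETS + j;  /* flat ports*buckets indexing */

            if (port_up[i])
                ports[idx] = i + 1;  /* native mapping: port keeps its buckets */
            else
                ports[idx] = n_enabled
                           ? enabled[(i + j) % n_enabled] + 1  /* spread */
                           : i + 1;  /* no port up: keep native mapping */
        }
    }
}

int main(void)
{
    int up[NUM_PORTS] = { 1, 0 };  /* port 2 is down */
    unsigned char ports[NUM_PORTS * BUCKETS];

    infer_tx_affinity(up, ports);
    for (int idx = 0; idx < NUM_PORTS * BUCKETS; idx++)
        printf("slot %d -> port %u\n", idx, ports[idx]);
    return 0;
}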
lag.h
61 u8 buckets; member in struct:mlx5_lag
port_sel.c
51 ft_attr.max_fte = ldev->ports * ldev->buckets;
78 for (j = 0; j < ldev->buckets; j++) {
81 idx = i * ldev->buckets + j;
346 for (j = 0; j < ldev->buckets; j++) {
347 idx = i * ldev->buckets + j;
569 for (j = 0; j < ldev->buckets; j++) {
570 idx = i * ldev->buckets + j;
/linux-master/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
pno.c
298 struct brcmf_gscan_bucket_config **buckets,
323 *buckets = NULL;
355 *buckets = fw_buckets;
396 struct brcmf_gscan_bucket_config *buckets; local
403 n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets,
437 memcpy(gscan_cfg->bucket, buckets,
438 array_size(n_buckets, sizeof(*buckets)));
463 kfree(buckets);
296 brcmf_pno_prep_fwconfig(struct brcmf_pno_info *pi, struct brcmf_pno_config_le *pno_cfg, struct brcmf_gscan_bucket_config **buckets, u32 *scan_freq) argument

Completed in 585 milliseconds
