Searched refs:buckets (Results 26 - 50 of 100) sorted by relevance

/linux-master/tools/perf/
builtin-ftrace.c
685 static void make_histogram(int buckets[], char *buf, size_t len, char *linebuf, argument
742 buckets[i]++;
753 static void display_histogram(int buckets[], bool use_nsec) argument
762 total += buckets[i];
772 bar_len = buckets[0] * bar_total / total;
774 0, 1, "us", buckets[0], bar_len, bar, bar_total - bar_len, "");
786 bar_len = buckets[i] * bar_total / total;
788 start, stop, unit, buckets[i], bar_len, bar,
792 bar_len = buckets[NUM_BUCKET - 1] * bar_total / total;
794 1, "...", use_nsec ? "ms" : " s", buckets[NUM_BUCKE
861 read_func_latency(struct perf_ftrace *ftrace, int buckets[]) argument
886 int buckets[NUM_BUCKET] = { }; local
[all...]
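
Note: the matches above are perf's function-latency histogram. Below is a minimal standalone sketch of the same pattern, with simplified signatures; NUM_BUCKET, the bucket ranges, and the bar width are illustrative, not perf's actual values.

    #include <stdio.h>

    #define NUM_BUCKET 22                /* assumed: log2 buckets up to ~1s */

    static void make_histogram(int buckets[], unsigned long long delta_us)
    {
        int i = 0;

        /* log2 bucketing: bucket 0 is [0,2) us, bucket i is [2^i, 2^(i+1)) us */
        while (delta_us >= 2 && i < NUM_BUCKET - 1) {
            delta_us >>= 1;
            i++;
        }
        buckets[i]++;
    }

    static void display_histogram(const int buckets[])
    {
        const int bar_total = 46;        /* total bar width, as in the code above */
        long long total = 0;

        for (int i = 0; i < NUM_BUCKET; i++)
            total += buckets[i];
        if (!total)
            return;

        for (int i = 0; i < NUM_BUCKET; i++) {
            int bar_len = buckets[i] * bar_total / total;

            printf("%10u - %-10u us | %8d | ",
                   i ? 1u << i : 0, 1u << (i + 1), buckets[i]);
            for (int j = 0; j < bar_len; j++)
                putchar('#');
            putchar('\n');
        }
    }

    int main(void)
    {
        int buckets[NUM_BUCKET] = { 0 };

        make_histogram(buckets, 3);
        make_histogram(buckets, 120);
        make_histogram(buckets, 130);
        display_histogram(buckets);
        return 0;
    }
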
/linux-master/fs/bcachefs/
buckets_types.h
37 u64 buckets; member in struct:bch_dev_usage::__anon14
41 * Why do we have this? Isn't it just buckets * bucket_size -
alloc_background.h
7 #include "buckets.h"
242 u.d[BCH_DATA_free].buckets
243 + u.d[BCH_DATA_need_discard].buckets
246 return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
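Note: lines 242-246 above compute how many cached buckets to reclaim: the shortfall between the free target and what is already free (free + need_discard buckets), clamped to the number of cached buckets. A worked example with made-up numbers and a plain clamp standing in for clamp_t():

    #include <stdint.h>
    #include <stdio.h>

    static int64_t clamp_s64(int64_t v, int64_t lo, int64_t hi)
    {
        return v < lo ? lo : v > hi ? hi : v;
    }

    int main(void)
    {
        int64_t want_free = 1000;             /* target free buckets */
        int64_t free_now = 600 + 150;         /* free + need_discard */
        int64_t cached = 300;                 /* at most this much is evictable */

        printf("buckets to reclaim: %lld\n",
               (long long)clamp_s64(want_free - free_now, 0, cached));  /* 250 */
        return 0;
    }
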
Makefile
25 buckets.o \
journal_reclaim.c
7 #include "buckets.h"
82 unsigned sectors, buckets, unwritten; local
91 buckets = bch2_journal_dev_buckets_available(j, ja, from);
111 if (!buckets) {
116 buckets--;
123 if (sectors < ca->mi.bucket_size && buckets) {
124 buckets--;
130 .total = sectors + buckets * ca->mi.bucket_size,
255 * Advance ja->discard_idx as long as it points to buckets that are no longer
273 ja->buckets[j
[all...]
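
Note: the journal_reclaim.c matches size the free journal space as the leftover sectors in the current bucket plus whole free buckets, skipping a tail that is too small to use. A simplified standalone reading of that arithmetic; bucket_size and the values are illustrative, and the exact tail handling in the kernel differs in detail:

    #include <stdio.h>

    int main(void)
    {
        unsigned bucket_size = 512;   /* sectors per journal bucket */
        unsigned buckets = 4;         /* whole free journal buckets */
        unsigned sectors = 100;       /* free sectors in the current bucket */

        /* Mirrors the buckets-- above: if the current bucket's tail is
         * too small to bother with, take a whole fresh bucket instead. */
        if (sectors < bucket_size && buckets) {
            buckets--;
            sectors = bucket_size;
        }

        printf("total = %u sectors\n", sectors + buckets * bucket_size);
        return 0;
    }
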
/linux-master/kernel/bpf/
bpf_local_storage.c
24 return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
751 usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
781 /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
785 smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
787 if (!smap->buckets) {
793 INIT_HLIST_HEAD(&smap->buckets[i].list);
794 raw_spin_lock_init(&smap->buckets[i].lock);
817 kvfree(smap->buckets);
850 b = &smap->buckets[
[all...]
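
Note: the pattern above sizes the bucket array as a power of two (1 << bucket_log) and selects a bucket by hashing a pointer; the comment at line 781 is why bucket_log must yield at least 2 buckets. A userspace sketch with an illustrative pointer hash standing in for hash_ptr(); names are stand-ins, not the BPF map's internals:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct bucket { void *head; };

    struct map {
        unsigned bucket_log;          /* must be >= 1, i.e. >= 2 buckets */
        struct bucket *buckets;
    };

    static struct bucket *select_bucket(struct map *m, const void *key)
    {
        /* Cheap pointer hash; the kernel uses hash_ptr() here. */
        uintptr_t h = (uintptr_t)key / sizeof(long);

        return &m->buckets[h & ((1u << m->bucket_log) - 1)];
    }

    int main(void)
    {
        struct map m = { .bucket_log = 4 };   /* 16 buckets */
        int key;

        m.buckets = calloc(1u << m.bucket_log, sizeof(*m.buckets));
        if (!m.buckets)
            return 1;

        printf("bucket index: %td\n", select_bucket(&m, &key) - m.buckets);
        free(m.buckets);
        return 0;
    }
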
stackmap.c
31 struct stack_map_bucket *buckets[] __counted_by(n_buckets);
233 bucket = READ_ONCE(smap->buckets[id]);
277 old_bucket = xchg(&smap->buckets[id], new_bucket);
587 bucket = xchg(&smap->buckets[id], NULL);
595 old_bucket = xchg(&smap->buckets[id], bucket);
614 if (id >= smap->n_buckets || !smap->buckets[id])
620 while (id < smap->n_buckets && !smap->buckets[id])
646 old_bucket = xchg(&smap->buckets[id], NULL);
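Note: the stackmap matches replace a bucket slot with xchg() and then dispose of the old contents, so concurrent updaters never double-free. A single-threaded C11 sketch of that swap, with atomic_exchange standing in for the kernel's xchg(); in the real map, freeing after the swap is safe only because stackmap constrains its readers:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define N_BUCKETS 8

    static _Atomic(int *) buckets[N_BUCKETS];

    static void store_bucket(unsigned id, int *new_bucket)
    {
        int *old = atomic_exchange(&buckets[id], new_bucket);

        free(old);                    /* previous owner is unlinked; reclaim */
    }

    int main(void)
    {
        int *b = malloc(sizeof(*b));

        if (!b)
            return 1;
        *b = 42;
        store_bucket(3, b);
        printf("bucket 3 = %d\n", *atomic_load(&buckets[3]));

        store_bucket(3, NULL);        /* delete, as the NULL xchg above does */
        return 0;
    }
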
/linux-master/drivers/md/
dm-region-hash.c
73 struct list_head *buckets; member in struct:dm_region_hash
179 * Calculate a suitable number of buckets for our hash
209 rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));
210 if (!rh->buckets) {
217 INIT_LIST_HEAD(rh->buckets + i);
231 vfree(rh->buckets);
247 list_for_each_entry_safe(reg, nreg, rh->buckets + h,
258 vfree(rh->buckets);
277 struct list_head *bucket = rh->buckets
[all...]
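
Note: this entry sizes a hash table ("calculate a suitable number of buckets"), vmalloc()s the bucket array, and initializes each bucket as an empty list. A standalone sketch; the sizing policy shown is one plausible choice, not dm-region-hash's exact heuristic:

    #include <stdio.h>
    #include <stdlib.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    /* Illustrative policy: power of two scaled to the entry count, with a floor. */
    static unsigned suitable_buckets(unsigned nr_entries)
    {
        unsigned nr = 64;

        while (nr < nr_entries && nr < (1u << 20))
            nr <<= 1;
        return nr;
    }

    int main(void)
    {
        unsigned nr_buckets = suitable_buckets(1000);
        struct list_head *buckets = malloc(nr_buckets * sizeof(*buckets));

        if (!buckets)
            return 1;
        for (unsigned h = 0; h < nr_buckets; h++)
            INIT_LIST_HEAD(buckets + h);      /* as at line 217 above */

        printf("%u buckets\n", nr_buckets);
        free(buckets);
        return 0;
    }
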
/linux-master/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
pno.c
298 struct brcmf_gscan_bucket_config **buckets,
323 *buckets = NULL;
355 *buckets = fw_buckets;
396 struct brcmf_gscan_bucket_config *buckets; local
403 n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets,
437 memcpy(gscan_cfg->bucket, buckets,
438 array_size(n_buckets, sizeof(*buckets)));
463 kfree(buckets);
296 brcmf_pno_prep_fwconfig(struct brcmf_pno_info *pi, struct brcmf_pno_config_le *pno_cfg, struct brcmf_gscan_bucket_config **buckets, u32 *scan_freq) argument
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/lag/
lag.h
61 u8 buckets; member in struct:mlx5_lag
port_sel.c
51 ft_attr.max_fte = ldev->ports * ldev->buckets;
78 for (j = 0; j < ldev->buckets; j++) {
81 idx = i * ldev->buckets + j;
346 for (j = 0; j < ldev->buckets; j++) {
347 idx = i * ldev->buckets + j;
569 for (j = 0; j < ldev->buckets; j++) {
570 idx = i * ldev->buckets + j;
lag.c
207 for (j = 0; j < ldev->buckets; j++) {
208 idx = i * ldev->buckets + j;
279 ldev->buckets = 1;
307 * As we have ldev->buckets slots per port first assume the native
314 u8 buckets,
334 /* Use native mapping by default where each port's buckets
338 for (j = 0; j < buckets; j++) {
339 idx = i * buckets + j;
350 for (j = 0; j < buckets; j++) {
352 ports[disabled[i] * buckets
312 mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, u8 num_ports, u8 buckets, u8 *ports) argument
[all...]
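
Note: the recurring idx = i * ldev->buckets + j above flattens a (port, bucket) pair into one flat table: each port owns `buckets` consecutive slots. A tiny demonstration of that indexing:

    #include <stdio.h>

    int main(void)
    {
        unsigned ports = 2, buckets = 4;

        for (unsigned i = 0; i < ports; i++)
            for (unsigned j = 0; j < buckets; j++)
                printf("port %u bucket %u -> slot %u\n",
                       i, j, i * buckets + j);
        return 0;
    }
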
/linux-master/net/ceph/crush/
mapper.c
492 /* choose through intervening buckets */
527 itemtype = map->buckets[-1-item]->type;
540 in = map->buckets[-1-item];
564 map->buckets[-1-item],
697 /* choose through intervening buckets */
741 itemtype = map->buckets[-1-item]->type;
758 in = map->buckets[-1-item];
778 map->buckets[-1-item],
865 if (!map->buckets[b])
869 switch (map->buckets[
[all...]
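
Note: the map->buckets[-1-item] expressions above rely on CRUSH's item encoding: non-negative item IDs are devices, negative IDs are buckets, and -1 - item decodes bucket id -1 to buckets[0], -2 to buckets[1], and so on. A small sketch of the decode:

    #include <stdio.h>

    int main(void)
    {
        int items[] = { 5, -1, -3 };

        for (unsigned i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
            int item = items[i];

            if (item < 0)
                printf("item %d: bucket index %d\n", item, -1 - item);
            else
                printf("item %d: device\n", item);
        }
        return 0;
    }
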
/linux-master/drivers/md/persistent-data/
dm-transaction-manager.c
98 struct hlist_head buckets[DM_HASH_SIZE]; member in struct:dm_transaction_manager
112 hlist_for_each_entry(si, tm->buckets + bucket, hlist)
136 hlist_add_head(&si->hlist, tm->buckets + bucket);
150 bucket = tm->buckets + i;
179 INIT_HLIST_HEAD(tm->buckets + i);
/linux-master/fs/nfs/
nfs42xattr.c
49 * 64 buckets is a good default. There is likely no reasonable
70 struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE]; member in struct:nfs4_xattr_cache
111 INIT_HLIST_HEAD(&cache->buckets[i].hlist);
112 spin_lock_init(&cache->buckets[i].lock);
113 cache->buckets[i].cache = cache;
114 cache->buckets[i].draining = false;
276 if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist)))
278 cache->buckets[i].draining = false;
369 * Mark all buckets as draining, so that no new entries are added. This
394 bucket = &cache->buckets[
[all...]
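
Note: the xattr cache above marks every bucket as draining under its lock so no new entries are added, then empties the lists. A userspace sketch of that shutdown pattern with pthread mutexes standing in for the bucket spinlocks; the structures are illustrative, not the NFS cache's:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NBUCKETS 4

    struct bucket {
        pthread_mutex_t lock;
        bool draining;
        int nentries;                 /* stand-in for the hlist of entries */
    };

    static struct bucket cache[NBUCKETS];

    static bool cache_insert(unsigned i)
    {
        bool ok;

        pthread_mutex_lock(&cache[i].lock);
        ok = !cache[i].draining;      /* inserts refused while draining */
        if (ok)
            cache[i].nentries++;
        pthread_mutex_unlock(&cache[i].lock);
        return ok;
    }

    static void cache_drain(void)
    {
        for (unsigned i = 0; i < NBUCKETS; i++) {
            pthread_mutex_lock(&cache[i].lock);
            cache[i].draining = true;
            cache[i].nentries = 0;    /* real code frees each entry */
            pthread_mutex_unlock(&cache[i].lock);
        }
    }

    int main(void)
    {
        for (unsigned i = 0; i < NBUCKETS; i++)
            pthread_mutex_init(&cache[i].lock, NULL);

        cache_insert(1);
        cache_drain();
        printf("insert after drain: %s\n", cache_insert(1) ? "ok" : "refused");
        return 0;
    }
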
/linux-master/net/sched/
sch_hhf.c
19 * Flows are classified into two buckets: non-heavy-hitter and heavy-hitter
20 * buckets. Initially, a new flow starts as non-heavy-hitter. Once classified
22 * The buckets are dequeued by a Weighted Deficit Round Robin (WDRR) scheduler,
103 #define WDRR_BUCKET_CNT 2 /* two buckets for Weighted DRR */
128 struct wdrr_bucket buckets[WDRR_BUCKET_CNT]; member in struct:hhf_sched_data
147 struct list_head new_buckets; /* list of new buckets */
148 struct list_head old_buckets; /* list of old buckets */
244 /* Assigns packets to WDRR buckets. Implements a multi-stage filter to
355 bucket = &q->buckets[WDRR_BUCKET_FOR_HH];
357 bucket = &q->buckets[WDRR_BUCKET_FOR_NON_H
[all...]
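
Note: sch_hhf keeps exactly two WDRR buckets and weights dequeue toward the non-heavy-hitter one. A compact sketch of that two-bucket deficit round robin; the quanta and the one-unit-per-packet accounting are illustrative, not hhf's byte-based quanta:

    #include <stdio.h>

    enum { BUCKET_NON_HH, BUCKET_HH, BUCKET_CNT };

    struct wdrr_bucket {
        int deficit;
        int quantum;                  /* refill per round; bigger = more share */
        int backlog;                  /* queued packets (stand-in for a queue) */
    };

    int main(void)
    {
        struct wdrr_bucket b[BUCKET_CNT] = {
            [BUCKET_NON_HH] = { 0, 3, 5 },   /* favored bucket */
            [BUCKET_HH]     = { 0, 1, 5 },
        };

        while (b[0].backlog || b[1].backlog) {
            for (int i = 0; i < BUCKET_CNT; i++) {
                b[i].deficit += b[i].quantum;
                while (b[i].deficit > 0 && b[i].backlog) {
                    b[i].deficit--;
                    b[i].backlog--;
                    printf("dequeue from %s\n",
                           i == BUCKET_HH ? "heavy" : "non-heavy");
                }
            }
        }
        return 0;
    }
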
/linux-master/block/
kyber-iosched.c
88 * Requests latencies are recorded in a histogram with buckets defined relative
102 * The width of the latency histogram buckets is
107 * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
111 /* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
134 atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS]; member in struct:kyber_cpu_latency
214 unsigned int *buckets = kqd->latency_buckets[sched_domain][type]; local
215 atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
219 buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
230 unsigned int *buckets = kqd->latency_buckets[sched_domain][type]; local
234 samples += buckets[bucke
[all...]
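
Note: kyber keeps latency buckets per CPU and drains them into a global histogram with atomic_xchg (line 219 above), so each sample is counted exactly once without a lock. A C11 sketch of that aggregation; the CPU and bucket counts are illustrative:

    #include <stdatomic.h>
    #include <stdio.h>

    #define NCPUS    2
    #define NBUCKETS 8

    static atomic_uint cpu_buckets[NCPUS][NBUCKETS];
    static unsigned buckets[NBUCKETS];      /* aggregated; single writer */

    static void record(unsigned cpu, unsigned bucket)
    {
        atomic_fetch_add(&cpu_buckets[cpu][bucket], 1);
    }

    static void flush(void)
    {
        for (unsigned cpu = 0; cpu < NCPUS; cpu++)
            for (unsigned b = 0; b < NBUCKETS; b++)
                buckets[b] += atomic_exchange(&cpu_buckets[cpu][b], 0);
    }

    int main(void)
    {
        record(0, 3);
        record(1, 3);
        record(1, 7);
        flush();
        printf("bucket 3 = %u, bucket 7 = %u\n", buckets[3], buckets[7]);
        return 0;
    }
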
/linux-master/drivers/md/bcache/
alloc.c
7 * Allocation in bcache is done in terms of buckets:
17 * of buckets on disk, with a pointer to them in the journal header.
25 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
26 * call prio_write(), and when prio_write() finishes we pull buckets off the
31 * smaller freelist, and buckets on that list are always ready to be used.
36 * There is another freelist, because sometimes we have buckets that we know
38 * priorities to be rewritten. These come from freed btree nodes and buckets
40 * them (because they were overwritten). That's the unused list - buckets on the
57 * buckets are ready.
59 * invalidate_buckets_(lru|fifo)() find buckets tha
[all...]
bcache.h
42 * To do this, we first divide the cache device up into buckets. A bucket is the
47 * it. The gens and priorities for all the buckets are stored contiguously and
48 * packed on disk (in a linked list of buckets - aside from the superblock, all
49 * of bcache's metadata is stored in buckets).
56 * The generation is used for invalidating buckets. Each pointer also has an 8
62 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
113 * (If buckets are really big we'll only use part of the bucket for a btree node
143 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
145 * allocation can reuse buckets sooner when they've been mostly overwritten.
434 * When allocating new buckets, prio_writ
458 struct bucket *buckets; member in struct:cache
[all...]
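
Note: the bcache.h comments above describe generation-based invalidation: each pointer carries the 8-bit generation its bucket had when the pointer was made, so bumping a bucket's gen invalidates every stale pointer at once. A minimal sketch of that idea; real bcache compares gens within a wraparound window rather than by plain inequality:

    #include <stdint.h>
    #include <stdio.h>

    struct bucket { uint8_t gen; };

    struct ptr {
        unsigned bucket_idx;
        uint8_t gen;                  /* bucket gen when the pointer was made */
    };

    static int ptr_stale(const struct bucket *buckets, const struct ptr *p)
    {
        return buckets[p->bucket_idx].gen != p->gen;
    }

    int main(void)
    {
        struct bucket buckets[4] = { 0 };
        struct ptr p = { .bucket_idx = 2, .gen = buckets[2].gen };

        printf("stale before invalidate: %d\n", ptr_stale(buckets, &p));
        buckets[2].gen++;             /* reuse the bucket: old pointers die */
        printf("stale after invalidate:  %d\n", ptr_stale(buckets, &p));
        return 0;
    }
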
/linux-master/lib/
rhashtable.c
59 return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
72 return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
158 size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
164 if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
184 tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
206 INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
257 flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
260 head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
264 rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags);
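Note: the allocation at line 184 above uses struct_size(tbl, buckets, nbuckets) to size a header plus a flexible array of bucket slots in one overflow-checked allocation. A userspace sketch of the same shape, with a manual overflow check standing in for struct_size() and none of rhashtable's nested tables or RCU:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct bucket_table {
        unsigned int size;
        void *buckets[];              /* flexible array member */
    };

    static struct bucket_table *alloc_table(unsigned int nbuckets)
    {
        struct bucket_table *tbl;

        /* The overflow case struct_size() guards against. */
        if (nbuckets > (SIZE_MAX - sizeof(*tbl)) / sizeof(tbl->buckets[0]))
            return NULL;

        tbl = calloc(1, sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]));
        if (tbl)
            tbl->size = nbuckets;
        return tbl;
    }

    int main(void)
    {
        struct bucket_table *tbl = alloc_table(64);

        if (!tbl)
            return 1;
        printf("allocated %u buckets\n", tbl->size);
        free(tbl);
        return 0;
    }
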
/linux-master/net/openvswitch/
flow_table.h
55 struct hlist_head *buckets; member in struct:table_instance
/linux-master/net/netfilter/
nft_set_hash.c
457 /* Number of buckets is stored in u32, so cap our result to 1U<<31 */
482 u32 buckets; member in struct:nft_hash
502 hash = reciprocal_scale(hash, priv->buckets);
523 hash = reciprocal_scale(hash, priv->buckets);
544 hash = reciprocal_scale(hash, priv->buckets);
568 hash = reciprocal_scale(hash, priv->buckets);
649 for (i = 0; i < priv->buckets; i++) {
676 priv->buckets = nft_hash_buckets(desc->size);
690 for (i = 0; i < priv->buckets; i++) {
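Note: every lookup above maps a 32-bit hash into [0, priv->buckets) with reciprocal_scale(), a multiply-and-shift that avoids a modulo; that is also why the bucket count is capped at 1U << 31. A standalone sketch whose helper body matches the kernel helper's documented behavior:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
        /* val scaled into [0, ep_ro) without a division */
        return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    int main(void)
    {
        uint32_t buckets = 1u << 10;
        uint32_t hash = 0xdeadbeef;

        printf("hash %#x -> bucket %u of %u\n",
               hash, reciprocal_scale(hash, buckets), buckets);
        return 0;
    }
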
/linux-master/drivers/infiniband/ulp/ipoib/
ipoib_main.c
1313 for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
1359 struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1438 for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
1464 rcu_dereference_protected(htbl->buckets[hash_val],
1466 rcu_assign_pointer(htbl->buckets[hash_val], neigh);
1523 np = &htbl->buckets[hash_val];
1548 struct ipoib_neigh __rcu **buckets; local
1557 buckets = kvcalloc(size, sizeof(*buckets), GFP_KERNEL);
1558 if (!buckets) {
1581 struct ipoib_neigh __rcu **buckets = htbl->buckets; local
[all...]
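
Note: the ipoib matches walk RCU-protected bucket chains: readers use rcu_dereference() while writers publish new nodes with rcu_assign_pointer(). A userspace C11 sketch of that publish/lookup shape, with acquire loads and a release store standing in for the RCU primitives; reclamation (the hard part of real RCU) is omitted:

    #include <stdatomic.h>
    #include <stdio.h>

    #define NBUCKETS 16

    struct neigh {
        int key;
        struct neigh *_Atomic next;
    };

    static struct neigh *_Atomic buckets[NBUCKETS];

    static void publish(struct neigh *n)
    {
        unsigned hv = (unsigned)n->key % NBUCKETS;

        atomic_store_explicit(&n->next,
                              atomic_load_explicit(&buckets[hv], memory_order_relaxed),
                              memory_order_relaxed);
        /* release: the node is fully initialized before it becomes visible */
        atomic_store_explicit(&buckets[hv], n, memory_order_release);
    }

    static struct neigh *lookup(int key)
    {
        unsigned hv = (unsigned)key % NBUCKETS;
        struct neigh *n = atomic_load_explicit(&buckets[hv], memory_order_acquire);

        for (; n; n = atomic_load_explicit(&n->next, memory_order_acquire))
            if (n->key == key)
                return n;
        return NULL;
    }

    int main(void)
    {
        static struct neigh a = { .key = 42 };

        publish(&a);
        printf("lookup(42) %s\n", lookup(42) ? "found" : "missing");
        return 0;
    }
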
/linux-master/include/linux/crush/
crush.h
100 * other buckets). Items within a bucket are chosen using one of a
190 * an item from the bucket __map->buckets[N]__ bucket, provided it
237 * CRUSH map includes all buckets, rules, etc.
240 struct crush_bucket **buckets; member in struct:crush_map
/linux-master/include/linux/
bpf_local_storage.h
53 * multiple buckets to improve contention.
55 struct bpf_local_storage_map_bucket *buckets; member in struct:bpf_local_storage_map
