/linux-master/fs/nfs/

internal.h
    566  pnfs_bucket_clear_pnfs_ds_commit_verifiers(struct pnfs_commit_bucket *buckets,  [argument]
    572  buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW;
    581  pnfs_bucket_clear_pnfs_ds_commit_verifiers(array->buckets,

/linux-master/kernel/trace/

trace_events_hist.c
    160   unsigned long buckets;  [member in struct hist_field]
    296   unsigned long buckets = hist_field->buckets;  [local]
    300   if (WARN_ON_ONCE(!buckets))
    304   val = div64_ul(val, buckets);
    306   val = (u64)((unsigned long)val / buckets);
    307   return val * buckets;
    1712  flags_str = "buckets";
    2295  char *field_str, unsigned long *flags, unsigned long *buckets)
    2338  ret = kstrtoul(modifier, 0, buckets);
    2294  parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, char *field_str, unsigned long *flags, unsigned long *buckets)  [argument]
    2451  unsigned long buckets = 0;  [local]
    5402  unsigned long buckets = key_field->buckets;  [local]
    [all...]
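
The ".buckets=N" key modifier shown above (flag string at line 1712, parsed with kstrtoul at line 2338) groups key values by rounding each value down to a multiple of the bucket size, which is what the divide-then-multiply at lines 304-307 does. A minimal standalone sketch of that rounding, assuming nothing beyond the listing; the function name is illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Round a key value down to the lower bound of its bucket, mirroring the
 * div64_ul()/multiply step at lines 304-307.  A bucket size of zero is
 * rejected, as the kernel does with WARN_ON_ONCE(). */
static uint64_t bucket_lower_bound(uint64_t val, unsigned long bucket_size)
{
	if (!bucket_size)
		return val;
	return (val / bucket_size) * bucket_size;
}

int main(void)
{
	/* With ".buckets=100", values 0-99 map to 0, 100-199 to 100, ... */
	printf("%llu\n", (unsigned long long)bucket_lower_bound(137, 100));
	return 0;
}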

trace.h
    839  struct hlist_head *buckets;  [member in struct ftrace_hash]

/linux-master/net/ceph/

osdmap.c
    389  arg->ids_size != c->buckets[bucket_index]->size)
    414  if (!c->buckets[b])
    417  switch (c->buckets[b]->alg) {
    427  c->working_size += c->buckets[b]->size * sizeof(__u32);
    467  c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
    468  if (c->buckets == NULL)
    474  /* buckets */
    482  c->buckets[i] = NULL;
    508  b = c->buckets[
    [all...]

/linux-master/net/core/

neighbour.c
    65   Neighbour hash table buckets are protected with rwlock tbl->lock.
    67   - All the scans/updates to hash buckets MUST be made under this lock.
    535  struct neighbour __rcu **buckets;  [local]
    542  buckets = kzalloc(size, GFP_ATOMIC);
    544  buckets = (struct neighbour __rcu **)
    547  kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
    549  if (!buckets) {
    553  ret->hash_buckets = buckets;
    566  struct neighbour __rcu **buckets = nht->hash_buckets;  [local]
    569  kfree(buckets);
    [all...]
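
Lines 542-549 show the allocation strategy for the bucket array: small tables come from kzalloc(), larger ones straight from the page allocator, and kmemleak is told about the page allocation because it cannot track those by itself. A schematic kernel-style sketch of that pattern; the helper name and surrounding structure are illustrative, not the file's actual code:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <net/neighbour.h>

/* Illustrative: allocate a zeroed array of bucket head pointers, via
 * kzalloc() when it fits in a page and the page allocator otherwise. */
static struct neighbour __rcu **alloc_neigh_buckets(size_t size)
{
	struct neighbour __rcu **buckets;

	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					 get_order(size));
		/* kmemleak does not see raw page allocations on its own */
		if (buckets)
			kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	return buckets;
}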

bpf_sk_storage.c
    734  b = &smap->buckets[bucket_id++];
    748  b = &smap->buckets[i];

dev.c
    530   * MUST BE last in hash buckets and checking protocol handlers
    4727  if (likely(fl->buckets[old_flow]))
    4728  fl->buckets[old_flow]--;
    4730  if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
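
Lines 4727-4730 are the per-CPU flow-limit heuristic: a short circular history of recent flow hashes is kept, the bucket of the flow falling out of the window is decremented, the new flow's bucket is incremented, and a flow whose bucket exceeds half the history length is treated as dominating the backlog. A self-contained userspace sketch of that idea, with constants and names that are illustrative rather than the kernel's:

#include <stdbool.h>
#include <stdint.h>

#define HISTORY_LEN  128		/* illustrative, cf. FLOW_LIMIT_HISTORY */
#define NUM_BUCKETS  64

struct flow_limit {
	unsigned int history_head;
	uint16_t history[HISTORY_LEN];	/* recent flow bucket ids */
	uint8_t  buckets[NUM_BUCKETS];	/* per-flow packet counts in the window */
};

/* Returns true when the new packet's flow dominates the recent window
 * and its packet should be dropped. */
static bool flow_over_limit(struct flow_limit *fl, uint32_t flow_hash)
{
	unsigned int new_flow = flow_hash % NUM_BUCKETS;
	unsigned int old_flow = fl->history[fl->history_head];

	fl->history[fl->history_head] = new_flow;
	fl->history_head = (fl->history_head + 1) % HISTORY_LEN;

	if (fl->buckets[old_flow])
		fl->buckets[old_flow]--;

	return ++fl->buckets[new_flow] > (HISTORY_LEN >> 1);
}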

/linux-master/drivers/gpu/drm/i915/gem/

i915_gem_execbuffer.c
    313   struct hlist_head *buckets; /** ht for relocation handles */  [member in struct i915_execbuffer]
    363   eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
    365   if (eb->buckets)
    579   &eb->buckets[hash_32(entry->handle,
    1076  head = &eb->buckets[hash_32(handle, eb->lut_size)];
    1110  kfree(eb->buckets);
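
The entry above shows a common kernel pattern: a power-of-two array of hlist_head buckets indexed with hash_32() (allocation at line 363, bucket lookup at line 1076). A minimal kernel-style sketch of that pattern under the same assumptions; the struct and function names are illustrative:

#include <linux/errno.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct handle_lut {
	struct hlist_head *buckets;
	unsigned int lut_bits;		/* log2 of the bucket count */
};

struct handle_entry {
	struct hlist_node node;
	u32 handle;
};

static int lut_init(struct handle_lut *lut, unsigned int bits)
{
	lut->lut_bits = bits;
	lut->buckets = kcalloc(1u << bits, sizeof(*lut->buckets), GFP_KERNEL);
	return lut->buckets ? 0 : -ENOMEM;
}

static void lut_add(struct handle_lut *lut, struct handle_entry *e)
{
	hlist_add_head(&e->node,
		       &lut->buckets[hash_32(e->handle, lut->lut_bits)]);
}

static struct handle_entry *lut_find(struct handle_lut *lut, u32 handle)
{
	struct handle_entry *e;

	/* Only the bucket selected by the hash needs to be walked. */
	hlist_for_each_entry(e, &lut->buckets[hash_32(handle, lut->lut_bits)], node)
		if (e->handle == handle)
			return e;
	return NULL;
}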

/linux-master/tools/testing/selftests/drivers/net/mlxsw/

devlink_trap_l3_exceptions.sh
    464  ip nexthop add id 10 group 1 type resilient buckets 32

/linux-master/include/media/

v4l2-ctrls.h
    376  * @buckets: Buckets for the hashing. Allows for quick control lookup.
    382  * @nr_of_buckets: Total number of buckets in the array.
    402  struct v4l2_ctrl_ref **buckets;  [member in struct v4l2_ctrl_handler]
    500  * it is way off, then you either waste memory (too many buckets
    502  * buckets are allocated, so there are more slow list lookups).
    512  * Return: returns an error if the buckets could not be allocated. This
    529  * it is way off, then you either waste memory (too many buckets
    531  * buckets are allocated, so there are more slow list lookups).
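
The documentation quoted at lines 500-531 describes the trade-off behind the bucket-count hint: too large a hint wastes memory, too small a hint leaves fewer buckets and more slow list lookups. The hint is the expected number of controls passed to v4l2_ctrl_handler_init(). A sketch of the usual driver-side usage, assuming only the standard v4l2-ctrls API; control choices and the helper name are illustrative:

#include <media/v4l2-ctrls.h>

/* Illustrative: size the handler for roughly two controls, add them,
 * then check the handler's sticky error field. */
static int mydrv_init_ctrls(struct v4l2_ctrl_handler *hdl,
			    const struct v4l2_ctrl_ops *ops)
{
	v4l2_ctrl_handler_init(hdl, 2);		/* hint: ~2 controls expected */

	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, 0, 255, 1, 64);

	if (hdl->error) {
		int err = hdl->error;

		v4l2_ctrl_handler_free(hdl);
		return err;
	}
	return 0;
}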

/linux-master/fs/bcachefs/

alloc_foreground.c
    5    * Foreground allocator code: allocate buckets from freelist, and allocate in
    10   * bch2_bucket_alloc_set() allocates one or more buckets from different devices
    21   #include "buckets.h"
    55   * Open buckets represent a bucket that's currently being allocated from. They
    58   * - They track buckets that have been partially allocated, allowing for
    61   * - They provide a reference to the buckets they own that mark and sweep GC
    379  * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
    519  * @cl: if not NULL, closure to be used to wait if buckets not available
    540  if (usage->d[BCH_DATA_need_discard].buckets > avai
    [all...]
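
The header comments quoted at lines 55-61 describe open buckets as handles on buckets currently being written to: they remember how much of the bucket is still free and pin the bucket so GC will not reclaim it mid-write. Purely as a conceptual illustration of such a handle (this is not the real bcachefs open_bucket definition):

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical illustration only - not the actual bcachefs structure. */
struct example_open_bucket {
	atomic_t	pin;		/* keeps GC from reclaiming the bucket */
	u8		dev;		/* device the bucket lives on */
	u64		bucket;		/* bucket index on that device */
	u32		sectors_free;	/* space left for further allocations */
};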

recovery.c
    11   #include "buckets.h"
    368  ca->usage_base->d[i].buckets = le64_to_cpu(u->d[i].buckets);
    970  * Write out the superblock and journal buckets, now that we can do

journal_io.c
    8     #include "buckets.h"
    711   prt_printf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
    712   le64_to_cpu(u->d[i].buckets),
    990   u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
    1094  bucket_to_sector(ca, ja->buckets[bucket]),
    1141  pr_debug("%u journal buckets", ja->nr);
    1475  ja->buckets[ja->cur_idx]) +
    1657  * more buckets:
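
bucket_to_sector() at lines 990 and 1094 converts a journal bucket index into an absolute device sector. Assuming a fixed per-device bucket size expressed in 512-byte sectors, the conversion is a single multiplication; a hedged sketch, not the bcachefs helper itself:

#include <stdint.h>

/* Illustrative only: map a bucket index to its first sector, given the
 * device's bucket size in 512-byte sectors. */
static inline uint64_t example_bucket_to_sector(uint64_t bucket,
						uint32_t bucket_size_sectors)
{
	return bucket * bucket_size_sectors;
}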

bcachefs_format.h
    535   __le64 buckets[];  [member in struct bch_sb_field_journal]
    1392  __le64 buckets;  [member in struct jset_entry_dev_usage_type]
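
The __le64 buckets[] member at line 535 is a flexible array member at the end of an on-disk superblock field: the number of journal buckets is not stored separately but follows from the field's total size. A sketch of how such a trailing array is typically sized; the struct here is a simplified stand-in, not the real bch_sb_field_journal:

#include <asm/byteorder.h>
#include <linux/types.h>

/* Simplified stand-in for an on-disk field ending in a flexible array. */
struct example_journal_field {
	__le32 nr_bytes;	/* total size of the field, including header */
	__le32 pad;
	__le64 buckets[];	/* journal bucket indices, little-endian */
};

static u64 example_nr_buckets(const struct example_journal_field *f)
{
	/* Entry count is whatever is left after the fixed header. */
	return (le32_to_cpu(f->nr_bytes) - sizeof(*f)) / sizeof(f->buckets[0]);
}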

alloc_background.c
    13    #include "buckets.h"
    1178  " device %llu buckets %llu-%llu",
    1330  * valid for buckets that exist; this just checks for keys for nonexistent
    1331  * buckets.
    1367  "bucket_gens key for invalid buckets:\n %s",
    1656  bch2_dev_usage_read(s->ca).d[BCH_DATA_free].buckets)
    2056  * Scan the alloc btree for every bucket on @ca, and add buckets to the
    2225  * We need to reserve buckets (from the number
    2226  * of currently available buckets) against
    2234  * available buckets)
    [all...]

super.c
    1971  bch_err_msg(ca, ret, "resizing buckets");
    1995  ca->usage_base->d[BCH_DATA_free].buckets += nbuckets - old_nbuckets;

/linux-master/net/unix/

af_unix.c
    322   sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
    373   sk_for_each(s, &net->unx.table.buckets[hash]) {
    3268  for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
    3618  net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
    3621  if (!net->unx.table.buckets)
    3626  INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
    3645  kvfree(net->unx.table.buckets);
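
Lines 3618-3645 show the lifecycle of the per-netns unix socket hash table: the bucket array is allocated with kvmalloc_array(), every hlist head is initialized, sockets are added at the slot picked by their hash, and the array is released with kvfree() on teardown. A condensed kernel-style sketch of the allocate/initialize/free part; the structure name and size constant are illustrative:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>

#define EXAMPLE_HASH_SIZE 256		/* illustrative, cf. UNIX_HASH_SIZE */

struct example_hash_table {
	struct hlist_head *buckets;
};

static int example_table_init(struct example_hash_table *t)
{
	int i;

	t->buckets = kvmalloc_array(EXAMPLE_HASH_SIZE, sizeof(*t->buckets),
				    GFP_KERNEL);
	if (!t->buckets)
		return -ENOMEM;

	for (i = 0; i < EXAMPLE_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&t->buckets[i]);
	return 0;
}

static void example_table_free(struct example_hash_table *t)
{
	kvfree(t->buckets);
}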

/linux-master/tools/testing/selftests/net/

fib_nexthops.sh
    355  local buckets=$2
    364  # create a resilient group with $buckets buckets and dump them
    366  run_cmd "$IP nexthop add id 1000 group 100 type resilient buckets $buckets"
    368  log_test $? 0 "Dump large (x$buckets) nexthop buckets"
    942  # migration of nexthop buckets - equal weights
    946  run_cmd "$IP nexthop add id 102 group 62/63 type resilient buckets 2 idle_timer 0"
    950  "id 102 group 62 type resilient buckets
    [all...]

/linux-master/include/trace/events/ |

bcache.h
    441  __entry->sectors = GC_SECTORS_USED(&ca->buckets[bucket]);

/linux-master/drivers/md/bcache/

btree.c
    6     * All allocation is done in buckets, which should match the erase block size
    10    * bucket priority is increased on cache hit, and periodically all the buckets
    83    * Add a sysfs tunable for the number of open data buckets
    815   * Don't worry about the mca_rereserve buckets
    1591  * would run out of the buckets (since no new bucket
    1775  /* don't reclaim buckets to which writeback keys point */
    1802  SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
    1806  SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
    2132  * We need to put some unused buckets directly on the prio freelist in
    2133  * order to get the allocator thread started - it needs freed buckets i
    [all...]
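
The header comment at lines 6-10 summarizes bcache's allocation model: space is handed out in buckets sized to the erase block, a bucket's priority is raised on a cache hit, and priorities are periodically rescaled so rarely-hit buckets age out and become the cheapest to reclaim. Purely as an illustration of that aging idea (not bcache's actual code or constants):

#include <stdint.h>

#define EXAMPLE_NBUCKETS   1024
#define EXAMPLE_INIT_PRIO  32768u	/* illustrative starting priority */

static uint16_t prio[EXAMPLE_NBUCKETS];

/* On a cache hit, bump the bucket back to full priority. */
static void example_bucket_hit(unsigned int bucket)
{
	prio[bucket] = EXAMPLE_INIT_PRIO;
}

/* Periodically decay every bucket so cold buckets sink toward zero
 * and get reclaimed first. */
static void example_rescale_priorities(void)
{
	for (unsigned int i = 0; i < EXAMPLE_NBUCKETS; i++)
		if (prio[i])
			prio[i]--;
}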

sysfs.c
    1082  p[i] = ca->buckets[i].prio;

/linux-master/drivers/infiniband/ulp/ipoib/

ipoib.h
    298  struct ipoib_neigh __rcu **buckets;  [member in struct ipoib_neigh_hash]
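
The __rcu-annotated bucket array at line 298 means lookups walk the buckets under rcu_read_lock() with every pointer load going through rcu_dereference(), while writers publish entries with rcu_assign_pointer(). A schematic sketch of a reader on such a table; the node type and field names are illustrative, not ipoib's:

#include <linux/rcupdate.h>
#include <linux/types.h>

/* Illustrative RCU-listed neighbour entry. */
struct example_neigh {
	struct example_neigh __rcu *hnext;
	u32 key;
};

struct example_neigh_hash {
	struct example_neigh __rcu **buckets;
	u32 mask;			/* bucket count minus one */
};

static struct example_neigh *example_lookup(struct example_neigh_hash *h, u32 key)
{
	struct example_neigh *n;

	rcu_read_lock();
	for (n = rcu_dereference(h->buckets[key & h->mask]);
	     n;
	     n = rcu_dereference(n->hnext))
		if (n->key == key)
			break;
	/* A real caller would take a reference on n before unlocking. */
	rcu_read_unlock();
	return n;
}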

/linux-master/lib/

test_rhashtable.c
    486  pos = rht_ptr_exclusive(tbl->buckets + i);

/linux-master/include/linux/

nfs_xdr.h
    1307  struct pnfs_commit_bucket buckets[];  [member in struct pnfs_commit_array]

/linux-master/drivers/net/

amt.c
    418  int i, buckets;  [local]
    420  buckets = amt->hash_buckets;
    430  for (i = 0; i < buckets; i++) {