Searched refs:entries (Results 1 - 25 of 999) sorted by relevance


/linux-master/tools/build/feature/
test-backtrace.c
8 size_t entries; local
10 entries = backtrace(backtrace_fns, 10);
11 backtrace_symbols_fd(backtrace_fns, entries, 1);
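Note: backtrace() and backtrace_symbols_fd() are glibc APIs from <execinfo.h>. A minimal standalone sketch of the same capture-and-print pattern, using the illustrative name demo_backtrace rather than the feature test's own code:

#include <execinfo.h>

static void demo_backtrace(void)
{
	void *backtrace_fns[10];
	size_t entries;

	/* Fill backtrace_fns[] with up to 10 return addresses from this stack. */
	entries = backtrace(backtrace_fns, 10);

	/* Resolve each address and write one symbol line per entry to fd 1 (stdout). */
	backtrace_symbols_fd(backtrace_fns, entries, 1);
}

int main(void)
{
	demo_backtrace();
	return 0;
}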
/linux-master/tools/testing/selftests/bpf/progs/
get_branch_snapshot.c
16 struct perf_branch_entry entries[ENTRY_CNT] = {}; variable in typeref:struct:perf_branch_entry
28 total_entries = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
34 if (gbs_in_range(entries[i].from) && gbs_in_range(entries[i].to))
bpf_iter_task_stack.c
9 unsigned long entries[MAX_STACK_TRACE_DEPTH] = {}; variable
22 retlen = bpf_get_task_stack(task, entries,
31 BPF_SEQ_PRINTF(seq, "[<0>] %pB\n", (void *)entries[i]);
51 res = bpf_get_task_stack(task, entries,
64 * past the end of entries in bpf_seq_write call
66 bpf_seq_write(seq, &entries, buf_sz);
test_perf_branches.c
24 __u64 entries[4 * 3] = {0}; local
28 written_stack = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
/linux-master/tools/perf/util/
arm64-frame-pointer-unwind-support.c
12 struct entries { struct
25 struct entries *entries = arg; local
27 entries->stack[entries->length++] = entry->ip;
34 struct entries entries = {}; local
56 ret = unwind__get_entries(add_entry, &entries, thread, sample, 2, true);
59 if (ret || entries.length != 2)
62 return callchain_param.order == ORDER_CALLER ? entries
[all...]
pstack.c
18 void *entries[]; member in struct:pstack
45 if (pstack->entries[i] == key) {
47 memmove(pstack->entries + i,
48 pstack->entries + i + 1,
63 pstack->entries[pstack->top++] = key;
75 ret = pstack->entries[--pstack->top];
76 pstack->entries[pstack->top] = NULL;
84 return pstack->entries[pstack->top - 1];
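Note: the pstack fragments above show a small fixed-size pointer stack: push/pop at the top, plus removal of an arbitrary key that closes the gap with memmove(). A userspace sketch of that idea, with hypothetical demo_* names and a hard-coded capacity:

#include <stdbool.h>
#include <string.h>

#define DEMO_PSTACK_MAX 16

struct demo_pstack {
	unsigned short top;
	void *entries[DEMO_PSTACK_MAX];
};

/* Push a key; the caller keeps top below DEMO_PSTACK_MAX. */
static void demo_pstack_push(struct demo_pstack *s, void *key)
{
	s->entries[s->top++] = key;
}

/* Pop the most recently pushed key and clear its slot. */
static void *demo_pstack_pop(struct demo_pstack *s)
{
	void *ret = s->entries[--s->top];

	s->entries[s->top] = NULL;
	return ret;
}

/* Remove an arbitrary key, closing the gap with memmove() as pstack.c does. */
static bool demo_pstack_remove(struct demo_pstack *s, void *key)
{
	unsigned short i;

	for (i = 0; i < s->top; i++) {
		if (s->entries[i] == key) {
			memmove(s->entries + i, s->entries + i + 1,
				(s->top - i - 1) * sizeof(void *));
			s->top--;
			return true;
		}
	}
	return false;
}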
mem2node.c
50 struct phys_entry *entries, *tmp_entries; local
62 entries = zalloc(sizeof(*entries) * max);
63 if (!entries)
84 struct phys_entry *prev = &entries[j - 1];
93 phys_entry__init(&entries[j++], start, bsize, n->node);
97 /* Cut unused entries, due to merging. */
98 tmp_entries = realloc(entries, sizeof(*entries) * j);
101 entries
[all...]
rb_resort.h
6 * a new sort criteria, that must be present in the entries of the source
15 * fields to be present in each of the entries in the new, sorted, rb_tree.
18 * pre-calculating them from multiple entries in the original 'entry' from
19 * the rb_tree used as a source for the entries to be sorted:
72 struct rb_root entries; \
79 struct rb_node **p = &sorted->entries.rb_node, *parent = NULL; \
88 rb_insert_color(sorted_nd, &sorted->entries); \
92 struct rb_root *entries) \
96 for (nd = rb_first(entries); nd; nd = rb_next(nd)) { \
103 static struct __name##_sorted *__name##_sorted__new(struct rb_root *entries, \
[all...]
rblist.c
15 struct rb_node **p = &rblist->entries.rb_root.rb_node;
40 rb_insert_color_cached(new_node, &rblist->entries, leftmost);
48 rb_erase_cached(rb_node, &rblist->entries);
57 struct rb_node **p = &rblist->entries.rb_root.rb_node;
82 &rblist->entries, leftmost);
103 rblist->entries = RB_ROOT_CACHED;
112 struct rb_node *pos, *next = rb_first_cached(&rblist->entries);
133 for (node = rb_first_cached(&rblist->entries); node;
/linux-master/drivers/net/ethernet/engleder/
tsnep_selftests.c
357 qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
361 qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
367 qopt->entries[0].gate_mask = 0x02;
368 qopt->entries[0].interval = 200000;
369 qopt->entries[1].gate_mask = 0x03;
370 qopt->entries[1].interval = 800000;
371 qopt->entries[2].gate_mask = 0x07;
372 qopt->entries[2].interval = 240000;
373 qopt->entries[3].gate_mask = 0x01;
374 qopt->entries[
[all...]
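Note: the allocation on line 357 uses the kernel's struct_size() helper from <linux/overflow.h> to size a structure ending in a flexible array of gate-control entries. A kernel-style sketch of that idiom, using a hypothetical demo_sched structure rather than tc_taprio_qopt_offload:

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>		/* kzalloc() */
#include <linux/types.h>

/* Hypothetical stand-in for a gate-list structure such as tc_taprio_qopt_offload. */
struct demo_sched {
	size_t num_entries;
	struct demo_gate {
		u32 gate_mask;
		u32 interval;
	} entries[];			/* flexible array member */
};

static struct demo_sched *demo_sched_alloc(size_t n)
{
	struct demo_sched *s;

	/*
	 * struct_size() computes sizeof(*s) + n * sizeof(s->entries[0])
	 * with overflow checking, which is why callers prefer it to
	 * open-coded size arithmetic.
	 */
	s = kzalloc(struct_size(s, entries, n), GFP_KERNEL);
	if (s)
		s->num_entries = n;
	return s;
}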
/linux-master/lib/
hashtable_test.c
90 /* Both entries should have been visited exactly once. */
125 struct hashtable_test_entry entries[3]; local
130 /* Add three entries to the hashtable. */
132 entries[i].key = i;
133 entries[i].data = i + 10;
134 entries[i].visited = 0;
135 hash_add(hash, &entries[i].node, entries[i].key);
149 KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
154 struct hashtable_test_entry entries[ local
187 struct hashtable_test_entry entries[4]; local
241 struct hashtable_test_entry entries[4]; local
[all...]
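Note: these KUnit fragments exercise <linux/hashtable.h>: entries carry an embedded hlist_node, hash_add() files them under a key, and hash_for_each() walks every bucket. A minimal kernel-style sketch of the same usage, with illustrative demo_* names and an assumed bucket order of 3:

#include <linux/hashtable.h>
#include <linux/printk.h>

struct demo_entry {
	int key;
	int data;
	struct hlist_node node;
};

/* 2^3 = 8 buckets; the bucket order is illustrative, not taken from the test. */
static DEFINE_HASHTABLE(demo_hash, 3);

static void demo_hashtable_usage(void)
{
	static struct demo_entry entries[3];
	struct demo_entry *cur;
	int bkt, i;

	/* Add three entries to the hashtable, keyed by index. */
	for (i = 0; i < 3; i++) {
		entries[i].key = i;
		entries[i].data = i + 10;
		hash_add(demo_hash, &entries[i].node, entries[i].key);
	}

	/* Walk every bucket; each entry is visited exactly once. */
	hash_for_each(demo_hash, bkt, cur, node)
		pr_info("key=%d data=%d\n", cur->key, cur->data);
}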
stackdepot.c
113 static void init_stack_table(unsigned long entries) argument
117 for (i = 0; i < entries; i++)
124 unsigned long entries = 0; local
156 * If stack_bucket_number_order is not set, leave entries as 0 to rely
160 entries = 1UL << stack_bucket_number_order;
164 entries,
176 if (!entries) {
178 * Obtain the number of entries that was calculated by
181 entries = stack_hash_mask + 1;
183 init_stack_table(entries);
192 unsigned long entries; local
381 depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc) argument
512 hash_stack(unsigned long *entries, unsigned int size) argument
535 find_stack(struct list_head *bucket, unsigned long *entries, int size, u32 hash, depot_flags_t flags) argument
584 stack_depot_save_flags(unsigned long *entries, unsigned int nr_entries, gfp_t alloc_flags, depot_flags_t depot_flags) argument
681 stack_depot_save(unsigned long *entries, unsigned int nr_entries, gfp_t alloc_flags) argument
698 stack_depot_fetch(depot_stack_handle_t handle, unsigned long **entries) argument
748 unsigned long *entries; local
760 unsigned long *entries; local
[all...]
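Note: stackdepot deduplicates stack traces. Callers capture a trace with stack_trace_save(), store it with stack_depot_save() to get a compact handle, and later recover it with stack_depot_fetch(). A short kernel-style sketch of that round trip, with hypothetical demo_* wrappers:

#include <linux/gfp.h>
#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t demo_record_stack(void)
{
	unsigned long entries[16];
	unsigned int nr_entries;

	/* Capture the current call chain, skipping no frames. */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	/* Deduplicate and store it; identical traces share one handle. */
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static void demo_print_stack(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr_entries;

	/* Look the handle back up; entries points into depot storage. */
	nr_entries = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr_entries, 0);
}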
list-test.c
387 struct list_head entries[3], *cur; local
392 list_add_tail(&entries[0], &list1);
393 list_add_tail(&entries[1], &list1);
394 list_add_tail(&entries[2], &list1);
396 /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */
397 list_cut_position(&list2, &list1, &entries[1]);
398 /* after: [list2] -> entries[0] -> entries[
415 struct list_head entries[3], *cur; local
443 struct list_head entries[5], *cur; local
468 struct list_head entries[5], *cur; local
493 struct list_head entries[5], *cur; local
520 struct list_head entries[5], *cur; local
643 struct list_head entries[3], *cur; local
661 struct list_head entries[3], *cur; local
679 struct list_head entries[3], *cur, *n; local
700 struct list_head entries[3], *cur, *n; local
720 struct list_test_struct entries[5], *cur; local
741 struct list_test_struct entries[5], *cur; local
1030 struct hlist_node entries[3], *cur; local
1049 struct hlist_node entries[3], *cur, *n; local
1069 struct hlist_test_struct entries[5], *cur; local
1092 struct hlist_test_struct entries[5], *cur; local
1123 struct hlist_test_struct entries[5], *cur; local
1151 struct hlist_test_struct entries[5], *cur; local
[all...]
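Note: the list-test fragments around line 396 demonstrate list_cut_position(), which moves the initial portion of one list, up to and including the given entry, onto another list. A kernel-style sketch of that before/after, with an illustrative demo_list_cut() helper:

#include <linux/list.h>

static void demo_list_cut(void)
{
	struct list_head entries[3], *cur;
	LIST_HEAD(list1);
	LIST_HEAD(list2);
	int i;

	for (i = 0; i < 3; i++)
		list_add_tail(&entries[i], &list1);

	/* before: list1 -> entries[0] -> entries[1] -> entries[2] */
	list_cut_position(&list2, &list1, &entries[1]);
	/* after:  list2 -> entries[0] -> entries[1]   list1 -> entries[2] */

	list_for_each(cur, &list2)
		;	/* visits entries[0], then entries[1] */
}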
/linux-master/kernel/events/
callchain.c
50 struct callchain_cpus_entries *entries; local
53 entries = container_of(head, struct callchain_cpus_entries, rcu_head);
56 kfree(entries->cpu_entries[cpu]);
58 kfree(entries);
63 struct callchain_cpus_entries *entries; local
65 entries = callchain_cpus_entries;
67 call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
74 struct callchain_cpus_entries *entries; local
83 entries = kzalloc(size, GFP_KERNEL);
84 if (!entries)
155 struct callchain_cpus_entries *entries; local
[all...]
/linux-master/drivers/misc/vmw_vmci/
vmci_handle_array.c
22 array = kmalloc(struct_size(array, entries, capacity), GFP_ATOMIC);
48 size_t new_size = struct_size(array, entries,
62 array->entries[array->size] = handle;
78 if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
79 handle = array->entries[i];
81 array->entries[i] = array->entries[array->size];
82 array->entries[array->size] = VMCI_INVALID_HANDLE;
99 handle = array->entries[array->size];
100 array->entries[arra
[all...]
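Note: vmci_handle_array removes an entry by overwriting its slot with the last element rather than shifting the tail, trading ordering for O(1) removal once the element is found. A generic userspace sketch of that swap-with-last idiom, with hypothetical names:

#include <stdbool.h>
#include <stddef.h>

/*
 * Unordered removal: overwrite the matching slot with the last entry and
 * shrink the array, so removal is O(1) after the O(n) search, at the cost
 * of preserving order.
 */
static bool demo_array_remove(int *entries, size_t *size, int victim)
{
	size_t i;

	for (i = 0; i < *size; i++) {
		if (entries[i] == victim) {
			(*size)--;
			entries[i] = entries[*size];
			return true;
		}
	}
	return false;
}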
/linux-master/tools/lib/api/fd/
array.c
15 fda->entries = NULL;
27 struct pollfd *entries = realloc(fda->entries, size); local
29 if (entries == NULL)
34 free(entries);
38 memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr);
42 fda->entries = entries;
65 free(fda->entries);
84 fda->entries[fd
[all...]
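Note: fdarray grows its pollfd array with realloc() and zeroes only the newly added tail before bumping nr_alloc; the real helper also grows a parallel priv array, omitted here. A userspace sketch of the growth step, with a hypothetical demo_fdarray type:

#include <poll.h>
#include <stdlib.h>
#include <string.h>

struct demo_fdarray {
	int nr;
	int nr_alloc;
	struct pollfd *entries;
};

/* Grow the pollfd array by nr slots, zeroing only the newly added tail. */
static int demo_fdarray_grow(struct demo_fdarray *fda, int nr)
{
	size_t size = (fda->nr_alloc + nr) * sizeof(struct pollfd);
	struct pollfd *entries = realloc(fda->entries, size);

	if (entries == NULL)
		return -1;

	memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr);
	fda->nr_alloc += nr;
	fda->entries = entries;
	return 0;
}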
/linux-master/drivers/gpu/drm/nouveau/include/nvif/
clb069.h
6 __u32 entries; member in struct:nvif_clb069_v0
/linux-master/fs/bcachefs/
disk_groups_types.h
15 struct bch_disk_group_cpu entries[] __counted_by(nr);
sb-errors.c
39 if (!BCH_SB_ERROR_ENTRY_NR(&e->entries[i])) {
41 bch2_sb_error_id_to_text(err, BCH_SB_ERROR_ENTRY_ID(&e->entries[i]));
47 BCH_SB_ERROR_ENTRY_ID(&e->entries[i]) >=
48 BCH_SB_ERROR_ENTRY_ID(&e->entries[i + 1])) {
49 prt_printf(err, "entries out of order");
67 bch2_sb_error_id_to_text(out, BCH_SB_ERROR_ENTRY_ID(&e->entries[i]));
69 prt_u64(out, BCH_SB_ERROR_ENTRY_NR(&e->entries[i]));
71 bch2_prt_datetime(out, le64_to_cpu(e->entries[i].last_error_time));
122 SET_BCH_SB_ERROR_ENTRY_ID(&dst->entries[i], src->data[i].id);
123 SET_BCH_SB_ERROR_ENTRY_NR(&dst->entries[
[all...]
/linux-master/kernel/
backtracetest.c
43 unsigned long entries[8]; local
49 nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
50 stack_trace_print(entries, nr_entries, 0);
/linux-master/arch/powerpc/mm/book3s64/
iommu_api.c
34 u64 entries; /* number of entries in hpas/hpages[] */ member in struct:mm_iommu_table_group_mem_t
57 unsigned long entries, unsigned long dev_hpa,
66 ret = account_locked_vm(mm, entries, true);
70 locked_entries = entries;
80 mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
88 * we use @ua and @entries natural alignment to allow IOMMU pages
91 mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
92 mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
102 chunk = min(chunk, entries);
56 mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, unsigned long entries, unsigned long dev_hpa, struct mm_iommu_table_group_mem_t **pmem) argument
186 mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries, struct mm_iommu_table_group_mem_t **pmem) argument
194 mm_iommu_newdev(struct mm_struct *mm, unsigned long ua, unsigned long entries, unsigned long dev_hpa, struct mm_iommu_table_group_mem_t **pmem) argument
308 mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries) argument
[all...]
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_bo_list.h
59 struct amdgpu_bo_list_entry entries[] __counted_by(num_entries);
75 for (e = list->entries; \
76 e != &list->entries[list->num_entries]; \
80 for (e = &list->entries[list->first_userptr]; \
81 e != &list->entries[list->num_entries]; \
/linux-master/tools/perf/tests/
fdarray.c
14 fda->entries[fd].fd = fda->nr - fd;
15 fda->entries[fd].events = revents;
16 fda->entries[fd].revents = revents;
58 fda->entries[2].revents = POLLIN;
60 pr_debug("\nfiltering all but fda->entries[2]:");
70 fda->entries[0].revents = POLLIN;
71 fda->entries[3].revents = POLLIN;
73 pr_debug("\nfiltering all but (fda->entries[0], fda->entries[3]):");
103 if (fda->entries[_id
[all...]
/linux-master/fs/nfs_common/
nfsacl.c
13 * four instead of three entries.
16 * the ACL_MASK and ACL_GROUP_OBJ entries may differ.)
18 * entries contain the identifiers of the owner and owning group.
20 * - ACL entries in the kernel are kept sorted in ascending order
95 int entries = (acl && acl->a_count) ? max_t(int, acl->a_count, 4) : 0; local
99 .array_len = encode_entries ? entries : 0,
110 if (entries > NFS_ACL_MAX_ENTRIES ||
111 xdr_encode_word(buf, base, entries))
122 /* Insert entries in canonical order: other orders seem
157 u32 entries local
345 u32 entries; local
394 u32 entries; local
[all...]
/linux-master/tools/perf/trace/beauty/
ioctl.c
41 if (nr < strarray__ioctl_tty_cmd.nr_entries && strarray__ioctl_tty_cmd.entries[nr] != NULL)
42 return scnprintf(bf, size, "%s", strarray__ioctl_tty_cmd.entries[nr]);
52 if (nr < strarray__drm_ioctl_cmds.nr_entries && strarray__drm_ioctl_cmds.entries[nr] != NULL)
53 return scnprintf(bf, size, "DRM_%s", strarray__drm_ioctl_cmds.entries[nr]);
63 if (nr < strarray__sndrv_pcm_ioctl_cmds.nr_entries && strarray__sndrv_pcm_ioctl_cmds.entries[nr] != NULL)
64 return scnprintf(bf, size, "SNDRV_PCM_%s", strarray__sndrv_pcm_ioctl_cmds.entries[nr]);
74 if (nr < strarray__sndrv_ctl_ioctl_cmds.nr_entries && strarray__sndrv_ctl_ioctl_cmds.entries[nr] != NULL)
75 return scnprintf(bf, size, "SNDRV_CTL_%s", strarray__sndrv_ctl_ioctl_cmds.entries[nr]);
85 if (nr < strarray__kvm_ioctl_cmds.nr_entries && strarray__kvm_ioctl_cmds.entries[nr] != NULL)
86 return scnprintf(bf, size, "KVM_%s", strarray__kvm_ioctl_cmds.entries[n
[all...]

Completed in 565 milliseconds
