/linux-master/tools/build/feature/
  test-backtrace.c
      8: size_t entries;  [local]
     10: entries = backtrace(backtrace_fns, 10);
     11: backtrace_symbols_fd(backtrace_fns, entries, 1);
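The feature probe above only needs the glibc backtrace API to compile and link. A minimal standalone sketch of the same two calls, assuming <execinfo.h> is available:

#include <execinfo.h>

int main(void)
{
	void *backtrace_fns[10];
	size_t entries;

	/* Capture up to 10 return addresses from the current stack. */
	entries = backtrace(backtrace_fns, 10);

	/* Symbolize and write them to fd 1 (stdout) without malloc(). */
	backtrace_symbols_fd(backtrace_fns, entries, 1);
	return 0;
}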

/linux-master/tools/testing/selftests/bpf/progs/
  get_branch_snapshot.c
     16: struct perf_branch_entry entries[ENTRY_CNT] = {};  [variable, struct perf_branch_entry]
     28: total_entries = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
     34: if (gbs_in_range(entries[i].from) && gbs_in_range(entries[i].to))
  bpf_iter_task_stack.c
      9: unsigned long entries[MAX_STACK_TRACE_DEPTH] = {};  [variable]
     22: retlen = bpf_get_task_stack(task, entries,
     31: BPF_SEQ_PRINTF(seq, "[<0>] %pB\n", (void *)entries[i]);
     51: res = bpf_get_task_stack(task, entries,
     64:  * past the end of entries in bpf_seq_write call
     66: bpf_seq_write(seq, &entries, buf_sz);
  test_perf_branches.c
     24: __u64 entries[4 * 3] = {0};  [local]
     28: written_stack = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);

/linux-master/tools/perf/util/
  arm64-frame-pointer-unwind-support.c
     12: struct entries {  [struct]
     25: struct entries *entries = arg;  [local]
     27: entries->stack[entries->length++] = entry->ip;
     34: struct entries entries = {};  [local]
     56: ret = unwind__get_entries(add_entry, &entries, thread, sample, 2, true);
     59: if (ret || entries.length != 2)
     62: return callchain_param.order == ORDER_CALLER ? entries
    [all...]
  pstack.c
     18: void *entries[];  [member, struct pstack]
     45: if (pstack->entries[i] == key) {
     47: memmove(pstack->entries + i,
     48:         pstack->entries + i + 1,
     63: pstack->entries[pstack->top++] = key;
     75: ret = pstack->entries[--pstack->top];
     76: pstack->entries[pstack->top] = NULL;
     84: return pstack->entries[pstack->top - 1];
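The hits sketch perf's pointer stack: a flexible entries[] array pushed and popped at top, plus a compacting remove. A self-contained miniature of the same structure; the pstack_* names below are illustrative, not perf's exact API:

#include <stdlib.h>
#include <string.h>

struct pstack {
	unsigned short top, max_nr_entries;
	void *entries[];		/* flexible array, as at line 18 */
};

static struct pstack *pstack_new(unsigned short max_nr_entries)
{
	struct pstack *p = calloc(1, sizeof(*p) +
				     max_nr_entries * sizeof(void *));

	if (p)
		p->max_nr_entries = max_nr_entries;
	return p;
}

static void pstack_push(struct pstack *p, void *key)
{
	if (p->top < p->max_nr_entries)
		p->entries[p->top++] = key;
}

static void *pstack_pop(struct pstack *p)
{
	void *ret;

	if (!p->top)
		return NULL;
	ret = p->entries[--p->top];
	p->entries[p->top] = NULL;	/* cf. lines 75-76 */
	return ret;
}

/* Remove an arbitrary key, compacting the array (cf. lines 45-48). */
static void pstack_remove(struct pstack *p, void *key)
{
	unsigned short i;

	for (i = 0; i < p->top; i++) {
		if (p->entries[i] == key) {
			memmove(p->entries + i, p->entries + i + 1,
				(--p->top - i) * sizeof(void *));
			return;
		}
	}
}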
  mem2node.c
     50: struct phys_entry *entries, *tmp_entries;  [local]
     62: entries = zalloc(sizeof(*entries) * max);
     63: if (!entries)
     84: struct phys_entry *prev = &entries[j - 1];
     93: phys_entry__init(&entries[j++], start, bsize, n->node);
     97: /* Cut unused entries, due to merging. */
     98: tmp_entries = realloc(entries, sizeof(*entries) * j);
    101: entries
    [all...]
  rb_resort.h
      6:  * a new sort criteria, that must be present in the entries of the source
     15:  * fields to be present in each of the entries in the new, sorted, rb_tree.
     18:  * pre-calculating them from multiple entries in the original 'entry' from
     19:  * the rb_tree used as a source for the entries to be sorted:
     72: struct rb_root entries; \
     79: struct rb_node **p = &sorted->entries.rb_node, *parent = NULL; \
     88: rb_insert_color(sorted_nd, &sorted->entries); \
     92: struct rb_root *entries) \
     96: for (nd = rb_first(entries); nd; nd = rb_next(nd)) { \
    103: static struct __name##_sorted *__name##_sorted__new(struct rb_root *entries, \
    [all...]
  rblist.c
     15: struct rb_node **p = &rblist->entries.rb_root.rb_node;
     40: rb_insert_color_cached(new_node, &rblist->entries, leftmost);
     48: rb_erase_cached(rb_node, &rblist->entries);
     57: struct rb_node **p = &rblist->entries.rb_root.rb_node;
     82: &rblist->entries, leftmost);
    103: rblist->entries = RB_ROOT_CACHED;
    112: struct rb_node *pos, *next = rb_first_cached(&rblist->entries);
    133: for (node = rb_first_cached(&rblist->entries); node;
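rblist keeps its nodes in a cached rbtree (rb_root_cached), which remembers the leftmost node so rb_first_cached() is O(1). A sketch of the insertion pattern these hits follow, using the kernel's <linux/rbtree.h> API; struct item is illustrative:

#include <linux/rbtree.h>

struct item {
	struct rb_node node;
	long key;
};

static void item_insert(struct rb_root_cached *root, struct item *new)
{
	struct rb_node **p = &root->rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	/* Walk down to the insertion point, remembering whether we
	 * ever went right (if not, new becomes the cached leftmost). */
	while (*p) {
		struct item *cur = rb_entry(*p, struct item, node);

		parent = *p;
		if (new->key < cur->key) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	rb_link_node(&new->node, parent, p);
	rb_insert_color_cached(&new->node, root, leftmost);
}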

/linux-master/io_uring/
  alloc_cache.h
     15: cache->entries[cache->nr_cached++] = entry;
     24: void *entry = cache->entries[--cache->nr_cached];
     37: cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
     38: if (cache->entries) {
     52: if (!cache->entries)
     58: kvfree(cache->entries);
     59: cache->entries = NULL;

/linux-master/drivers/net/ethernet/engleder/
  tsnep_selftests.c
    357: qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
    361: qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
    367: qopt->entries[0].gate_mask = 0x02;
    368: qopt->entries[0].interval = 200000;
    369: qopt->entries[1].gate_mask = 0x03;
    370: qopt->entries[1].interval = 800000;
    371: qopt->entries[2].gate_mask = 0x07;
    372: qopt->entries[2].interval = 240000;
    373: qopt->entries[3].gate_mask = 0x01;
    374: qopt->entries[
    [all...]
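Line 357 shows the struct_size() idiom from <linux/overflow.h>: overflow-safe sizing of a struct with a trailing flexible array. A sketch under an assumed layout; struct sched_gates here is illustrative, not the taprio offload struct:

#include <linux/overflow.h>
#include <linux/slab.h>

struct sched_gates {
	int num_entries;
	struct { u32 gate_mask; u32 interval; } entries[];
};

static struct sched_gates *sched_gates_alloc(int n)
{
	/* struct_size() == sizeof(*s) + n * sizeof(s->entries[0]),
	 * saturating to SIZE_MAX on overflow so kzalloc() fails cleanly. */
	struct sched_gates *s = kzalloc(struct_size(s, entries, n), GFP_KERNEL);

	if (s)
		s->num_entries = n;
	return s;
}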

/linux-master/lib/
  hashtable_test.c
     90: /* Both entries should have been visited exactly once. */
    125: struct hashtable_test_entry entries[3];  [local]
    130: /* Add three entries to the hashtable. */
    132: entries[i].key = i;
    133: entries[i].data = i + 10;
    134: entries[i].visited = 0;
    135: hash_add(hash, &entries[i].node, entries[i].key);
    149: KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
    154: struct hashtable_test_entry entries[  [local]
    187: struct hashtable_test_entry entries[4];  [local]
    241: struct hashtable_test_entry entries[4];  [local]
    [all...]
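The KUnit test exercises the <linux/hashtable.h> wrappers: hash_add() files a node under a key, hash_for_each() walks every bucket. A condensed sketch of the same pattern; struct item and the values are illustrative:

#include <linux/hashtable.h>
#include <linux/printk.h>

struct item {
	int key;
	int data;
	struct hlist_node node;
};

static void hashtable_demo(void)
{
	DEFINE_HASHTABLE(hash, 3);	/* 2^3 = 8 buckets */
	struct item a = { .key = 1, .data = 11 };
	struct item *cur;
	unsigned int bkt;

	/* File the node under its key; collisions chain in the bucket. */
	hash_add(hash, &a.node, a.key);

	/* Visit every entry; bkt is the current bucket index. */
	hash_for_each(hash, bkt, cur, node)
		pr_info("key=%d data=%d\n", cur->key, cur->data);

	hash_del(&a.node);
}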
  stackdepot.c
    113: static void init_stack_table(unsigned long entries)  [argument]
    117: for (i = 0; i < entries; i++)
    124: unsigned long entries = 0;  [local]
    156:  * If stack_bucket_number_order is not set, leave entries as 0 to rely
    160: entries = 1UL << stack_bucket_number_order;
    164: entries,
    176: if (!entries) {
    178:  * Obtain the number of entries that was calculated by
    181: entries = stack_hash_mask + 1;
    183: init_stack_table(entries);
    192: unsigned long entries;  [local]
    381: depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc)  [argument]
    512: hash_stack(unsigned long *entries, unsigned int size)  [argument]
    535: find_stack(struct list_head *bucket, unsigned long *entries, int size, u32 hash, depot_flags_t flags)  [argument]
    584: stack_depot_save_flags(unsigned long *entries, unsigned int nr_entries, gfp_t alloc_flags, depot_flags_t depot_flags)  [argument]
    674: stack_depot_save(unsigned long *entries, unsigned int nr_entries, gfp_t alloc_flags)  [argument]
    691: stack_depot_fetch(depot_stack_handle_t handle, unsigned long **entries)  [argument]
    741: unsigned long *entries;  [local]
    753: unsigned long *entries;  [local]
    [all...]
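stack_depot_save() deduplicates a stack trace into a compact handle that stack_depot_fetch() later resolves back into the entries array. A sketch of the round trip through the <linux/stackdepot.h> API indexed above; the wrapper names are illustrative:

#include <linux/stackdepot.h>
#include <linux/gfp.h>

static depot_stack_handle_t dedup_trace(unsigned long *entries,
					unsigned int nr_entries)
{
	/* Hash the trace and reuse a pooled copy if one exists,
	 * otherwise allocate depot space with GFP_KERNEL. */
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static unsigned int trace_len(depot_stack_handle_t handle)
{
	unsigned long *entries;

	/* Fetch points entries at the depot's copy; no allocation,
	 * and the returned count is the number of frames. */
	return stack_depot_fetch(handle, &entries);
}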
  list-test.c
    387: struct list_head entries[3], *cur;  [local]
    392: list_add_tail(&entries[0], &list1);
    393: list_add_tail(&entries[1], &list1);
    394: list_add_tail(&entries[2], &list1);
    396: /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */
    397: list_cut_position(&list2, &list1, &entries[1]);
    398: /* after: [list2] -> entries[0] -> entries[
    415: struct list_head entries[3], *cur;  [local]
    443: struct list_head entries[5], *cur;  [local]
    468: struct list_head entries[5], *cur;  [local]
    493: struct list_head entries[5], *cur;  [local]
    520: struct list_head entries[5], *cur;  [local]
    643: struct list_head entries[3], *cur;  [local]
    661: struct list_head entries[3], *cur;  [local]
    679: struct list_head entries[3], *cur, *n;  [local]
    700: struct list_head entries[3], *cur, *n;  [local]
    720: struct list_test_struct entries[5], *cur;  [local]
    741: struct list_test_struct entries[5], *cur;  [local]
   1030: struct hlist_node entries[3], *cur;  [local]
   1049: struct hlist_node entries[3], *cur, *n;  [local]
   1069: struct hlist_test_struct entries[5], *cur;  [local]
   1092: struct hlist_test_struct entries[5], *cur;  [local]
   1123: struct hlist_test_struct entries[5], *cur;  [local]
   1151: struct hlist_test_struct entries[5], *cur;  [local]
    [all...]
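The comments at lines 396-398 describe list_cut_position(), which moves the head of one list, up to and including a given entry, onto another list. A standalone <linux/list.h> sketch of the same operation:

#include <linux/list.h>

static void cut_demo(void)
{
	LIST_HEAD(list1);
	LIST_HEAD(list2);
	struct list_head entries[3];
	int i;

	for (i = 0; i < 3; i++)
		list_add_tail(&entries[i], &list1);

	/* Move list1's head through entries[1] onto list2:
	 * list2: entries[0] -> entries[1];  list1: entries[2]. */
	list_cut_position(&list2, &list1, &entries[1]);
}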

/linux-master/kernel/events/
  callchain.c
     50: struct callchain_cpus_entries *entries;  [local]
     53: entries = container_of(head, struct callchain_cpus_entries, rcu_head);
     56: kfree(entries->cpu_entries[cpu]);
     58: kfree(entries);
     63: struct callchain_cpus_entries *entries;  [local]
     65: entries = callchain_cpus_entries;
     67: call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
     74: struct callchain_cpus_entries *entries;  [local]
     83: entries = kzalloc(size, GFP_KERNEL);
     84: if (!entries)
    155: struct callchain_cpus_entries *entries;  [local]
    [all...]

/linux-master/drivers/misc/vmw_vmci/
  vmci_handle_array.c
     22: array = kmalloc(struct_size(array, entries, capacity), GFP_ATOMIC);
     48: size_t new_size = struct_size(array, entries,
     62: array->entries[array->size] = handle;
     78: if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
     79: handle = array->entries[i];
     81: array->entries[i] = array->entries[array->size];
     82: array->entries[array->size] = VMCI_INVALID_HANDLE;
     99: handle = array->entries[array->size];
    100: array->entries[arra
    [all...]

/linux-master/tools/lib/api/fd/
  array.c
     15: fda->entries = NULL;
     27: struct pollfd *entries = realloc(fda->entries, size);  [local]
     29: if (entries == NULL)
     34: free(entries);
     38: memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr);
     42: fda->entries = entries;
     65: free(fda->entries);
     84: fda->entries[fd
    [all...]

/linux-master/drivers/gpu/drm/nouveau/include/nvif/
  clb069.h
      6: __u32 entries;  [member, struct nvif_clb069_v0]

/linux-master/fs/bcachefs/
  disk_groups_types.h
     15: struct bch_disk_group_cpu entries[] __counted_by(nr);
  sb-downgrade_format.h
     14: struct bch_sb_field_downgrade_entry entries[];  [member, struct bch_sb_field_downgrade]
  replicas_format.h
     13: struct bch_replicas_entry_v0 entries[];  [member, struct bch_sb_field_replicas_v0]
     25: struct bch_replicas_entry_v1 entries[];  [member, struct bch_sb_field_replicas]
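The bcachefs fields above, like the amdgpu list further down, pair a flexible entries[] array with __counted_by(), which ties the array's bounds to a sibling counter so fortified builds and -fsanitize=array-bounds can check accesses. A sketch with an assumed struct; handle_table is illustrative:

#include <linux/overflow.h>
#include <linux/slab.h>

struct handle_table {
	u32 nr;
	u64 entries[] __counted_by(nr);
};

static struct handle_table *handle_table_alloc(u32 nr)
{
	struct handle_table *t = kzalloc(struct_size(t, entries, nr), GFP_KERNEL);

	if (t)
		t->nr = nr;	/* set the counter before touching entries[] */
	return t;
}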

/linux-master/kernel/
  backtracetest.c
     43: unsigned long entries[8];  [local]
     49: nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
     50: stack_trace_print(entries, nr_entries, 0);
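The self-test reduces to two <linux/stacktrace.h> calls: stack_trace_save() snapshots the current call chain into a fixed array, stack_trace_print() dumps it to the log. A sketch:

#include <linux/stacktrace.h>
#include <linux/kernel.h>

static void dump_here(void)
{
	unsigned long entries[8];
	unsigned int nr_entries;

	/* The final argument skips that many leading frames (0 = none). */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	stack_trace_print(entries, nr_entries, 0);
}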

/linux-master/arch/powerpc/mm/book3s64/
  iommu_api.c
     34: u64 entries; /* number of entries in hpas/hpages[] */  [member, struct mm_iommu_table_group_mem_t]
     57: unsigned long entries, unsigned long dev_hpa,
     66: ret = account_locked_vm(mm, entries, true);
     70: locked_entries = entries;
     80: mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
     88:  * we use @ua and @entries natural alignment to allow IOMMU pages
     91: mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
     92: mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
    102: chunk = min(chunk, entries);
     56: mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, unsigned long entries, unsigned long dev_hpa, struct mm_iommu_table_group_mem_t **pmem)  [argument]
    186: mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries, struct mm_iommu_table_group_mem_t **pmem)  [argument]
    194: mm_iommu_newdev(struct mm_struct *mm, unsigned long ua, unsigned long entries, unsigned long dev_hpa, struct mm_iommu_table_group_mem_t **pmem)  [argument]
    308: mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries)  [argument]
    [all...]

/linux-master/drivers/gpu/drm/amd/amdgpu/
  amdgpu_bo_list.h
     59: struct amdgpu_bo_list_entry entries[] __counted_by(num_entries);
     75: for (e = list->entries; \
     76: e != &list->entries[list->num_entries]; \
     80: for (e = &list->entries[list->first_userptr]; \
     81: e != &list->entries[list->num_entries]; \

/linux-master/tools/perf/tests/
  fdarray.c
     14: fda->entries[fd].fd = fda->nr - fd;
     15: fda->entries[fd].events = revents;
     16: fda->entries[fd].revents = revents;
     58: fda->entries[2].revents = POLLIN;
     60: pr_debug("\nfiltering all but fda->entries[2]:");
     70: fda->entries[0].revents = POLLIN;
     71: fda->entries[3].revents = POLLIN;
     73: pr_debug("\nfiltering all but (fda->entries[0], fda->entries[3]):");
    103: if (fda->entries[_id
    [all...]