Searched refs:threads (Results 126 - 150 of 271) sorted by relevance


/linux-master/tools/perf/util/
bpf_off_cpu.c
77 perf_thread_map__pid(evlist->core.threads, 0) != -1) {
85 pid = perf_thread_map__pid(evlist->core.threads, 0);
177 ntasks = perf_thread_map__nr(evlist->core.threads);
247 pid = perf_thread_map__pid(evlist->core.threads, i);
python.c
744 struct perf_thread_map *threads; member in struct:pyrf_thread_map
757 pthreads->threads = thread_map__new(pid, tid, uid);
758 if (pthreads->threads == NULL)
765 perf_thread_map__put(pthreads->threads);
773 return perf_thread_map__nr(pthreads->threads);
780 if (i >= perf_thread_map__nr(pthreads->threads))
783 return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
936 struct perf_thread_map *threads = NULL; local
939 static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };
946 threads
1005 struct perf_thread_map *threads; local
[all...]
evsel.c
376 evsel->core.threads = perf_thread_map__get(orig->core.threads);
1477 perf_thread_map__put(evsel->core.threads);
1715 struct perf_thread_map *threads,
1718 pid_t ignore_pid = perf_thread_map__pid(threads, thread);
1723 /* The system wide setup does not work with threads. */
1732 if (threads->nr == 1)
1737 * because thread_map__remove() will decrease threads->nr.
1739 if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread))
1742 if (thread_map__remove(threads, threa
1713 evsel__ignore_missing_thread(struct evsel *evsel, int nr_cpus, int cpu_map_idx, struct perf_thread_map *threads, int thread, int err) argument
1793 __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads) argument
1870 evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads) argument
2000 evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads, int start_cpu_map_idx, int end_cpu_map_idx) argument
2152 evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads) argument
2172 evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads) argument
3142 struct perf_thread_map *threads = evsel->core.threads; local
[all...]
evlist.c
72 struct perf_thread_map *threads)
75 perf_evlist__set_maps(&evlist->core, cpus, threads);
1039 struct perf_thread_map *threads; local
1057 * thread_map__new_all_cpus to enumerate all threads.
1059 threads = thread_map__new_str(target->pid, target->tid, target->uid,
1062 if (!threads)
1075 perf_evlist__set_maps(&evlist->core, cpus, threads);
1079 perf_thread_map__put(threads);
1084 perf_thread_map__put(threads);
1351 struct perf_thread_map *threads; local
71 evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus, struct perf_thread_map *threads) argument
[all...]
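
Note: a hedged sketch of the thread-map wiring pattern shown by the evlist.c hit above and the switch-tracking.c / code-reading.c hits further down, using only calls that appear in these results. The evlist and cpus objects, the perf-internal headers (util/evlist.h, util/thread_map.h), <unistd.h>/<limits.h>, and fuller error handling are assumed, not shown.

    static int wire_thread_map(struct evlist *evlist, struct perf_cpu_map *cpus)
    {
            struct perf_thread_map *threads;

            /* pid = -1, tid = getpid(): map only the calling thread, any uid. */
            threads = thread_map__new(-1, getpid(), UINT_MAX);
            if (!threads)
                    return -1;

            /*
             * Attach both maps to the evlist core; per the code-reading.c
             * comment below, cpus and threads are then owned by the evlist.
             */
            perf_evlist__set_maps(&evlist->core, cpus, threads);
            return 0;
    }
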
sideband_evlist.c
118 evlist->core.threads) < 0)
synthetic-events.c
79 char *name, *tgids, *ppids, *vmpeak, *threads; local
110 threads = NULL;
112 threads = strstr(ppids ?: bf, "Threads:");
145 if (!vmpeak && threads)
270 * threads set parent pid to main thread. ie., assume main thread
271 * spawns all threads in a process
818 /* some threads may exit just after scan, ignore it */
856 struct perf_thread_map *threads,
884 for (thread = 0; thread < threads->nr; ++thread) {
887 perf_thread_map__pid(threads, threa
855 perf_event__synthesize_thread_map(struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine, bool needs_mmap, bool mmap_data) argument
1199 perf_event__synthesize_thread_map2(struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine) argument
1927 __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, struct target *target, struct perf_thread_map *threads, perf_event__handler_t process, bool needs_mmap, bool data_mmap, unsigned int nr_threads_synthesize) argument
1962 machine__synthesize_threads(struct machine *machine, struct target *target, struct perf_thread_map *threads, bool needs_mmap, bool data_mmap, unsigned int nr_threads_synthesize) argument
[all...]
/linux-master/tools/perf/tests/
switch-tracking.c
340 struct perf_thread_map *threads = NULL; local
348 threads = thread_map__new(-1, getpid(), UINT_MAX);
349 if (!threads) {
366 perf_evlist__set_maps(&evlist->core, cpus, threads);
577 perf_thread_map__put(threads);
code-reading.c
547 struct perf_thread_map *threads = NULL; local
593 threads = thread_map__new_by_tid(pid);
594 if (!threads) {
599 ret = perf_event__synthesize_thread_map(NULL, threads,
628 perf_evlist__set_maps(&evlist->core, cpus, threads);
657 * Both cpus and threads are now owned by evlist
662 perf_thread_map__get(threads);
703 perf_thread_map__put(threads);
openat-syscall-tp-fields.c
67 perf_thread_map__set_pid(evlist->core.threads, 0, getpid());
/linux-master/drivers/acpi/acpica/
dbexec.c
437 * Allow any handlers in separate threads to complete.
518 * Prevent acpi_gbl_db_method_info from being modified by multiple threads
533 if (info->threads && (info->num_created < info->num_threads)) {
534 info->threads[info->num_created++] = acpi_os_get_thread_id();
709 * PARAMETERS: num_threads_arg - Number of threads to create
715 * DESCRIPTION: Create threads to execute method(s)
745 * the created threads with the main thread.
757 * between the created threads.
762 "synchronization between the created threads, %s\n",
782 /* Array to store IDs of threads */
[all...]
/linux-master/tools/testing/radix-tree/
idr-test.c
560 pthread_t threads[20]; local
563 for (i = 0; i < ARRAY_SIZE(threads); i++)
564 if (pthread_create(&threads[i], NULL, ida_random_fn, NULL)) {
570 pthread_join(threads[i], NULL);
572 for (i = 0; i < ARRAY_SIZE(threads); i++)
573 if (pthread_create(&threads[i], NULL, ida_leak_fn, &ida)) {
579 pthread_join(threads[i], NULL);
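
Note: a self-contained sketch (not taken from idr-test.c) of the create/join loop pattern in the hits above; the worker function and its argument are placeholders for ida_random_fn / ida_leak_fn and their data.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *worker(void *arg)          /* placeholder for the test body */
    {
            (void)arg;
            return NULL;
    }

    int main(void)
    {
            pthread_t threads[20];
            size_t i;

            /* Start one worker per slot in the fixed-size array. */
            for (i = 0; i < sizeof(threads) / sizeof(threads[0]); i++) {
                    if (pthread_create(&threads[i], NULL, worker, NULL)) {
                            perror("pthread_create");
                            exit(1);
                    }
            }

            /* Wait for all of them before reusing or freeing shared state. */
            for (i = 0; i < sizeof(threads) / sizeof(threads[0]); i++)
                    pthread_join(threads[i], NULL);

            return 0;
    }
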
/linux-master/tools/lib/perf/
evsel.c
114 struct perf_thread_map *threads)
131 if (threads == NULL) {
140 threads = empty_thread_map;
144 perf_evsel__alloc_fd(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
148 for (thread = 0; thread < threads->nr; thread++) {
162 threads->map[thread].pid,
505 return evsel->threads;
113 perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads) argument
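
Note: the perf_evsel__open() prototype above takes a CPU map and a thread map. Below is a minimal, hedged libperf sketch of opening one software event against a single-thread map, modeled on the libperf example programs; the exact header names, the counts.val field, and the omission of libperf_init()/error printing are assumptions. Link against libperf (-lperf).

    #include <linux/perf_event.h>
    #include <perf/threadmap.h>
    #include <perf/evsel.h>
    #include <stdio.h>

    int main(void)
    {
            struct perf_event_attr attr = {
                    .type   = PERF_TYPE_SOFTWARE,
                    .config = PERF_COUNT_SW_CPU_CLOCK,
            };
            struct perf_thread_map *threads;
            struct perf_evsel *evsel;
            struct perf_counts_values counts;

            threads = perf_thread_map__new_dummy();      /* map with one entry */
            if (!threads)
                    return 1;
            perf_thread_map__set_pid(threads, 0, 0);     /* pid 0: measure self */

            evsel = perf_evsel__new(&attr);
            if (!evsel || perf_evsel__open(evsel, NULL, threads))
                    return 1;                            /* NULL cpus: any CPU */

            if (!perf_evsel__read(evsel, 0, 0, &counts))
                    printf("cpu-clock: %llu\n", (unsigned long long)counts.val);

            perf_evsel__close(evsel);
            perf_evsel__delete(evsel);
            perf_thread_map__put(threads);
            return 0;
    }
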
/linux-master/drivers/gpu/drm/i915/selftests/
i915_request.c
464 struct smoke_thread *threads; local
470 * threads. A very simple test to only catch the most egregious of bugs.
474 threads = kcalloc(ncpus, sizeof(*threads), GFP_KERNEL);
475 if (!threads)
502 threads[n].worker = worker;
503 threads[n].t = &t;
504 threads[n].stop = false;
505 threads[n].result = 0;
507 kthread_init_work(&threads[
1618 struct parallel_thread *threads; local
1732 struct smoke_thread *threads; local
[all...]
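
Note: a hedged, simplified sketch of the per-CPU worker pattern suggested by the i915_request.c hits above (kcalloc'd array of per-thread state plus kthread_init_work); the struct layout, names, and work body are placeholders, not the selftest's actual code.

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/slab.h>
    #include <linux/container_of.h>

    struct smoke_like {
            struct kthread_worker *worker;
            struct kthread_work work;
            int result;
    };

    static void smoke_fn(struct kthread_work *work)
    {
            struct smoke_like *t = container_of(work, struct smoke_like, work);

            t->result = 0;          /* placeholder for the real per-thread body */
    }

    static int run_smoke(unsigned int ncpus)
    {
            struct smoke_like *threads;
            unsigned int n;

            threads = kcalloc(ncpus, sizeof(*threads), GFP_KERNEL);
            if (!threads)
                    return -ENOMEM;

            for (n = 0; n < ncpus; n++) {
                    threads[n].worker = kthread_create_worker(0, "smoke/%u", n);
                    if (IS_ERR(threads[n].worker)) {
                            threads[n].worker = NULL;
                            break;
                    }
                    kthread_init_work(&threads[n].work, smoke_fn);
                    kthread_queue_work(threads[n].worker, &threads[n].work);
            }

            for (n = 0; n < ncpus; n++) {
                    if (!threads[n].worker)
                            continue;
                    kthread_flush_work(&threads[n].work);
                    kthread_destroy_worker(threads[n].worker);
            }
            kfree(threads);
            return 0;
    }
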
/linux-master/drivers/md/dm-vdo/
vdo.c
402 * vdo_make_thread() - Construct a single vdo work_queue and its associated thread (or threads for
407 * @queue_count: The number of actual threads/queues contained in the "thread".
411 * config, and completions can be enqueued to the queue and run on the threads comprising this
420 struct vdo_thread *thread = &vdo->threads[thread_id];
496 vdo_log_info("zones: %d logical, %d physical, %d hash; total threads: %d",
565 struct vdo_thread, __func__, &vdo->threads);
634 if (vdo->threads == NULL)
641 vdo_finish_work_queue(vdo->threads[i].queue);
711 if (vdo->threads != NULL) {
713 free_listeners(&vdo->threads[
[all...]
/linux-master/tools/perf/bench/
numa.c
134 /* Global, read-writable area, accessible to all processes and threads: */
151 struct thread_data *threads; member in struct:global_info
171 OPT_INTEGER('t', "nr_threads" , &p0.nr_threads, "number of threads per process"),
198 "convergence is reached when each process (all its threads) is running on a single NUMA node."),
520 * threads of this process, or only be accessed by this thread:
631 td = g->threads + t;
758 td = g->threads + t;
912 g->threads[task_nr].curr_cpu = cpu;
917 * Count the number of nodes a process's threads
941 td = g->threads
[all...]
/linux-master/kernel/kcsan/
kcsan_test.c
49 static struct task_struct **threads; /* Lists of threads. */ variable in typeref:struct:task_struct
1402 pr_warn_once("Limiting number of threads to %ld (only %d online CPUs)\n",
1407 snprintf(desc, KUNIT_PARAM_DESC_SIZE, "threads=%ld", abs(nthreads));
1512 if (WARN_ON(threads))
1524 threads = kcalloc(nthreads + 1, sizeof(struct task_struct *), GFP_KERNEL);
1525 if (WARN_ON(!threads))
1528 threads[nthreads] = NULL;
1530 if (torture_create_kthread(access_thread, NULL, threads[i]))
1539 kfree(threads);
[all...]
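
Note: a simplified analogue of the kcsan_test.c setup above: allocate a NULL-terminated array of task_struct pointers and start one kthread per slot. kcsan_test.c itself uses the torture_create_kthread() helpers; this sketch substitutes plain kthread_run()/kthread_stop(), and the thread function is assumed to loop until kthread_should_stop().

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/slab.h>

    static struct task_struct **threads;

    static int start_threads(int nthreads, int (*fn)(void *))
    {
            int i;

            /* +1 slot, zeroed by kcalloc, keeps the array NULL-terminated. */
            threads = kcalloc(nthreads + 1, sizeof(*threads), GFP_KERNEL);
            if (!threads)
                    return -ENOMEM;

            for (i = 0; i < nthreads; i++) {
                    struct task_struct *t;

                    t = kthread_run(fn, NULL, "access_thread/%d", i);
                    if (IS_ERR(t))
                            return PTR_ERR(t);  /* caller cleans up via stop_threads() */
                    threads[i] = t;
            }
            return 0;
    }

    static void stop_threads(void)
    {
            struct task_struct **t;

            for (t = threads; t && *t; t++)
                    kthread_stop(*t);   /* fn must poll kthread_should_stop() */
            kfree(threads);
            threads = NULL;
    }
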
/linux-master/arch/arm/mach-mediatek/
platsmp.c
14 #include <linux/threads.h>
/linux-master/arch/x86/include/asm/
fixmap.h
40 #include <linux/threads.h>
/linux-master/arch/sparc/include/asm/
ptrace.h
11 #include <linux/threads.h>
/linux-master/include/linux/
kernel_stat.h
6 #include <linux/threads.h>
/linux-master/arch/m68k/include/asm/
pgtable_mm.h
17 #include <linux/threads.h>
/linux-master/arch/powerpc/include/asm/nohash/32/
pgtable.h
9 #include <linux/threads.h>
/linux-master/tools/lib/perf/include/internal/
evlist.h
29 struct perf_thread_map *threads; member in struct:perf_evlist
/linux-master/arch/x86/kernel/
smpboot.c
104 /* CPUs which are the primary SMT threads */
124 /* Maximum number of SMT threads on any online core */
257 * the core code starts the primary threads first and leaves the
258 * secondary threads waiting for SIPI. Loading microcode on
558 int i, threads; local
591 threads = cpumask_weight(topology_sibling_cpumask(cpu));
592 if (threads > __max_smt_threads)
593 __max_smt_threads = threads;
596 cpu_data(i).smt_active = threads > 1;
611 if (threads
1163 int threads = cpumask_weight(topology_sibling_cpumask(cpu)); local
[all...]
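
Note: a hedged sketch of the sibling-counting idiom in the smpboot.c hits above: the SMT width of a core is the weight of its sibling mask, and taking the maximum over all online CPUs gives the machine-wide value. The function name and the for_each_online_cpu() iteration context are assumptions for illustration.

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    static unsigned int max_smt_threads(void)
    {
            unsigned int cpu, threads, max = 0;

            for_each_online_cpu(cpu) {
                    /* Number of hardware threads sharing this CPU's core. */
                    threads = cpumask_weight(topology_sibling_cpumask(cpu));
                    if (threads > max)
                            max = threads;
            }
            return max;
    }
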
/linux-master/arch/mips/kernel/
head.S
17 #include <linux/threads.h>

Completed in 428 milliseconds
