Searched refs:nthreads (Results 1 - 25 of 36) sorted by relevance


/linux-master/tools/perf/util/
counts.c
10 struct perf_counts *perf_counts__new(int ncpus, int nthreads) argument
17 values = xyarray__new(ncpus, nthreads, sizeof(struct perf_counts_values));
25 values = xyarray__new(ncpus, nthreads, sizeof(bool));
61 int nthreads = perf_thread_map__nr(evsel->core.threads); local
63 evsel->counts = perf_counts__new(perf_cpu_map__nr(cpus), nthreads);
counts.h
37 struct perf_counts *perf_counts__new(int ncpus, int nthreads);
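
Editor's note: the counts.c/counts.h matches show perf_counts__new() sizing its storage as a cpu-by-thread matrix via xyarray__new(ncpus, nthreads, ...). Below is a minimal, self-contained analogue of that layout for illustration only; it does not use perf's xyarray API, and the counts_matrix names are made up here.

#include <stdlib.h>

/* Illustrative analogue of a cpu x thread value matrix. */
struct counts_matrix {
	int ncpus;
	int nthreads;
	unsigned long long *values;	/* ncpus * nthreads entries */
};

static struct counts_matrix *counts_matrix__new(int ncpus, int nthreads)
{
	struct counts_matrix *m = malloc(sizeof(*m));

	if (!m)
		return NULL;
	m->ncpus = ncpus;
	m->nthreads = nthreads;
	m->values = calloc((size_t)ncpus * nthreads, sizeof(*m->values));
	if (!m->values) {
		free(m);
		return NULL;
	}
	return m;
}

/* One slot per (cpu, thread) pair, in the spirit of xyarray indexing. */
static unsigned long long *counts_matrix__entry(struct counts_matrix *m,
						int cpu, int thread)
{
	return &m->values[(size_t)cpu * m->nthreads + thread];
}

int main(void)
{
	struct counts_matrix *m = counts_matrix__new(4, 2);

	if (!m)
		return 1;
	*counts_matrix__entry(m, 3, 1) = 42;	/* counts for cpu 3, thread 1 */
	free(m->values);
	free(m);
	return 0;
}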
/linux-master/tools/perf/bench/
futex-requeue.c
53 OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
77 params.nthreads,
131 threads_starting = params.nthreads;
138 for (i = 0; i < params.nthreads; i++) {
191 if (!params.nthreads)
192 params.nthreads = perf_cpu_map__nr(cpu);
194 worker = calloc(params.nthreads, sizeof(*worker));
201 if (params.nrequeue > params.nthreads)
202 params.nrequeue = params.nthreads;
205 params.nrequeue = params.nthreads;
[all...]
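
Editor's note: a setup pattern repeated across the futex benchmarks is visible here: if -t/--threads was not given, nthreads defaults to the number of available CPUs, a worker array of that size is calloc()ed, and dependent parameters (nrequeue in this file) are clamped to nthreads. A minimal user-space sketch of that setup follows; it uses sysconf() as a stand-in for perf_cpu_map__nr(), and the bench_params struct here is a simplified, hypothetical stand-in for bench_futex_parameters.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <pthread.h>

/* Hypothetical parameter block, loosely modeled on bench_futex_parameters. */
struct bench_params {
	unsigned int nthreads;
	unsigned int nrequeue;
};

int main(void)
{
	struct bench_params params = { .nthreads = 0, .nrequeue = 128 };
	pthread_t *worker;

	/* Default to one worker per online CPU when -t was not given. */
	if (!params.nthreads)
		params.nthreads = (unsigned int)sysconf(_SC_NPROCESSORS_ONLN);

	worker = calloc(params.nthreads, sizeof(*worker));
	if (!worker)
		return EXIT_FAILURE;

	/* It makes no sense to requeue more waiters than exist. */
	if (params.nrequeue > params.nthreads)
		params.nrequeue = params.nthreads;

	printf("run with %u threads, requeueing %u at a time\n",
	       params.nthreads, params.nrequeue);
	free(worker);
	return 0;
}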
breakpoint.c
22 unsigned int nthreads; member in struct:__anon370
26 .nthreads = 1,
33 OPT_UINTEGER('t', "threads", &thread_params.nthreads, "Specify amount of threads"),
91 threads = calloc(thread_params.nthreads, sizeof(threads[0]));
97 for (i = 0; i < thread_params.nthreads; i++) {
102 futex_wake(&done, thread_params.nthreads, 0);
103 for (i = 0; i < thread_params.nthreads; i++)
111 // then starts nparallel threads which create and join bench_repeat batches of nthreads threads.
161 (double)result_usec / bench_repeat / thread_params.nthreads);
164 thread_params.nthreads * thread_param
200 unsigned int i, nthreads, result_usec, done = 0; local
[all...]
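
Editor's note: the breakpoint.c comment quoted above describes the measurement: create and join bench_repeat batches of nthreads threads, then divide the elapsed microseconds by bench_repeat and by nthreads (line 161). A rough, self-contained sketch of that timing arithmetic, with the thread body and the NTHREADS/BENCH_REPEAT constants as placeholders:

#include <stdio.h>
#include <pthread.h>
#include <sys/time.h>

#define NTHREADS     4	/* placeholder for thread_params.nthreads */
#define BENCH_REPEAT 10	/* placeholder for bench_repeat */

static void *noop(void *arg) { return arg; }

int main(void)
{
	pthread_t tid[NTHREADS];
	struct timeval start, end;
	double result_usec;

	gettimeofday(&start, NULL);
	for (int r = 0; r < BENCH_REPEAT; r++) {
		for (int i = 0; i < NTHREADS; i++)
			pthread_create(&tid[i], NULL, noop, NULL);
		for (int i = 0; i < NTHREADS; i++)
			pthread_join(tid[i], NULL);
	}
	gettimeofday(&end, NULL);

	result_usec = (end.tv_sec - start.tv_sec) * 1000000.0 +
		      (end.tv_usec - start.tv_usec);
	/* Same normalization as the bench: per batch, per thread. */
	printf("%.4f usecs/op\n", result_usec / BENCH_REPEAT / NTHREADS);
	return 0;
}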
futex-wake.c
53 OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
93 params.nthreads,
104 threads_starting = params.nthreads;
111 for (i = 0; i < params.nthreads; i++) {
166 if (!params.nthreads)
167 params.nthreads = perf_cpu_map__nr(cpu);
169 worker = calloc(params.nthreads, sizeof(*worker));
178 getpid(), params.nthreads, params.fshared ? "shared":"private",
205 while (nwoken != params.nthreads)
216 j + 1, nwoken, params.nthreads,
[all...]
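
Editor's note: futex-wake.c parks nthreads waiters on a single futex and then wakes them in batches, looping until nwoken equals nthreads (line 205 above). The sketch below reproduces that wake loop with the raw futex(2) syscall; the batch size, the rendezvous, and the NTHREADS/NWAKES values are simplified placeholders, not the benchmark's real machinery.

#include <stdio.h>
#include <stdatomic.h>
#include <unistd.h>
#include <pthread.h>
#include <linux/futex.h>
#include <sys/syscall.h>

#define NTHREADS 8	/* stand-in for params.nthreads */
#define NWAKES   2	/* stand-in for the per-call wake batch size */

static int futex_word;			/* stays 0, so waiters block */
static atomic_int threads_starting = NTHREADS;

static void *waiter(void *arg)
{
	atomic_fetch_sub(&threads_starting, 1);
	/* Block until woken; the word never changes, so no spurious EAGAIN. */
	syscall(SYS_futex, &futex_word, FUTEX_WAIT_PRIVATE, 0, NULL, NULL, 0);
	return arg;
}

int main(void)
{
	pthread_t tid[NTHREADS];
	unsigned int nwoken = 0;

	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, waiter, NULL);

	/* Crude rendezvous: wait until every waiter is about to block. */
	while (atomic_load(&threads_starting))
		usleep(1000);

	/* Wake NWAKES waiters per call until all of them have been woken. */
	while (nwoken < NTHREADS) {
		long ret = syscall(SYS_futex, &futex_word, FUTEX_WAKE_PRIVATE,
				   NWAKES, NULL, NULL, 0);
		if (ret > 0)
			nwoken += ret;
		else
			usleep(1000);	/* a waiter was not asleep yet; retry */
	}

	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	printf("woke %u of %d threads\n", nwoken, NTHREADS);
	return 0;
}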
futex-wake-parallel.c
63 OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
155 threads_starting = params.nthreads;
162 for (i = 0; i < params.nthreads; i++) {
203 params.nthreads, waketime_avg / USEC_PER_MSEC,
218 params.nthreads,
271 if (!params.nthreads)
272 params.nthreads = perf_cpu_map__nr(cpu);
275 if (params.nwakes > params.nthreads ||
277 params.nwakes = params.nthreads;
279 if (params.nthreads
[all...]
futex-lock-pi.c
48 OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
128 threads_starting = params.nthreads;
134 for (i = 0; i < params.nthreads; i++) {
189 if (!params.nthreads)
190 params.nthreads = perf_cpu_map__nr(cpu);
192 worker = calloc(params.nthreads, sizeof(*worker));
200 getpid(), params.nthreads, params.runtime);
207 threads_starting = params.nthreads;
221 for (i = 0; i < params.nthreads; i++) {
232 for (i = 0; i < params.nthreads;
[all...]
futex-hash.c
56 OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
155 if (!params.nthreads) /* default to the number of CPUs */
156 params.nthreads = perf_cpu_map__nr(cpu);
158 worker = calloc(params.nthreads, sizeof(*worker));
166 getpid(), params.nthreads, params.nfutexes, params.fshared ? "shared":"private", params.runtime);
173 threads_starting = params.nthreads;
182 for (i = 0; i < params.nthreads; i++) {
216 for (i = 0; i < params.nthreads; i++) {
227 for (i = 0; i < params.nthreads; i++) {
epoll-wait.c
91 static unsigned int nthreads = 0; variable
128 OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
317 for (i = 0; i < nthreads; i++) {
394 shuffle((void *)worker, nthreads, sizeof(*worker));
397 for (i = 0; i < nthreads; i++) {
468 if (!nthreads)
469 nthreads = perf_cpu_map__nr(cpu) - 1;
471 worker = calloc(nthreads, sizeof(*worker));
478 rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50;
486 getpid(), nthreads, onesho
[all...]
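
Editor's note: epoll-wait.c (and epoll-ctl.c below) raise RLIMIT_NOFILE to nfds * nthreads * 2 + 50 before creating the descriptors the workers will need, presumably followed by setrlimit(RLIMIT_NOFILE, ...). A minimal sketch of that resource-limit adjustment, with nfds and nthreads as placeholder values:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	unsigned int nthreads = 8, nfds = 64;	/* placeholder sizes */
	struct rlimit rl;

	/* Each worker needs its own set of fds, plus some slack. */
	rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50;
	if (setrlimit(RLIMIT_NOFILE, &rl)) {
		/* Fails for unprivileged users if it exceeds the hard limit. */
		perror("setrlimit(RLIMIT_NOFILE)");
		return 1;
	}
	printf("RLIMIT_NOFILE raised to %llu\n",
	       (unsigned long long)rl.rlim_cur);
	return 0;
}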
epoll-ctl.c
36 static unsigned int nthreads = 0; variable
75 OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
240 for (i = 0; i < nthreads; i++) {
349 if (!nthreads)
350 nthreads = perf_cpu_map__nr(cpu);
352 worker = calloc(nthreads, sizeof(*worker));
358 rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50;
366 getpid(), nthreads, nfds, nsecs);
375 threads_starting = nthreads;
391 for (i = 0; i < nthreads;
[all...]
futex.h
24 unsigned int nthreads; member in struct:bench_futex_parameters
/linux-master/tools/testing/selftests/mm/
migration.c
27 int nthreads; local
37 self->nthreads = numa_num_task_cpus() - 1;
51 self->threads = malloc(self->nthreads * sizeof(*self->threads));
53 self->pids = malloc(self->nthreads * sizeof(*self->pids));
123 if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
131 for (i = 0; i < self->nthreads - 1; i++)
136 for (i = 0; i < self->nthreads - 1; i++)
149 if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
157 for (i = 0; i < self->nthreads - 1; i++) {
171 for (i = 0; i < self->nthreads
[all...]
gup_test.c
94 int filed, i, opt, nr_pages = 1, thp = -1, write = 1, nthreads = 1, ret; local
131 nthreads = atoi(optarg);
204 ksft_set_plan(nthreads);
257 tid = malloc(sizeof(pthread_t) * nthreads);
259 for (i = 0; i < nthreads; i++) {
263 for (i = 0; i < nthreads; i++) {
/linux-master/tools/lib/perf/include/internal/
evsel.h
82 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
88 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
/linux-master/arch/x86/kernel/cpu/
debugfs.c
61 unsigned int dom, nthreads = 1; local
64 nthreads *= x86_topo_system.dom_size[dom];
67 x86_topo_system.dom_size[dom], nthreads);
topology_amd.c
94 unsigned int nthreads = leaf.core_nthreads + 1; local
96 topology_update_dom(tscan, TOPO_SMT_DOMAIN, get_count_order(nthreads), nthreads);
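
Editor's note: the two x86 matches compute a thread count in different ways: debugfs.c multiplies the per-domain sizes together, while topology_amd.c feeds nthreads into get_count_order() to size the SMT domain; get_count_order(n) is effectively ceil(log2(n)) for n >= 1. The user-space sketch below mimics both computations; the domain sizes are made-up numbers and count_order() is a local stand-in for the kernel helper.

#include <stdio.h>

/* Mimics get_count_order(): ceil(log2(n)) for n >= 1. */
static int count_order(unsigned int n)
{
	return n <= 1 ? 0 : 32 - __builtin_clz(n - 1);
}

int main(void)
{
	/* Made-up per-domain sizes: 2 SMT siblings, 8 cores, 1 package. */
	unsigned int dom_size[] = { 2, 8, 1 };
	unsigned int nthreads = 1;

	for (unsigned int dom = 0; dom < 3; dom++)
		nthreads *= dom_size[dom];

	printf("total threads: %u\n", nthreads);		/* 16 */
	printf("SMT shift for %u siblings: %d\n",
	       dom_size[0], count_order(dom_size[0]));		/* 1 */
	return 0;
}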
/linux-master/arch/powerpc/platforms/pseries/
hotplug-cpu.c
145 * find_cpu_id_range - found a linear ranger of @nthreads free CPU ids.
146 * @nthreads : the number of threads (cpu ids)
153 static int find_cpu_id_range(unsigned int nthreads, int assigned_node, argument
164 for (cpu = 0; cpu < nthreads; cpu++)
193 cpumask_shift_left(*cpu_mask, *cpu_mask, nthreads);
213 int len, nthreads, node, cpu, assigned_node; local
222 nthreads = len / sizeof(u32);
240 rc = find_cpu_id_range(nthreads, node, &cpu_mask);
246 rc = find_cpu_id_range(nthreads, NUMA_NO_NODE, &cpu_mask);
272 cpu, cpu + nthreads
296 int len, nthreads, i; local
329 int len, nthreads, i; local
385 int len, nthreads, i; local
[all...]
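
Editor's note: hotplug-cpu.c derives nthreads from the byte length of a property, presumably the thread/interrupt-server list from the device tree (len / sizeof(u32), line 222), and then looks for a contiguous range of that many free logical CPU ids. Below is a simplified user-space sketch of that "find a linear range of nthreads free ids" scan over a plain bool array; the kernel version instead walks a cpumask of candidate ids, shifting it by nthreads per step.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPU_IDS 16	/* placeholder for nr_cpu_ids */

/* Return the first index of nthreads consecutive free ids, or -1. */
static int find_cpu_id_range(const bool *used, unsigned int nthreads)
{
	for (int base = 0; base + (int)nthreads <= NR_CPU_IDS; base += nthreads) {
		bool free_range = true;

		for (unsigned int cpu = 0; cpu < nthreads; cpu++) {
			if (used[base + cpu]) {
				free_range = false;
				break;
			}
		}
		if (free_range)
			return base;
	}
	return -1;
}

int main(void)
{
	bool used[NR_CPU_IDS] = { [0] = true, [1] = true, [2] = true, [3] = true };
	unsigned int len = 4 * sizeof(unsigned int);	/* fake property length */
	unsigned int nthreads = len / sizeof(unsigned int);

	printf("nthreads = %u, free range starts at id %d\n",
	       nthreads, find_cpu_id_range(used, nthreads));	/* 4, 4 */
	return 0;
}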
/linux-master/kernel/locking/
test-ww_mutex.c
315 static int __test_cycle(unsigned int nthreads) argument
318 unsigned int n, last = nthreads - 1;
321 cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
325 for (n = 0; n < nthreads; n++) {
344 for (n = 0; n < nthreads; n++)
350 for (n = 0; n < nthreads; n++) {
357 n, nthreads, cycle->result);
362 for (n = 0; n < nthreads; n++)
574 static int stress(int nlocks, int nthreads, unsigned int flags) argument
584 stress_array = kmalloc_array(nthreads, sizeo
[all...]
/linux-master/kernel/kcsan/
kcsan_test.c
1372 long nthreads = (long)prev; local
1374 if (nthreads < 0 || nthreads >= 32)
1375 nthreads = 0; /* stop */
1376 else if (!nthreads)
1377 nthreads = 2; /* initial value */
1378 else if (nthreads < 5)
1379 nthreads++;
1380 else if (nthreads == 5)
1381 nthreads
1497 int nthreads; local
[all...]
/linux-master/kernel/
scftorture.c
53 torture_param(int, nthreads, -1, "# threads, defaults to -1 for all CPUs.");
161 for (i = 0; i < nthreads; i++) {
504 "--- %s: verbose=%d holdoff=%d longwait=%d nthreads=%d onoff_holdoff=%d onoff_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d use_cpus_read_lock=%d, weight_resched=%d, weight_single=%d, weight_single_rpc=%d, weight_single_wait=%d, weight_many=%d, weight_many_wait=%d, weight_all=%d, weight_all_wait=%d\n", tag,
505 verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown, stat_interval, stutter, use_cpus_read_lock, weight_resched, weight_single, weight_single_rpc, weight_single_wait, weight_many, weight_many_wait, weight_all, weight_all_wait);
520 if (nthreads && scf_stats_p)
521 for (i = 0; i < nthreads; i++)
627 if (nthreads < 0)
628 nthreads = num_online_cpus();
629 scf_stats_p = kcalloc(nthreads, sizeof(scf_stats_p[0]), GFP_KERNEL);
636 VERBOSE_SCFTORTOUT("Starting %d smp_call_function() threads", nthreads);
[all...]
/linux-master/arch/powerpc/kernel/
setup-common.c
415 static int assign_threads(unsigned int cpu, unsigned int nthreads, bool present, argument
418 for (int i = 0; i < nthreads && cpu < nr_cpu_ids; i++) {
456 int nthreads = 1; local
489 nthreads = len / sizeof(int);
499 cpu = nthreads;
505 assign_threads(0, nthreads, avail, intserv);
515 cpu = assign_threads(cpu, nthreads, avail, intserv);
518 /* If no SMT supported, nthreads is forced to 1 */
520 DBG(" SMT disabled ! nthreads forced to 1\n");
521 nthreads
[all...]
/linux-master/lib/
test_objpool.c
39 atomic_t nthreads ____cacheline_aligned_in_smp;
142 atomic_set(&data->nthreads, 1);
223 atomic_inc(&test->data.nthreads);
236 if (atomic_dec_and_test(&test->data.nthreads))
245 int cpu, nthreads = 0; local
266 nthreads++;
272 pr_info("ALL: \tnthreads: %d duration: %lluus\n", nthreads, duration);
388 if (atomic_dec_and_test(&test->data.nthreads))
579 if (atomic_dec_and_test(&test->data.nthreads))
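
Editor's note: test_objpool.c tracks live workers with an atomic nthreads counter: it is initialized to 1, atomic_inc()'d as each worker starts, and whoever makes atomic_dec_and_test() return true knows it was the last one out. Below is a user-space analogue of that reference-count-style completion check using C11 atomics; the kernel code uses atomic_t and different completion handling, and all names here are illustrative.

#include <stdio.h>
#include <stdatomic.h>
#include <pthread.h>

#define NWORKERS 4

static atomic_int nthreads = 1;	/* the launcher holds the initial reference */

static void finish_if_last(const char *who)
{
	/* atomic_fetch_sub() returning 1 mirrors atomic_dec_and_test(). */
	if (atomic_fetch_sub(&nthreads, 1) == 1)
		printf("%s was last out, test complete\n", who);
}

static void *worker(void *arg)
{
	finish_if_last("worker");
	return arg;
}

int main(void)
{
	pthread_t tid[NWORKERS];

	for (int i = 0; i < NWORKERS; i++) {
		atomic_fetch_add(&nthreads, 1);	/* like atomic_inc() per worker */
		pthread_create(&tid[i], NULL, worker, NULL);
	}
	/* Drop the launcher's initial reference; a worker may still be last. */
	finish_if_last("launcher");

	for (int i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}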
/linux-master/tools/lib/perf/
evsel.c
52 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) argument
54 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
60 for (thread = 0; thread < nthreads; thread++) {
72 static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads) argument
74 evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));
513 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) argument
515 if (ncpus == 0 || nthreads == 0)
518 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
522 evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
/linux-master/fs/nfsd/
nfssvc.c
702 int nfsd_get_nrthreads(int n, int *nthreads, struct net *net) argument
710 nthreads[i] = atomic_read(&serv->sv_pools[i].sp_nrthreads);
714 int nfsd_set_nrthreads(int n, int *nthreads, struct net *net) argument
732 nthreads[i] = min(nthreads[i], NFSD_MAXSERVS);
733 tot += nthreads[i];
738 int new = nthreads[i] * NFSD_MAXSERVS / tot;
739 tot -= (nthreads[i] - new);
740 nthreads[i] = new;
743 nthreads[
[all...]
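
Editor's note: nfsd_set_nrthreads() caps each pool's request at NFSD_MAXSERVS and then scales the requests down by roughly NFSD_MAXSERVS/tot, updating the running total as it goes (lines 732-740 above). The user-space sketch below is modeled directly on those visible lines only; the outer guard on the total and the NFSD_MAXSERVS value of 8192 are assumptions, and the surrounding validation is omitted.

#include <stdio.h>

#define NFSD_MAXSERVS 8192	/* assumed cap, for illustration */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Scale per-pool thread requests down toward the global cap. */
static void scale_nrthreads(int n, int *nthreads)
{
	int tot = 0;

	for (int i = 0; i < n; i++) {
		nthreads[i] = MIN(nthreads[i], NFSD_MAXSERVS);
		tot += nthreads[i];
	}
	if (tot <= NFSD_MAXSERVS)	/* assumed guard */
		return;
	for (int i = 0; i < n && tot > 0; i++) {
		int new = nthreads[i] * NFSD_MAXSERVS / tot;

		tot -= (nthreads[i] - new);
		nthreads[i] = new;
	}
}

int main(void)
{
	int pools[2] = { 9000, 6000 };

	scale_nrthreads(2, pools);
	printf("scaled: %d %d\n", pools[0], pools[1]);
	return 0;
}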
/linux-master/tools/testing/selftests/filesystems/binderfs/
binderfs_test.c
403 int i, j, k, nthreads; local
453 nthreads = get_nprocs_conf();
454 if (nthreads > DEFAULT_THREADS)
455 nthreads = DEFAULT_THREADS;
460 for (i = 0; i < nthreads; i++) {

Completed in 235 milliseconds
