Lines Matching defs:cpu
(only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/kernel/)

45 #include <linux/cpu.h>
55 /* Number of pinned cpu breakpoints in a cpu */
58 /* Number of pinned task breakpoints in a cpu */
61 /* Number of non-pinned cpu/task breakpoints in a cpu */
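
The three comments above (file lines 55-61) describe the per-cpu accounting used for breakpoint slot scheduling; the fragments strongly suggest this listing comes from kernel/hw_breakpoint.c of the bundled 2.6.36 tree. A minimal sketch of how such counters could be declared, with the pointer type of nr_task_bp_pinned inferred from its kzalloc()/kfree() uses near the end of this listing (the exact declarations are not shown here and are an assumption):

    /* Sketch, not verbatim source: per-cpu slot accounting, indexed by
     * breakpoint type (TYPE_MAX is assumed, matching the [i] uses below). */
    static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);    /* pinned cpu bps */
    static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]); /* pinned task bps, kzalloc'd */
    static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);      /* non-pinned bps */
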
95 * have in this cpu
97 static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
100 unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
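
A hedged reconstruction of max_task_bp_pinned() from file lines 95-100: tsk_pinned[] acts as a histogram, where tsk_pinned[n-1] counts how many tasks currently hold n pinned breakpoints on this cpu, so the answer is the highest non-empty bucket. The loop bound nr_slots[type] (debug registers available for this breakpoint type) is an assumption not visible in the listing:

    /* Sketch based on the fragments above, not the verbatim function. */
    static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
    {
            unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
            int i;

            /* tsk_pinned[i] == number of tasks owning i + 1 pinned bps here */
            for (i = nr_slots[type] - 1; i >= 0; i--) {
                    if (tsk_pinned[i] > 0)
                            return i + 1;
            }
            return 0;
    }
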
130 * a given cpu (cpu > -1) or in all of them (cpu = -1).
136 int cpu = bp->cpu;
139 if (cpu >= 0) {
140 slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
142 slots->pinned += max_task_bp_pinned(cpu, type);
145 slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
150 for_each_online_cpu(cpu) {
153 nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
155 nr += max_task_bp_pinned(cpu, type);
162 nr = per_cpu(nr_bp_flexible[type], cpu);
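
File lines 130-162 outline the slot-counting helper: a breakpoint bound to one cpu (bp->cpu >= 0) reads that cpu's counters directly, while a system-wide breakpoint walks every online cpu and keeps the worst case. A sketch of that shape; the bp_busy_slots structure is assumed from the slots-> accesses, and the real function's separate per-task path is omitted here:

    /* Sketch: find the busiest pinned/flexible usage the new bp must fit with. */
    static void fetch_bp_busy_slots(struct bp_busy_slots *slots,
                                    struct perf_event *bp, enum bp_type_idx type)
    {
            int cpu = bp->cpu;

            if (cpu >= 0) {                 /* bound to a single cpu */
                    slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
                    slots->pinned += max_task_bp_pinned(cpu, type);
                    slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
                    return;
            }

            for_each_online_cpu(cpu) {      /* system-wide: keep the maximum */
                    unsigned int nr;

                    nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
                    nr += max_task_bp_pinned(cpu, type);
                    if (nr > slots->pinned)
                            slots->pinned = nr;

                    nr = per_cpu(nr_bp_flexible[type], cpu);
                    if (nr > slots->flexible)
                            slots->flexible = nr;
            }
    }
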
172 * in a same cpu.
183 static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
196 tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
215 int cpu = bp->cpu;
218 /* Pinned counter cpu profiling */
222 per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
224 per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
233 if (cpu >= 0) {
234 toggle_bp_task_slot(bp, cpu, enable, type, weight);
236 for_each_online_cpu(cpu)
237 toggle_bp_task_slot(bp, cpu, enable, type, weight);
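
File lines 183-237 are the update side: toggle_bp_task_slot() moves one task's entry in the per-cpu histogram, and its caller either bumps a single cpu counter (cpu-bound profiling) or loops over every online cpu for a task-wide breakpoint. A rough sketch of that caller, assuming a toggle_bp_slot() wrapper and that the owning task is reachable as bp->ctx->task; both names are inferred from context rather than from this listing:

    /* Sketch: account for bp being enabled or disabled, weighted by the
     * number of slots it consumes. */
    static void toggle_bp_slot(struct perf_event *bp, bool enable,
                               enum bp_type_idx type, int weight)
    {
            int cpu = bp->cpu;
            struct task_struct *tsk = bp->ctx->task;  /* assumed owner field */

            if (!tsk) {
                    /* Pinned counter cpu profiling: one per-cpu counter */
                    if (enable)
                            per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
                    else
                            per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
                    return;
            }

            /* Pinned counter task profiling: update the task histogram(s) */
            if (cpu >= 0)
                    toggle_bp_task_slot(bp, cpu, enable, type, weight);
            else
                    for_each_online_cpu(cpu)
                            toggle_bp_task_slot(bp, cpu, enable, type, weight);
    }
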
260 * - If attached to a single cpu, check:
262 * (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
263 * + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
265 * -> If there are already non-pinned counters in this cpu, it means
268 * breakpoints (for this cpu) plus the number of per cpu breakpoints
269 * (for this cpu) doesn't cover every register.
276 * -> This is roughly the same, except we check the number of per cpu
277 * bp for every cpu and we keep the max one. Same for the per tasks
283 * - If attached to a single cpu, check:
285 * ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
286 * + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
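
The comment block at file lines 260-286 states the admission rules. Combined with the helpers above, they amount to: take the worst-case pinned count, reserve one slot whenever any flexible breakpoints exist, and refuse the new breakpoint unless a debug register is still free. A sketch of how a reservation function could apply the pinned-counter rule quoted above; __reserve_bp_slot(), TYPE_DATA, the fixed weight and nr_slots[] are assumptions about code this listing does not show:

    /* Sketch: pinned users, plus one slot reserved when any flexible
     * breakpoints exist, must not exceed the available debug registers. */
    static int __reserve_bp_slot(struct perf_event *bp)
    {
            struct bp_busy_slots slots = { 0 };
            enum bp_type_idx type = TYPE_DATA;  /* assumed: derived from bp->attr */
            int weight = 1;                     /* assumed: slots this bp needs */

            fetch_bp_busy_slots(&slots, bp, type);

            /* Flexible counters need to keep at least one slot for themselves. */
            if (slots.pinned + (slots.flexible > 0) + weight > nr_slots[type])
                    return -ENOSPC;

            toggle_bp_slot(bp, true, type, weight);
            return 0;
    }
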
510 int cpu;
517 for_each_online_cpu(cpu) {
518 pevent = per_cpu_ptr(cpu_events, cpu);
519 bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);
533 for_each_online_cpu(cpu) {
534 pevent = per_cpu_ptr(cpu_events, cpu);
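
File lines 510-534 come from the wide registration path: one kernel counter is created per online cpu and stored in a percpu array, and a second loop rolls back the cpus already armed if any creation fails. A condensed sketch of that pattern, following the call shown at line 519; error-path details beyond what the listing shows are assumptions:

    /* Sketch: register the same kernel breakpoint on every online cpu. */
    struct perf_event * __percpu *
    register_wide_hw_breakpoint(struct perf_event_attr *attr,
                                perf_overflow_handler_t triggered)
    {
            struct perf_event * __percpu *cpu_events;
            struct perf_event **pevent, *bp;
            long err = 0;
            int cpu;

            cpu_events = alloc_percpu(typeof(*cpu_events));
            if (!cpu_events)
                    return (void __percpu __force *)ERR_PTR(-ENOMEM);

            get_online_cpus();
            for_each_online_cpu(cpu) {
                    pevent = per_cpu_ptr(cpu_events, cpu);
                    bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);
                    *pevent = bp;
                    if (IS_ERR(bp)) {
                            err = PTR_ERR(bp);
                            goto fail;
                    }
            }
            put_online_cpus();
            return cpu_events;

    fail:
            /* Unwind the cpus that were already armed before the failure. */
            for_each_online_cpu(cpu) {
                    pevent = per_cpu_ptr(cpu_events, cpu);
                    if (IS_ERR(*pevent))
                            break;
                    unregister_hw_breakpoint(*pevent);
            }
            put_online_cpus();
            free_percpu(cpu_events);
            return (void __percpu __force *)ERR_PTR(err);
    }
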
548 * @cpu_events: the per cpu set of events to unregister
552 int cpu;
555 for_each_possible_cpu(cpu) {
556 pevent = per_cpu_ptr(cpu_events, cpu);
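
The unregister path at file lines 548-556 is the mirror image: it walks every possible cpu rather than only the online ones, unregisters each event, and frees the percpu array. A short sketch under the same assumptions:

    /* Sketch: tear down a set created by register_wide_hw_breakpoint(). */
    void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
    {
            struct perf_event **pevent;
            int cpu;

            for_each_possible_cpu(cpu) {
                    pevent = per_cpu_ptr(cpu_events, cpu);
                    unregister_hw_breakpoint(*pevent);
            }
            free_percpu(cpu_events);
    }
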
572 int cpu, err_cpu;
578 for_each_possible_cpu(cpu) {
580 task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
594 if (err_cpu == cpu)
597 kfree(per_cpu(nr_task_bp_pinned[i], cpu));
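
Finally, file lines 572-597 come from the init code that allocates the per-cpu, per-type nr_task_bp_pinned histograms, and from its error unwinding, which stops once it reaches the cpu that failed (the err_cpu == cpu test). A sketch of that allocate/rollback pattern; TYPE_MAX, nr_slots[] and the -ENOMEM return are assumptions, and note the sketch frees err_cpu's arrays where the fragment at line 597 frees cpu's, which looks unintended:

    /* Sketch: allocate one task-pinned histogram per cpu and per bp type,
     * rolling back the cpus already populated if an allocation fails. */
    static int __init init_hw_breakpoint(void)
    {
            unsigned int **task_bp_pinned;
            int cpu, err_cpu;
            int i;

            for_each_possible_cpu(cpu) {
                    for (i = 0; i < TYPE_MAX; i++) {
                            task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
                            *task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
                                                      GFP_KERNEL);
                            if (!*task_bp_pinned)
                                    goto err_alloc;
                    }
            }

            return 0;   /* remaining notifier/pmu setup omitted from this sketch */

     err_alloc:
            for_each_possible_cpu(err_cpu) {
                    if (err_cpu == cpu)
                            break;  /* cpus past the failing one were never touched */
                    for (i = 0; i < TYPE_MAX; i++)
                            kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
            }
            return -ENOMEM;
    }
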