Searched refs:cgroups (Results 1 - 25 of 25) sorted by relevance

/linux-master/tools/cgroup/
H A Dmemcg_shrinker.py11 cgroups = {}
17 cgroups[ino] = path
20 return cgroups
44 cgroups = scan_cgroups("/sys/fs/cgroup/")
58 cg = cgroups[ino]
/linux-master/tools/testing/selftests/bpf/prog_tests/
H A Dcgroup_hierarchical_stats.c12 * propagates the changes to the ancestor cgroups.
18 * processes in the leaf cgroups and makes sure all the counters are aggregated
54 } cgroups[] = { variable in typeref:struct:__anon3580
64 #define N_CGROUPS ARRAY_SIZE(cgroups)
119 /* sets up cgroups, returns 0 on success. */
133 fd = create_and_get_cgroup(cgroups[i].path);
137 cgroups[i].fd = fd;
138 cgroups[i].id = get_cgroup_id(cgroups[i].path);
147 close(cgroups[
[all...]
/linux-master/tools/testing/selftests/bpf/progs/
H A Dpercpu_alloc_cgrp_local_storage.c30 e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,
56 e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
89 e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
H A Dcgrp_ls_sleepable.c86 /* task->cgroups is untrusted in sleepable prog outside of RCU CS */
87 __no_rcu_lock(task->cgroups->dfl_cgrp);
119 cgrp = task->cgroups->dfl_cgrp;
H A Dcgrp_ls_recursion.c59 __on_update(task->cgroups->dfl_cgrp);
92 __on_enter(regs, id, task->cgroups->dfl_cgrp);
H A Dcgrp_ls_tp_btf.c86 __on_enter(regs, id, task->cgroups->dfl_cgrp);
124 __on_exit(regs, id, task->cgroups->dfl_cgrp);
H A Drcu_read_lock.c33 struct css_set *cgroups; local
41 cgroups = task->cgroups;
42 if (!cgroups)
44 cgroup_id = cgroups->dfl_cgrp->kn->id;
H A Dprofiler.inc.h258 struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);
269 BPF_CORE_READ(task, cgroups, subsys[i]);
630 struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);
/linux-master/tools/perf/util/
H A Dcgroup.c259 /* collect given cgroups only */
299 /* collect all cgroups first and then match with the pattern */
315 /* collect all cgroups in the cgroup_list */
323 /* allow empty cgroups, i.e., skip */
370 fprintf(stderr, "must define events before cgroups\n");
378 /* allow empty cgroups, i.e., skip */
389 /* nr_cgroups is increased even for empty cgroups */
428 fprintf(stderr, "must define events before cgroups\n");
567 down_write(&env->cgroups.lock);
568 cgrp = __cgroup__findnew(&env->cgroups
[all...]
H A Dcgroup.h20 extern int nr_cgroups; /* number of explicit cgroups defined */
31 int evlist__expand_cgroup(struct evlist *evlist, const char *cgroups,
54 /* read all cgroups in the system and save them in the rbtree */
H A Dlock-contention.h141 struct rb_root cgroups; member in struct:lock_contention
H A Dbpf_lock_contention.c173 read_all_cgroups(&con->cgroups);
367 struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);
528 while (!RB_EMPTY_ROOT(&con->cgroups)) {
529 struct rb_node *node = rb_first(&con->cgroups);
532 rb_erase(node, &con->cgroups);
H A Denv.h125 } cgroups; member in struct:perf_env
/linux-master/include/linux/
H A Dpsi.h63 rcu_assign_pointer(p->cgroups, to);
H A Dcgroup.h391 rcu_dereference_check((task)->cgroups, \
398 rcu_dereference((task)->cgroups)
H A Dsched.h1234 struct css_set __rcu *cgroups; member in struct:task_struct
/linux-master/tools/perf/util/bpf_skel/vmlinux/
H A Dvmlinux.h106 struct css_set *cgroups; member in struct:task_struct
/linux-master/tools/perf/util/bpf_skel/
H A Doff_cpu.bpf.c125 return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);
136 cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_subsys_id], cgroup);
H A Dbperf_cgroup.bpf.c97 cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_subsys_id], cgroup);
H A Dlock_contention.bpf.c166 cgrp = BPF_CORE_READ(task, cgroups, subsys[perf_subsys_id], cgroup);
/linux-master/tools/testing/selftests/mm/
H A Dcharge_reserved_hugetlb.sh506 echo Test normal case, multiple cgroups.
552 echo Test normal case with write, multiple cgroups.
/linux-master/kernel/sched/
H A Dpsi.c926 * Set TSK_ONCPU on @next's cgroups. If @next shares any
1144 * from the outside, so we move cgroups from inside sched/.
1146 rcu_assign_pointer(task->cgroups, to);
1168 * task->cgroups = to
1182 rcu_assign_pointer(task->cgroups, to);
/linux-master/tools/perf/
H A Dbuiltin-lock.c2098 .cgroups = RB_ROOT,
2638 "Filter specific cgroups", parse_cgroup_filter),
/linux-master/kernel/cgroup/
H A Dcgroup.c84 * css_set_lock protects task->cgroups pointer, the list of css_set
194 * cgroups with bigger numbers are newer than those with smaller numbers.
447 /* threaded cgroups can only have threaded controllers */
467 /* threaded cgroups can only have threaded controllers */
725 * reference-counted, to improve performance when child cgroups
828 * populated counters of all associated cgroups accordingly.
907 * account cgroups in empty hierarchies.
982 * associated with the same set of cgroups but different csses.
1000 * different cgroups in hierarchies. As different cgroups ma
[all...]
/linux-master/kernel/bpf/
H A Dverifier.c6338 struct css_set __rcu *cgroups; local
6529 * 'cgroups' pointer is untrusted if task->cgroups dereference

Completed in 326 milliseconds