Search results limited to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/block/; the matches below are all from blk-cgroup.c in that directory.

Lines Matching refs:blkcg

56 static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
59 list_add(&pn->node, &blkcg->policy_list);
62 /* Must be called with blkcg->lock held */
68 /* Must be called with blkcg->lock held */
70 blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev)
74 list_for_each_entry(pn, &blkcg->policy_list, node) {
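
The matches above (lines 56-74) come from the helpers that maintain the cgroup's per-device weight overrides on blkcg->policy_list. A sketch of the pair, reconstructed from the matched lines with the elided lines filled in from the stock linux-2.6.36 tree (check against this vendor drop before relying on it):

    static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
                struct blkio_policy_node *pn)
    {
        list_add(&pn->node, &blkcg->policy_list);
    }

    /* Must be called with blkcg->lock held */
    static struct blkio_policy_node *
    blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev)
    {
        struct blkio_policy_node *pn;

        list_for_each_entry(pn, &blkcg->policy_list, node) {
            if (pn->dev == dev)
                return pn;
        }
        return NULL;
    }

The list is unsorted and the search is linear, which is fine here: it only holds explicit rules written through blkio.weight_device, so it stays short in practice.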
343 void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
348 spin_lock_irqsave(&blkcg->lock, flags);
351 blkg->blkcg_id = css_id(&blkcg->css);
352 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
353 spin_unlock_irqrestore(&blkcg->lock, flags);
355 cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
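
Lines 343-355 are blkiocg_add_blkio_group, the registration path a policy (CFQ in this tree) uses to attach a new blkio_group to a cgroup. The group is linked in under blkcg->lock with interrupts saved, and hlist_add_head_rcu publishes it so readers can walk the list under RCU without the spinlock. A reconstruction, with the lines the listing skips filled in from the stock source:

    void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
            struct blkio_group *blkg, void *key, dev_t dev)
    {
        unsigned long flags;

        spin_lock_irqsave(&blkcg->lock, flags);
        spin_lock_init(&blkg->stats_lock);
        rcu_assign_pointer(blkg->key, key);
        blkg->blkcg_id = css_id(&blkcg->css);
        hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
        spin_unlock_irqrestore(&blkcg->lock, flags);
        /* path and dev are filled in outside the lock */
        cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
        blkg->dev = dev;
    }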
372 struct blkio_cgroup *blkcg;
380 blkcg = container_of(css, struct blkio_cgroup, css);
381 spin_lock_irqsave(&blkcg->lock, flags);
386 spin_unlock_irqrestore(&blkcg->lock, flags);
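
Lines 372-386 sit in blkiocg_del_blkio_group. The caller only has the blkio_group, so the owning cgroup must be recovered from the stored css id under rcu_read_lock() before blkcg->lock can be taken to unhash the group. A hedged reconstruction, again with the unmatched lines filled from the stock tree:

    int blkiocg_del_blkio_group(struct blkio_group *blkg)
    {
        struct blkio_cgroup *blkcg;
        unsigned long flags;
        struct cgroup_subsys_state *css;
        int ret = 1;

        rcu_read_lock();
        css = css_lookup(&blkio_subsys, blkg->blkcg_id);
        if (css) {
            blkcg = container_of(css, struct blkio_cgroup, css);
            spin_lock_irqsave(&blkcg->lock, flags);
            if (!hlist_unhashed(&blkg->blkcg_node)) {
                __blkiocg_del_blkio_group(blkg);
                ret = 0;
            }
            spin_unlock_irqrestore(&blkcg->lock, flags);
        }
        rcu_read_unlock();
        return ret;
    }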
395 struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
401 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
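
Lines 395-401 are the RCU read side, blkiocg_lookup_group. Note the four-argument hlist_for_each_entry_rcu: kernels of this vintage still threaded an explicit struct hlist_node cursor (n) through the hlist iterators. Reconstruction:

    /* caller must hold rcu_read_lock() */
    struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
    {
        struct blkio_group *blkg;
        struct hlist_node *n;

        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
            if (blkg->key == key)
                return blkg;
        }
        return NULL;
    }

The key is the opaque per-queue pointer the policy registered in blkiocg_add_blkio_group, so a policy can find its own group for a given cgroup/queue pair without taking blkcg->lock.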
415 struct blkio_cgroup *blkcg; \
417 blkcg = cgroup_to_blkio_cgroup(cgroup); \
418 return (u64)blkcg->__VAR; \
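
Lines 415-418 fall inside a token-pasting macro that stamps out the cftype read handler for plain scalar fields; in the stock tree it is instantiated only for weight. A sketch; note the handler reads blkcg->weight without taking blkcg->lock:

    #define SHOW_FUNCTION(__VAR) \
    static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup, \
                struct cftype *cftype) \
    { \
        struct blkio_cgroup *blkcg; \
    \
        blkcg = cgroup_to_blkio_cgroup(cgroup); \
        return (u64)blkcg->__VAR; \
    }

    SHOW_FUNCTION(weight);
    #undef SHOW_FUNCTION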
427 struct blkio_cgroup *blkcg;
436 blkcg = cgroup_to_blkio_cgroup(cgroup);
438 spin_lock_irq(&blkcg->lock);
439 blkcg->weight = (unsigned int)val;
441 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
442 pn = blkio_policy_search_node(blkcg, blkg->dev);
449 blkcg->weight);
451 spin_unlock_irq(&blkcg->lock);
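
Lines 427-451 are blkiocg_weight_write, the blkio.weight store. After range-checking, it updates the cgroup-wide default and walks every group: a group whose device has an explicit policy node keeps its per-device weight (the continue), everything else is pushed the new default through each registered policy's callback. A reconstruction; the blkio_list walk between the matches at 442 and 449 is filled from the stock source:

    static int
    blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
    {
        struct blkio_cgroup *blkcg;
        struct blkio_group *blkg;
        struct hlist_node *n;
        struct blkio_policy_type *blkiop;
        struct blkio_policy_node *pn;

        if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
            return -EINVAL;

        blkcg = cgroup_to_blkio_cgroup(cgroup);
        spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);
        blkcg->weight = (unsigned int)val;

        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
            pn = blkio_policy_search_node(blkcg, blkg->dev);
            if (pn)
                continue; /* per-device rule wins over the default */

            list_for_each_entry(blkiop, &blkio_list, list)
                blkiop->ops.blkio_update_group_weight_fn(blkg,
                        blkcg->weight);
        }
        spin_unlock_irq(&blkcg->lock);
        spin_unlock(&blkio_list_lock);
        return 0;
    }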
459 struct blkio_cgroup *blkcg;
470 blkcg = cgroup_to_blkio_cgroup(cgroup);
471 spin_lock_irq(&blkcg->lock);
472 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
501 spin_unlock_irq(&blkcg->lock);
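
Lines 459-501 belong to blkiocg_reset_stats, the blkio.reset_stats write handler. What the matches show is the lock nesting used throughout this file: blkcg->lock guards the group list from the outside, and each group's own stats_lock is taken inside it. A condensed sketch with the actual counter-zeroing elided:

    static int
    blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
    {
        struct blkio_cgroup *blkcg;
        struct blkio_group *blkg;
        struct hlist_node *n;

        blkcg = cgroup_to_blkio_cgroup(cgroup);
        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
            spin_lock(&blkg->stats_lock);
            /* ... zero blkg->stats, preserving the queued counters ... */
            spin_unlock(&blkg->stats_lock);
        }
        spin_unlock_irq(&blkcg->lock);
        return 0;
    }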
600 struct blkio_cgroup *blkcg; \
608 blkcg = cgroup_to_blkio_cgroup(cgroup); \
610 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
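
Lines 600-610 are inside SHOW_FUNCTION_PER_GROUP, the macro that generates the per-group map readers (blkio.time, blkio.sectors, blkio.io_service_bytes and friends). Unlike the writers, these iterate blkg_list under rcu_read_lock() alone, which is exactly why insertions above use hlist_add_head_rcu. A trimmed sketch of the macro's shape, with the stat emission elided:

    #define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total) \
    static int blkiocg_##__VAR##_read(struct cgroup *cgroup, \
            struct cftype *cftype, struct cgroup_map_cb *cb) \
    { \
        struct blkio_cgroup *blkcg; \
        struct blkio_group *blkg; \
        struct hlist_node *n; \
        uint64_t cgroup_total = 0; \
    \
        rcu_read_lock(); \
        blkcg = cgroup_to_blkio_cgroup(cgroup); \
        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) { \
            /* ... emit blkg stats through cb->fill(), sum cgroup_total ... */ \
        } \
        /* ... optionally emit the total ... */ \
        rcu_read_unlock(); \
        return 0; \
    }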
718 unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
723 pn = blkio_policy_search_node(blkcg, dev);
727 return blkcg->weight;
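
blkcg_get_weight (lines 718-727) is what a policy calls when it instantiates a group for a device: an explicit per-device rule wins, otherwise the cgroup default applies. Condensed from the stock source:

    unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg, dev_t dev)
    {
        struct blkio_policy_node *pn;

        pn = blkio_policy_search_node(blkcg, dev);
        if (pn)
            return pn->weight;

        return blkcg->weight;
    }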
738 struct blkio_cgroup *blkcg;
758 blkcg = cgroup_to_blkio_cgroup(cgrp);
760 spin_lock_irq(&blkcg->lock);
762 pn = blkio_policy_search_node(blkcg, newpn->dev);
765 blkio_policy_insert_node(blkcg, newpn);
768 spin_unlock_irq(&blkcg->lock);
775 spin_unlock_irq(&blkcg->lock);
778 spin_unlock_irq(&blkcg->lock);
785 spin_lock_irq(&blkcg->lock);
787 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
793 blkcg->weight);
797 spin_unlock_irq(&blkcg->lock);
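
Lines 738-797 are blkiocg_weight_device_write, the blkio.weight_device parser. The three unlocks at 768, 775 and 778 mark the three branches of its first critical section: insert a new policy node, delete an existing one (a weight of 0 removes the rule), or update one in place. The second critical section (785-797) then pushes the effective weight to the group matching the device. A hedged sketch of that second phase only; the string parsing is elided, and the outer blkio_list_lock nesting is filled from the stock source:

    update_io_group:
        /* propagate to the blkg for this device, if one exists yet */
        spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
            if (newpn->dev == blkg->dev) {
                list_for_each_entry(blkiop, &blkio_list, list)
                    blkiop->ops.blkio_update_group_weight_fn(blkg,
                            newpn->weight ?
                            newpn->weight : blkcg->weight);
            }
        }
        spin_unlock_irq(&blkcg->lock);
        spin_unlock(&blkio_list_lock);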
811 struct blkio_cgroup *blkcg;
816 blkcg = cgroup_to_blkio_cgroup(cgrp);
817 if (!list_empty(&blkcg->policy_list)) {
818 spin_lock_irq(&blkcg->lock);
819 list_for_each_entry(pn, &blkcg->policy_list, node) {
823 spin_unlock_irq(&blkcg->lock);
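
The matching reader (lines 811-823) dumps the rule list; note the unlocked list_empty() check before taking the lock, a shortcut for the common empty case. A reconstruction:

    static int blkiocg_weight_device_read(struct cgroup *cgrp,
                struct cftype *cft, struct seq_file *m)
    {
        struct blkio_cgroup *blkcg;
        struct blkio_policy_node *pn;

        seq_printf(m, "dev\tweight\n");

        blkcg = cgroup_to_blkio_cgroup(cgrp);
        if (!list_empty(&blkcg->policy_list)) {
            spin_lock_irq(&blkcg->lock);
            list_for_each_entry(pn, &blkcg->policy_list, node)
                seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
                        MINOR(pn->dev), pn->weight);
            spin_unlock_irq(&blkcg->lock);
        }
        return 0;
    }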
909 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
918 spin_lock_irqsave(&blkcg->lock, flags);
920 if (hlist_empty(&blkcg->blkg_list)) {
921 spin_unlock_irqrestore(&blkcg->lock, flags);
925 blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
930 spin_unlock_irqrestore(&blkcg->lock, flags);
945 list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
950 free_css_id(&blkio_subsys, &blkcg->css);
952 if (blkcg != &blkio_root_cgroup)
953 kfree(blkcg);
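
Lines 909-953 span blkiocg_destroy. The loop visible in the matches pops groups one at a time: peek at the first entry and unhash it under blkcg->lock, drop the lock, then let every registered policy unlink its side of the group. The lock is dropped first because the order established elsewhere in the file takes blkio_list_lock before blkcg->lock, never the reverse. Afterwards the policy nodes are freed and, unless this is the static root cgroup, the blkcg itself. A condensed sketch of the function body:

    rcu_read_lock();
    do {
        spin_lock_irqsave(&blkcg->lock, flags);
        if (hlist_empty(&blkcg->blkg_list)) {
            spin_unlock_irqrestore(&blkcg->lock, flags);
            break;
        }
        blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
                    blkcg_node);
        key = rcu_dereference(blkg->key);
        __blkiocg_del_blkio_group(blkg);
        spin_unlock_irqrestore(&blkcg->lock, flags);

        /* tell every policy this group is going away */
        spin_lock(&blkio_list_lock);
        list_for_each_entry(blkiop, &blkio_list, list)
            blkiop->ops.blkio_unlink_group_fn(key, blkg);
        spin_unlock(&blkio_list_lock);
    } while (1);

    list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
        blkio_policy_delete_node(pn);
        kfree(pn);
    }

    free_css_id(&blkio_subsys, &blkcg->css);
    rcu_read_unlock();
    if (blkcg != &blkio_root_cgroup)
        kfree(blkcg);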
959 struct blkio_cgroup *blkcg;
963 blkcg = &blkio_root_cgroup;
971 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
972 if (!blkcg)
975 blkcg->weight = BLKIO_WEIGHT_DEFAULT;
977 spin_lock_init(&blkcg->lock);
978 INIT_HLIST_HEAD(&blkcg->blkg_list);
980 INIT_LIST_HEAD(&blkcg->policy_list);
981 return &blkcg->css;
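
The create path (lines 959-981) closes the loop: the root cgroup uses the static blkio_root_cgroup, children are kzalloc'd and start at BLKIO_WEIGHT_DEFAULT, and both paths share the lock and list initialization. A reconstruction; the hierarchy-depth check the stock tree has between lines 963 and 971 is summarized as a comment:

    static struct cgroup_subsys_state *
    blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
    {
        struct blkio_cgroup *blkcg;
        struct cgroup *parent = cgroup->parent;

        if (!parent) {
            blkcg = &blkio_root_cgroup;
            goto done;
        }

        /* (stock tree rejects hierarchies deeper than one level here) */

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
            return ERR_PTR(-ENOMEM);

        blkcg->weight = BLKIO_WEIGHT_DEFAULT;
    done:
        spin_lock_init(&blkcg->lock);
        INIT_HLIST_HEAD(&blkcg->blkg_list);
        INIT_LIST_HEAD(&blkcg->policy_list);
        return &blkcg->css;
    }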