Results restricted to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/block/

Lines Matching defs:blkg
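
The matched lines below come from the blkio controller code in this directory (almost certainly blk-cgroup.c in the 2.6.36 tree). Three idioms recur throughout: per-group statistics updated under blkg->stats_lock with IRQ-safe locking, timestamp/flag bookkeeping for group-wait and idle time, and an RCU-protected hlist linking each blkio_group into its cgroup. Illustrative sketches of each follow the listing.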

91  * This should be called with the blkg->stats_lock held.
109 * This should be called with the blkg->stats_lock held.
130 /* This should be called with the blkg->stats_lock held. */
131 static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
134 if (blkio_blkg_waiting(&blkg->stats))
136 if (blkg == curr_blkg)
138 blkg->stats.start_group_wait_time = sched_clock();
139 blkio_mark_blkg_waiting(&blkg->stats);
142 /* This should be called with the blkg->stats_lock held. */
156 /* This should be called with the blkg->stats_lock held. */
170 void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
174 spin_lock_irqsave(&blkg->stats_lock, flags);
175 BUG_ON(blkio_blkg_idling(&blkg->stats));
176 blkg->stats.start_idle_time = sched_clock();
177 blkio_mark_blkg_idling(&blkg->stats);
178 spin_unlock_irqrestore(&blkg->stats_lock, flags);
182 void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
188 spin_lock_irqsave(&blkg->stats_lock, flags);
189 stats = &blkg->stats;
196 spin_unlock_irqrestore(&blkg->stats_lock, flags);
200 void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
205 spin_lock_irqsave(&blkg->stats_lock, flags);
206 stats = &blkg->stats;
212 spin_unlock_irqrestore(&blkg->stats_lock, flags);
216 void blkiocg_set_start_empty_time(struct blkio_group *blkg)
221 spin_lock_irqsave(&blkg->stats_lock, flags);
222 stats = &blkg->stats;
226 spin_unlock_irqrestore(&blkg->stats_lock, flags);
236 spin_unlock_irqrestore(&blkg->stats_lock, flags);
242 spin_unlock_irqrestore(&blkg->stats_lock, flags);
246 void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
249 blkg->stats.dequeue += dequeue;
253 static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
258 void blkiocg_update_io_add_stats(struct blkio_group *blkg,
264 spin_lock_irqsave(&blkg->stats_lock, flags);
265 blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
267 blkio_end_empty_time(&blkg->stats);
268 blkio_set_start_group_wait_time(blkg, curr_blkg);
269 spin_unlock_irqrestore(&blkg->stats_lock, flags);
273 void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
278 spin_lock_irqsave(&blkg->stats_lock, flags);
279 blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
281 spin_unlock_irqrestore(&blkg->stats_lock, flags);
285 void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
289 spin_lock_irqsave(&blkg->stats_lock, flags);
290 blkg->stats.time += time;
291 spin_unlock_irqrestore(&blkg->stats_lock, flags);
295 void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
301 spin_lock_irqsave(&blkg->stats_lock, flags);
302 stats = &blkg->stats;
308 spin_unlock_irqrestore(&blkg->stats_lock, flags);
312 void blkiocg_update_completion_stats(struct blkio_group *blkg,
319 spin_lock_irqsave(&blkg->stats_lock, flags);
320 stats = &blkg->stats;
327 spin_unlock_irqrestore(&blkg->stats_lock, flags);
331 void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
336 spin_lock_irqsave(&blkg->stats_lock, flags);
337 blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
339 spin_unlock_irqrestore(&blkg->stats_lock, flags);
344 struct blkio_group *blkg, void *key, dev_t dev)
349 spin_lock_init(&blkg->stats_lock);
350 rcu_assign_pointer(blkg->key, key);
351 blkg->blkcg_id = css_id(&blkcg->css);
352 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
355 cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
356 blkg->dev = dev;
360 static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
362 hlist_del_init_rcu(&blkg->blkcg_node);
363 blkg->blkcg_id = 0;
370 int blkiocg_del_blkio_group(struct blkio_group *blkg)
378 css = css_lookup(&blkio_subsys, blkg->blkcg_id);
382 if (!hlist_unhashed(&blkg->blkcg_node)) {
383 __blkiocg_del_blkio_group(blkg);
397 struct blkio_group *blkg;
401 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
402 __key = blkg->key;
404 return blkg;
428 struct blkio_group *blkg;
441 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
442 pn = blkio_policy_search_node(blkcg, blkg->dev);
448 blkiop->ops.blkio_update_group_weight_fn(blkg,
460 struct blkio_group *blkg;
472 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
473 spin_lock(&blkg->stats_lock);
474 stats = &blkg->stats;
499 spin_unlock(&blkg->stats_lock);
546 /* This should be called with blkg->stats_lock held */
547 static uint64_t blkio_get_stat(struct blkio_group *blkg,
556 blkg->stats.time, cb, dev);
559 blkg->stats.sectors, cb, dev);
562 uint64_t sum = blkg->stats.avg_queue_size_sum;
563 uint64_t samples = blkg->stats.avg_queue_size_samples;
572 blkg->stats.group_wait_time, cb, dev);
575 blkg->stats.idle_time, cb, dev);
578 blkg->stats.empty_time, cb, dev);
581 blkg->stats.dequeue, cb, dev);
587 cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
589 disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
590 blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
601 struct blkio_group *blkg; \
610 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
611 if (blkg->dev) { \
612 spin_lock_irq(&blkg->stats_lock); \
613 cgroup_total += blkio_get_stat(blkg, cb, \
614 blkg->dev, type); \
615 spin_unlock_irq(&blkg->stats_lock); \
739 struct blkio_group *blkg;
787 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
788 if (newpn->dev == blkg->dev) {
790 blkiop->ops.blkio_update_group_weight_fn(blkg,
911 struct blkio_group *blkg;
925 blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
927 key = rcu_dereference(blkg->key);
928 __blkiocg_del_blkio_group(blkg);
941 blkiop->ops.blkio_unlink_group_fn(key, blkg);
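
The update helpers matched above (blkiocg_update_io_add_stats, blkiocg_update_timeslice_used, blkiocg_update_dispatch_stats, and friends) all follow the same pattern: take blkg->stats_lock with interrupts disabled, modify fields of blkg->stats, and release the lock. The sketch below mirrors that shape, compare lines 285-291 in the listing, using hypothetical demo_* names; it is an illustration of the locking convention, not the file's own code.

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_group_stats {
	u64 time;			/* cumulative disk time, like blkg->stats.time */
};

struct demo_group {			/* stand-in for struct blkio_group */
	spinlock_t stats_lock;		/* guards stats; spin_lock_init() at registration,
					 * as line 349 of the listing does */
	struct demo_group_stats stats;
};

/* IRQ-safe critical section, matching the spin_lock_irqsave() calls
 * that appear throughout the matches above. */
static void demo_update_timeslice_used(struct demo_group *g, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&g->stats_lock, flags);
	g->stats.time += time;
	spin_unlock_irqrestore(&g->stats_lock, flags);
}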
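
The group-wait and idle-time helpers around lines 130-196 use a start/stop timestamp idiom: when an interval opens, record sched_clock() and set a state flag; when it closes, add the elapsed delta to the accumulated counter and clear the flag. The sketch below reproduces that idiom with hypothetical demo_* names and a plain bool in place of the blkio_blkg_waiting() flag bit; callers are assumed to hold the group's stats_lock, as the comments in the listing require.

#include <linux/sched.h>	/* sched_clock() */
#include <linux/types.h>

struct demo_wait_stats {
	u64 start_group_wait_time;	/* timestamp of the open interval */
	u64 group_wait_time;		/* accumulated wait time */
	bool waiting;			/* stand-in for the waiting flag */
};

/* Open the interval, but only if one is not already open. */
static void demo_set_start_group_wait_time(struct demo_wait_stats *s)
{
	if (s->waiting)
		return;
	s->start_group_wait_time = sched_clock();
	s->waiting = true;
}

/* Close the interval and accumulate the elapsed time. */
static void demo_update_group_wait_time(struct demo_wait_stats *s)
{
	u64 now;

	if (!s->waiting)
		return;
	now = sched_clock();
	if (now > s->start_group_wait_time)
		s->group_wait_time += now - s->start_group_wait_time;
	s->waiting = false;
}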
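
The registration, lookup, and removal matches (blkiocg_add_blkio_group around line 344, the lookup loop around line 401, and __blkiocg_del_blkio_group at line 360) show the third recurring pattern: each group hangs off its cgroup's blkg_list via an RCU-protected hlist, so readers can walk the list locklessly under rcu_read_lock() while writers serialize on the cgroup's lock. The sketch below reproduces that pattern with hypothetical demo_* names, using the four-argument hlist_for_each_entry_rcu() form that the 2.6.36 listing uses; it is a simplified illustration, not the kernel's actual code.

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_cgroup {
	spinlock_t lock;		/* serializes writers of group_list */
	struct hlist_head group_list;	/* analogous to blkcg->blkg_list */
};

struct demo_group {
	struct hlist_node node;		/* analogous to blkg->blkcg_node */
	void *key;			/* opaque lookup key, as blkg->key is used */
};

/* Writer side: publish the key, then link the node, both under the lock
 * (compare lines 349-352 in the listing). */
static void demo_add_group(struct demo_cgroup *cg, struct demo_group *g, void *key)
{
	unsigned long flags;

	spin_lock_irqsave(&cg->lock, flags);
	rcu_assign_pointer(g->key, key);
	hlist_add_head_rcu(&g->node, &cg->group_list);
	spin_unlock_irqrestore(&cg->lock, flags);
}

/* Reader side: caller must hold rcu_read_lock(); no cgroup lock needed. */
static struct demo_group *demo_lookup_group(struct demo_cgroup *cg, void *key)
{
	struct demo_group *g;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(g, n, &cg->group_list, node) {
		if (g->key == key)
			return g;
	}
	return NULL;
}

/* Writer-side removal, like __blkiocg_del_blkio_group(); the caller still
 * has to wait for an RCU grace period before freeing the group. */
static void demo_del_group(struct demo_cgroup *cg, struct demo_group *g)
{
	unsigned long flags;

	spin_lock_irqsave(&cg->lock, flags);
	hlist_del_init_rcu(&g->node);
	spin_unlock_irqrestore(&cg->lock, flags);
}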