Results from /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/kernel/trace/

Lines Matching defs:cpu_buffer

488 	struct ring_buffer_per_cpu	*cpu_buffer;
639 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
669 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
682 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
686 head = cpu_buffer->head_page;
693 rb_set_list_to_head(cpu_buffer, head->list.prev);
707 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
712 rb_list_head_clear(cpu_buffer->pages);
714 list_for_each(hd, cpu_buffer->pages)
718 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
741 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
746 return rb_head_page_set(cpu_buffer, head, prev,
750 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
755 return rb_head_page_set(cpu_buffer, head, prev,
759 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
764 return rb_head_page_set(cpu_buffer, head, prev,
768 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
777 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
784 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
788 list = cpu_buffer->pages;
789 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
792 page = head = cpu_buffer->head_page;
801 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
802 cpu_buffer->head_page = page;
805 rb_inc_page(cpu_buffer, &page);
809 RB_WARN_ON(cpu_buffer, 1);
834 static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
866 if (tail_page == cpu_buffer->tail_page) {
891 old_tail = cmpxchg(&cpu_buffer->tail_page,
901 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
906 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
915 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
918 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
920 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
927 * @cpu_buffer: CPU buffer with pages to test
932 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
934 struct list_head *head = cpu_buffer->pages;
937 rb_head_page_deactivate(cpu_buffer);
939 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
941 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
944 if (rb_check_list(cpu_buffer, head))
948 if (RB_WARN_ON(cpu_buffer,
951 if (RB_WARN_ON(cpu_buffer,
954 if (rb_check_list(cpu_buffer, &bpage->list))
958 rb_head_page_activate(cpu_buffer);
963 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
975 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
979 rb_check_bpage(cpu_buffer, bpage);
995 cpu_buffer->pages = pages.next;
998 rb_check_pages(cpu_buffer);
1013 struct ring_buffer_per_cpu *cpu_buffer;
1018 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1020 if (!cpu_buffer)
1023 cpu_buffer->cpu = cpu;
1024 cpu_buffer->buffer = buffer;
1025 spin_lock_init(&cpu_buffer->reader_lock);
1026 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1027 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1034 rb_check_bpage(cpu_buffer, bpage);
1036 cpu_buffer->reader_page = bpage;
1043 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1045 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
1049 cpu_buffer->head_page
1050 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1051 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1053 rb_head_page_activate(cpu_buffer);
1055 return cpu_buffer;
1058 free_buffer_page(cpu_buffer->reader_page);
1061 kfree(cpu_buffer);
1065 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1067 struct list_head *head = cpu_buffer->pages;
1070 free_buffer_page(cpu_buffer->reader_page);
1072 rb_head_page_deactivate(cpu_buffer);
1083 kfree(cpu_buffer);
1213 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1216 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1222 spin_lock_irq(&cpu_buffer->reader_lock);
1223 rb_head_page_deactivate(cpu_buffer);
1226 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1228 p = cpu_buffer->pages->next;
1233 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1236 rb_reset_cpu(cpu_buffer);
1237 rb_check_pages(cpu_buffer);
1240 spin_unlock_irq(&cpu_buffer->reader_lock);
1244 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
1251 spin_lock_irq(&cpu_buffer->reader_lock);
1252 rb_head_page_deactivate(cpu_buffer);
1255 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
1260 list_add_tail(&bpage->list, cpu_buffer->pages);
1262 rb_reset_cpu(cpu_buffer);
1263 rb_check_pages(cpu_buffer);
1266 spin_unlock_irq(&cpu_buffer->reader_lock);
1280 struct ring_buffer_per_cpu *cpu_buffer;
1324 cpu_buffer = buffer->buffers[cpu];
1325 rb_remove_pages(cpu_buffer, rm_pages);
1360 cpu_buffer = buffer->buffers[cpu];
1361 rb_insert_pages(cpu_buffer, &pages, new_pages);
1410 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1412 return __rb_page_index(cpu_buffer->reader_page,
1413 cpu_buffer->reader_page->read);
1444 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1446 return rb_page_commit(cpu_buffer->commit_page);
1458 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1467 return cpu_buffer->commit_page->page == (void *)addr &&
1468 rb_commit_index(cpu_buffer) == index;
1472 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1485 max_count = cpu_buffer->buffer->pages * 100;
1487 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1488 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1490 if (RB_WARN_ON(cpu_buffer,
1491 rb_is_reader_page(cpu_buffer->tail_page)))
1493 local_set(&cpu_buffer->commit_page->page->commit,
1494 rb_page_write(cpu_buffer->commit_page));
1495 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1496 cpu_buffer->write_stamp =
1497 cpu_buffer->commit_page->page->time_stamp;
1501 while (rb_commit_index(cpu_buffer) !=
1502 rb_page_write(cpu_buffer->commit_page)) {
1504 local_set(&cpu_buffer->commit_page->page->commit,
1505 rb_page_write(cpu_buffer->commit_page));
1506 RB_WARN_ON(cpu_buffer,
1507 local_read(&cpu_buffer->commit_page->page->commit) &
1520 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1524 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1526 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1527 cpu_buffer->reader_page->read = 0;
1532 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1540 if (iter->head_page == cpu_buffer->reader_page)
1541 iter->head_page = rb_set_head_page(cpu_buffer);
1543 rb_inc_page(cpu_buffer, &iter->head_page);
1593 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1609 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1630 local_add(entries, &cpu_buffer->overrun);
1661 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1680 rb_inc_page(cpu_buffer, &new_head);
1682 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1699 RB_WARN_ON(cpu_buffer, 1);
1718 if (cpu_buffer->tail_page != tail_page &&
1719 cpu_buffer->tail_page != next_page)
1720 rb_head_page_set_normal(cpu_buffer, new_head,
1731 ret = rb_head_page_set_normal(cpu_buffer, next_page,
1734 if (RB_WARN_ON(cpu_buffer,
1760 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1827 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1831 struct buffer_page *commit_page = cpu_buffer->commit_page;
1832 struct ring_buffer *buffer = cpu_buffer->buffer;
1838 rb_inc_page(cpu_buffer, &next_page);
1846 local_inc(&cpu_buffer->commit_overrun);
1864 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
1870 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
1878 ret = rb_handle_head_page(cpu_buffer,
1896 if (unlikely((cpu_buffer->commit_page !=
1897 cpu_buffer->tail_page) &&
1898 (cpu_buffer->commit_page ==
1899 cpu_buffer->reader_page))) {
1900 local_inc(&cpu_buffer->commit_overrun);
1906 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
1918 rb_reset_tail(cpu_buffer, tail_page, tail, length);
1925 rb_reset_tail(cpu_buffer, tail_page, tail, length);
1931 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1938 tail_page = cpu_buffer->tail_page;
1947 return rb_move_tail(cpu_buffer, length, tail,
1971 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
1984 bpage = cpu_buffer->tail_page;
2007 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2017 (unsigned long long)cpu_buffer->write_stamp);
2023 event = __rb_reserve_next(cpu_buffer,
2034 if (rb_event_is_commit(cpu_buffer, event)) {
2045 if (!rb_try_to_discard(cpu_buffer, event)) {
2051 cpu_buffer->write_stamp = *ts;
2056 if (!rb_try_to_discard(cpu_buffer, event)) {
2069 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2071 local_inc(&cpu_buffer->committing);
2072 local_inc(&cpu_buffer->commits);
2075 static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2079 if (RB_WARN_ON(cpu_buffer,
2080 !local_read(&cpu_buffer->committing)))
2084 commits = local_read(&cpu_buffer->commits);
2087 if (local_read(&cpu_buffer->committing) == 1)
2088 rb_set_commit_to_write(cpu_buffer);
2090 local_dec(&cpu_buffer->committing);
2100 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2101 !local_read(&cpu_buffer->committing)) {
2102 local_inc(&cpu_buffer->committing);
2109 struct ring_buffer_per_cpu *cpu_buffer,
2117 rb_start_commit(cpu_buffer);
2127 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2128 local_dec(&cpu_buffer->committing);
2129 local_dec(&cpu_buffer->commits);
2145 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2148 ts = rb_time_stamp(cpu_buffer->buffer);
2158 if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
2159 rb_page_write(cpu_buffer->tail_page) ==
2160 rb_commit_index(cpu_buffer))) {
2163 diff = ts - cpu_buffer->write_stamp;
2169 if (unlikely(ts < cpu_buffer->write_stamp))
2175 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
2182 RB_WARN_ON(cpu_buffer, commit < 0);
2187 event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
2194 if (!rb_event_is_commit(cpu_buffer, event))
2202 rb_end_commit(cpu_buffer);
2263 struct ring_buffer_per_cpu *cpu_buffer;
2284 cpu_buffer = buffer->buffers[cpu];
2286 if (atomic_read(&cpu_buffer->record_disabled))
2292 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2308 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2315 if (rb_event_is_commit(cpu_buffer, event))
2316 cpu_buffer->write_stamp += event->time_delta;
2319 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2322 local_inc(&cpu_buffer->entries);
2323 rb_update_write_stamp(cpu_buffer, event);
2324 rb_end_commit(cpu_buffer);
2339 struct ring_buffer_per_cpu *cpu_buffer;
2342 cpu_buffer = buffer->buffers[cpu];
2344 rb_commit(cpu_buffer, event);
2371 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2375 struct buffer_page *bpage = cpu_buffer->commit_page;
2390 rb_inc_page(cpu_buffer, &bpage);
2397 rb_inc_page(cpu_buffer, &bpage);
2401 RB_WARN_ON(cpu_buffer, 1);
2426 struct ring_buffer_per_cpu *cpu_buffer;
2433 cpu_buffer = buffer->buffers[cpu];
2440 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2442 rb_decrement_entry(cpu_buffer, event);
2443 if (rb_try_to_discard(cpu_buffer, event))
2450 rb_update_write_stamp(cpu_buffer, event);
2452 rb_end_commit(cpu_buffer);
2478 struct ring_buffer_per_cpu *cpu_buffer;
2497 cpu_buffer = buffer->buffers[cpu];
2499 if (atomic_read(&cpu_buffer->record_disabled))
2505 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2513 rb_commit(cpu_buffer, event);
2523 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2525 struct buffer_page *reader = cpu_buffer->reader_page;
2526 struct buffer_page *head = rb_set_head_page(cpu_buffer);
2527 struct buffer_page *commit = cpu_buffer->commit_page;
2568 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
2579 struct ring_buffer_per_cpu *cpu_buffer;
2584 cpu_buffer = buffer->buffers[cpu];
2585 atomic_inc(&cpu_buffer->record_disabled);
2599 struct ring_buffer_per_cpu *cpu_buffer;
2604 cpu_buffer = buffer->buffers[cpu];
2605 atomic_dec(&cpu_buffer->record_disabled);
2616 struct ring_buffer_per_cpu *cpu_buffer;
2622 cpu_buffer = buffer->buffers[cpu];
2623 ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
2624 - cpu_buffer->read;
2631 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
2637 struct ring_buffer_per_cpu *cpu_buffer;
2643 cpu_buffer = buffer->buffers[cpu];
2644 ret = local_read(&cpu_buffer->overrun);
2658 struct ring_buffer_per_cpu *cpu_buffer;
2664 cpu_buffer = buffer->buffers[cpu];
2665 ret = local_read(&cpu_buffer->commit_overrun);
2680 struct ring_buffer_per_cpu *cpu_buffer;
2686 cpu_buffer = buffer->buffers[cpu];
2687 entries += (local_read(&cpu_buffer->entries) -
2688 local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
2704 struct ring_buffer_per_cpu *cpu_buffer;
2710 cpu_buffer = buffer->buffers[cpu];
2711 overruns += local_read(&cpu_buffer->overrun);
2720 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2723 if (list_empty(&cpu_buffer->reader_page->list)) {
2724 iter->head_page = rb_set_head_page(cpu_buffer);
2729 iter->head_page = cpu_buffer->reader_page;
2730 iter->head = cpu_buffer->reader_page->read;
2733 iter->read_stamp = cpu_buffer->read_stamp;
2736 iter->cache_reader_page = cpu_buffer->reader_page;
2737 iter->cache_read = cpu_buffer->read;
2749 struct ring_buffer_per_cpu *cpu_buffer;
2755 cpu_buffer = iter->cpu_buffer;
2757 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2759 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2769 struct ring_buffer_per_cpu *cpu_buffer;
2771 cpu_buffer = iter->cpu_buffer;
2773 return iter->head_page == cpu_buffer->commit_page &&
2774 iter->head == rb_commit_index(cpu_buffer);
2779 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2792 cpu_buffer->read_stamp += delta;
2799 cpu_buffer->read_stamp += event->time_delta;
2839 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2848 arch_spin_lock(&cpu_buffer->lock);
2857 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
2862 reader = cpu_buffer->reader_page;
2865 if (cpu_buffer->reader_page->read < rb_page_size(reader))
2869 if (RB_WARN_ON(cpu_buffer,
2870 cpu_buffer->reader_page->read > rb_page_size(reader)))
2875 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
2881 local_set(&cpu_buffer->reader_page->write, 0);
2882 local_set(&cpu_buffer->reader_page->entries, 0);
2883 local_set(&cpu_buffer->reader_page->page->commit, 0);
2884 cpu_buffer->reader_page->real_end = 0;
2890 reader = rb_set_head_page(cpu_buffer);
2891 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
2892 cpu_buffer->reader_page->list.prev = reader->list.prev;
2895 * cpu_buffer->pages just needs to point to the buffer, it
2899 cpu_buffer->pages = reader->list.prev;
2902 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
2914 overwrite = local_read(&(cpu_buffer->overrun));
2927 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
2940 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
2941 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2944 cpu_buffer->reader_page = reader;
2945 rb_reset_reader_page(cpu_buffer);
2947 if (overwrite != cpu_buffer->last_overrun) {
2948 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
2949 cpu_buffer->last_overrun = overwrite;
2955 arch_spin_unlock(&cpu_buffer->lock);
2961 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2967 reader = rb_get_reader_page(cpu_buffer);
2970 if (RB_WARN_ON(cpu_buffer, !reader))
2973 event = rb_reader_event(cpu_buffer);
2976 cpu_buffer->read++;
2978 rb_update_read_stamp(cpu_buffer, event);
2981 cpu_buffer->reader_page->read += length;
2986 struct ring_buffer_per_cpu *cpu_buffer;
2990 cpu_buffer = iter->cpu_buffer;
2997 if (iter->head_page == cpu_buffer->commit_page)
3011 if (RB_WARN_ON(cpu_buffer,
3012 (iter->head_page == cpu_buffer->commit_page) &&
3013 (iter->head + length > rb_commit_index(cpu_buffer))))
3022 (iter->head_page != cpu_buffer->commit_page))
3026 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3028 return cpu_buffer->lost_events;
3032 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3046 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
3049 reader = rb_get_reader_page(cpu_buffer);
3053 event = rb_reader_event(cpu_buffer);
3058 RB_WARN_ON(cpu_buffer, 1);
3071 rb_advance_reader(cpu_buffer);
3075 rb_advance_reader(cpu_buffer);
3080 *ts = cpu_buffer->read_stamp + event->time_delta;
3081 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3082 cpu_buffer->cpu, ts);
3085 *lost_events = rb_lost_events(cpu_buffer);
3100 struct ring_buffer_per_cpu *cpu_buffer;
3104 cpu_buffer = iter->cpu_buffer;
3105 buffer = cpu_buffer->buffer;
3112 if (unlikely(iter->cache_read != cpu_buffer->read ||
3113 iter->cache_reader_page != cpu_buffer->reader_page))
3128 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
3131 if (rb_per_cpu_empty(cpu_buffer))
3163 cpu_buffer->cpu, ts);
3204 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3216 spin_lock(&cpu_buffer->reader_lock);
3217 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3219 rb_advance_reader(cpu_buffer);
3221 spin_unlock(&cpu_buffer->reader_lock);
3241 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3246 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3248 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3271 struct ring_buffer_per_cpu *cpu_buffer;
3285 cpu_buffer = buffer->buffers[cpu];
3288 spin_lock(&cpu_buffer->reader_lock);
3290 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3292 cpu_buffer->lost_events = 0;
3293 rb_advance_reader(cpu_buffer);
3297 spin_unlock(&cpu_buffer->reader_lock);
3333 struct ring_buffer_per_cpu *cpu_buffer;
3343 cpu_buffer = buffer->buffers[cpu];
3345 iter->cpu_buffer = cpu_buffer;
3347 atomic_inc(&cpu_buffer->record_disabled);
3381 struct ring_buffer_per_cpu *cpu_buffer;
3387 cpu_buffer = iter->cpu_buffer;
3389 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3390 arch_spin_lock(&cpu_buffer->lock);
3392 arch_spin_unlock(&cpu_buffer->lock);
3393 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3407 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3409 atomic_dec(&cpu_buffer->record_disabled);
3425 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3428 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3439 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3456 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
3458 rb_head_page_deactivate(cpu_buffer);
3460 cpu_buffer->head_page
3461 = list_entry(cpu_buffer->pages, struct buffer_page, list);
3462 local_set(&cpu_buffer->head_page->write, 0);
3463 local_set(&cpu_buffer->head_page->entries, 0);
3464 local_set(&cpu_buffer->head_page->page->commit, 0);
3466 cpu_buffer->head_page->read = 0;
3468 cpu_buffer->tail_page = cpu_buffer->head_page;
3469 cpu_buffer->commit_page = cpu_buffer->head_page;
3471 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
3472 local_set(&cpu_buffer->reader_page->write, 0);
3473 local_set(&cpu_buffer->reader_page->entries, 0);
3474 local_set(&cpu_buffer->reader_page->page->commit, 0);
3475 cpu_buffer->reader_page->read = 0;
3477 local_set(&cpu_buffer->commit_overrun, 0);
3478 local_set(&cpu_buffer->overrun, 0);
3479 local_set(&cpu_buffer->entries, 0);
3480 local_set(&cpu_buffer->committing, 0);
3481 local_set(&cpu_buffer->commits, 0);
3482 cpu_buffer->read = 0;
3484 cpu_buffer->write_stamp = 0;
3485 cpu_buffer->read_stamp = 0;
3487 cpu_buffer->lost_events = 0;
3488 cpu_buffer->last_overrun = 0;
3490 rb_head_page_activate(cpu_buffer);
3500 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3506 atomic_inc(&cpu_buffer->record_disabled);
3508 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3510 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
3513 arch_spin_lock(&cpu_buffer->lock);
3515 rb_reset_cpu(cpu_buffer);
3517 arch_spin_unlock(&cpu_buffer->lock);
3520 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3522 atomic_dec(&cpu_buffer->record_disabled);
3545 struct ring_buffer_per_cpu *cpu_buffer;
3555 cpu_buffer = buffer->buffers[cpu];
3558 spin_lock(&cpu_buffer->reader_lock);
3559 ret = rb_per_cpu_empty(cpu_buffer);
3561 spin_unlock(&cpu_buffer->reader_lock);
3579 struct ring_buffer_per_cpu *cpu_buffer;
3589 cpu_buffer = buffer->buffers[cpu];
3592 spin_lock(&cpu_buffer->reader_lock);
3593 ret = rb_per_cpu_empty(cpu_buffer);
3595 spin_unlock(&cpu_buffer->reader_lock);
3761 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3791 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3793 reader = rb_get_reader_page(cpu_buffer);
3797 event = rb_reader_event(cpu_buffer);
3803 missed_events = cpu_buffer->lost_events;
3813 cpu_buffer->reader_page == cpu_buffer->commit_page) {
3814 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
3831 save_timestamp = cpu_buffer->read_stamp;
3839 rb_advance_reader(cpu_buffer);
3846 event = rb_reader_event(cpu_buffer);
3858 cpu_buffer->read += rb_page_entries(reader);
3879 cpu_buffer->lost_events = 0;
3905 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
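For orientation, below is a rough sketch of struct ring_buffer_per_cpu assembled only from the field accesses visible in the matches above. Types are inferred from the accessors used (local_read/local_set/local_inc, atomic_inc/atomic_read, spin_lock_init, and the arch_spinlock_t cast at line 1027), so the exact member types, ordering, and any fields this search does not touch may differ from the real definition in the 2.6.36 source.

	/* sketch reconstructed from the matches above, not the verbatim kernel definition */
	struct ring_buffer_per_cpu {
		int			cpu;		/* CPU this buffer belongs to (lines 975, 1023) */
		struct ring_buffer	*buffer;	/* back pointer to the global buffer (line 1024) */
		spinlock_t		reader_lock;	/* serializes readers (lines 1025-1026) */
		arch_spinlock_t		lock;		/* protects the reader-page swap (line 1027) */
		struct list_head	*pages;		/* ring of buffer_page list heads (line 995) */
		struct buffer_page	*head_page;	/* oldest data, read side */
		struct buffer_page	*tail_page;	/* newest data, write side */
		struct buffer_page	*commit_page;	/* last fully committed page */
		struct buffer_page	*reader_page;	/* spare page swapped in for readers */
		unsigned long		lost_events;	/* events dropped since the last read (line 2948) */
		unsigned long		last_overrun;	/* overrun count at the last reader swap (line 2949) */
		local_t			commit_overrun;	/* overruns caused by stalled commits (line 1846) */
		local_t			overrun;	/* entries lost to overwrite (line 1630) */
		local_t			entries;	/* entries written (line 2322) */
		local_t			committing;	/* nested commit depth (lines 2071, 2090) */
		local_t			commits;	/* commits started (line 2072) */
		unsigned long		read;		/* entries consumed by readers (line 2976) */
		u64			write_stamp;	/* timestamp of the last write (line 2051) */
		u64			read_stamp;	/* timestamp of the last read (line 1526) */
		atomic_t		record_disabled; /* writes rejected while non-zero (lines 2585, 2605) */
	};

With these fields in view, the entry count reported at lines 2623-2624 and 2687-2688 reads as (entries written) minus (entries overwritten) minus (entries already read), i.e. what is still sitting unread in the per-cpu buffer.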