Lines Matching defs:pb

12847 struct perf_buffer *pb;
12871 static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
12877 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
12887 void perf_buffer__free(struct perf_buffer *pb)
12891 if (IS_ERR_OR_NULL(pb))
12893 if (pb->cpu_bufs) {
12894 for (i = 0; i < pb->cpu_cnt; i++) {
12895 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
12900 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
12901 perf_buffer__free_cpu_buf(pb, cpu_buf);
12903 free(pb->cpu_bufs);
12905 if (pb->epoll_fd >= 0)
12906 close(pb->epoll_fd);
12907 free(pb->events);
12908 free(pb);
12912 perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
12923 cpu_buf->pb = pb;
12936 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
12957 perf_buffer__free_cpu_buf(pb, cpu_buf);
13027 struct perf_buffer *pb;
13062 pb = calloc(1, sizeof(*pb));
13063 if (!pb)
13066 pb->event_cb = p->event_cb;
13067 pb->sample_cb = p->sample_cb;
13068 pb->lost_cb = p->lost_cb;
13069 pb->ctx = p->ctx;
13071 pb->page_size = getpagesize();
13072 pb->mmap_size = pb->page_size * page_cnt;
13073 pb->map_fd = map_fd;
13075 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
13076 if (pb->epoll_fd < 0) {
13084 pb->cpu_cnt = p->cpu_cnt;
13086 pb->cpu_cnt = libbpf_num_possible_cpus();
13087 if (pb->cpu_cnt < 0) {
13088 err = pb->cpu_cnt;
13091 if (map.max_entries && map.max_entries < pb->cpu_cnt)
13092 pb->cpu_cnt = map.max_entries;
13095 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
13096 if (!pb->events) {
13101 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
13102 if (!pb->cpu_bufs) {
13114 for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
13127 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
13133 pb->cpu_bufs[j] = cpu_buf;
13135 err = bpf_map_update_elem(pb->map_fd, &map_key,
13145 pb->events[j].events = EPOLLIN;
13146 pb->events[j].data.ptr = cpu_buf;
13147 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
13148 &pb->events[j]) < 0) {
13157 pb->cpu_cnt = j;
13160 return pb;
13164 if (pb)
13165 perf_buffer__free(pb);
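
The constructor lines above (13062-13165) show the per-CPU rings being created and wired into pb->map_fd via bpf_map_update_elem(); that fd must refer to a BPF_MAP_TYPE_PERF_EVENT_ARRAY map which a BPF program feeds with bpf_perf_event_output(). A minimal producer-side sketch, with hypothetical map/struct/program names chosen only for illustration (none of them appear in this listing):

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical map name; the map type is what matters. Its fd is the
 * map_fd handed to perf_buffer setup on the user-space side. */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

struct event {
	__u32 pid;
};

SEC("tracepoint/syscalls/sys_enter_execve")
int handle_execve(void *ctx)
{
	struct event e = {};

	e.pid = bpf_get_current_pid_tgid() >> 32;
	/* Emit one record into the current CPU's ring; perf_buffer__poll()
	 * later delivers it to the user-space sample callback. */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```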
13186 struct perf_buffer *pb = cpu_buf->pb;
13190 if (pb->event_cb)
13191 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
13197 if (pb->sample_cb)
13198 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
13204 if (pb->lost_cb)
13205 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
13215 static int perf_buffer__process_records(struct perf_buffer *pb,
13220 ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size,
13221 pb->page_size, &cpu_buf->buf,
13229 int perf_buffer__epoll_fd(const struct perf_buffer *pb)
13231 return pb->epoll_fd;
13234 int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
13238 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
13243 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
13245 err = perf_buffer__process_records(pb, cpu_buf);
13257 size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
13259 return pb->cpu_cnt;
13267 int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
13271 if (buf_idx >= pb->cpu_cnt)
13274 cpu_buf = pb->cpu_bufs[buf_idx];
13281 int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size)
13285 if (buf_idx >= pb->cpu_cnt)
13288 cpu_buf = pb->cpu_bufs[buf_idx];
13293 *buf_size = pb->mmap_size;
13305 int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
13309 if (buf_idx >= pb->cpu_cnt)
13312 cpu_buf = pb->cpu_bufs[buf_idx];
13316 return perf_buffer__process_records(pb, cpu_buf);
13319 int perf_buffer__consume(struct perf_buffer *pb)
13323 for (i = 0; i < pb->cpu_cnt; i++) {
13324 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
13329 err = perf_buffer__process_records(pb, cpu_buf);
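
Taken together, these matches trace libbpf's user-space perf buffer API: setup mmaps one ring per CPU and registers each ring fd with pb->epoll_fd, perf_buffer__poll() waits on that fd and dispatches records through perf_buffer__process_records()/perf_buffer__process_record(), and perf_buffer__free() unmaps the rings and closes the fds. A minimal consumer sketch, assuming the libbpf 1.x perf_buffer__new() signature (NULL return with errno set on failure) and a map_fd for the PERF_EVENT_ARRAY map obtained elsewhere, e.g. via bpf_map__fd():

```c
#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

/* Matches perf_buffer_sample_fn; receives the raw payload written by
 * bpf_perf_event_output() on the BPF side. */
static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
{
	printf("cpu %d: %u byte sample\n", cpu, size);
}

/* Matches perf_buffer_lost_fn; reports records dropped when a ring was full. */
static void handle_lost(void *ctx, int cpu, __u64 cnt)
{
	fprintf(stderr, "cpu %d: lost %llu samples\n", cpu, (unsigned long long)cnt);
}

int consume_events(int map_fd)
{
	struct perf_buffer *pb;
	int err;

	/* 8 pages per CPU ring, i.e. pb->mmap_size = 8 * pb->page_size */
	pb = perf_buffer__new(map_fd, 8, handle_sample, handle_lost, NULL, NULL);
	if (!pb)
		return -errno;

	for (;;) {
		err = perf_buffer__poll(pb, 100 /* timeout, ms */);
		if (err < 0 && err != -EINTR)
			break;
	}

	perf_buffer__free(pb);
	return err < 0 ? err : 0;
}
```

The same rings can also be drained without waiting by calling perf_buffer__consume() (all CPUs) or perf_buffer__consume_buffer() (one ring), both of which funnel into perf_buffer__process_records() as listed above.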