Lines Matching defs:tidq

463 struct cs_etm_traceid_queue *tidq;
468 tidq = etmq->traceid_queues[idx];
469 cs_etm__clear_packet_queue(&tidq->packet_queue);
474 struct cs_etm_traceid_queue *tidq,
481 cs_etm__clear_packet_queue(&tidq->packet_queue);
484 tidq->trace_chan_id = trace_chan_id;
485 tidq->el = tidq->prev_packet_el = ocsd_EL_unknown;
486 tidq->thread = machine__findnew_thread(&etm->session->machines.host, -1,
488 tidq->prev_packet_thread = machine__idle_thread(&etm->session->machines.host);
490 tidq->packet = zalloc(sizeof(struct cs_etm_packet));
491 if (!tidq->packet)
494 tidq->prev_packet = zalloc(sizeof(struct cs_etm_packet));
495 if (!tidq->prev_packet)
503 tidq->last_branch = zalloc(sz);
504 if (!tidq->last_branch)
506 tidq->last_branch_rb = zalloc(sz);
507 if (!tidq->last_branch_rb)
511 tidq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
512 if (!tidq->event_buf)
518 zfree(&tidq->last_branch_rb);
519 zfree(&tidq->last_branch);
520 zfree(&tidq->prev_packet);
521 zfree(&tidq->packet);
532 struct cs_etm_traceid_queue *tidq, **traceid_queues;
551 tidq = malloc(sizeof(*tidq));
552 if (!tidq)
555 memset(tidq, 0, sizeof(*tidq));
567 if (cs_etm__init_traceid_queue(etmq, tidq, trace_chan_id))
583 traceid_queues[idx] = tidq;
594 free(tidq);
602 struct cs_etm_traceid_queue *tidq;
604 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
605 if (tidq)
606 return &tidq->packet_queue;
612 struct cs_etm_traceid_queue *tidq)
630 tmp = tidq->packet;
631 tidq->packet = tidq->prev_packet;
632 tidq->prev_packet = tmp;
633 tidq->prev_packet_el = tidq->el;
634 thread__put(tidq->prev_packet_thread);
635 tidq->prev_packet_thread = thread__get(tidq->thread);
815 struct cs_etm_traceid_queue *tidq;
823 tidq = etmq->traceid_queues[idx];
824 thread__zput(tidq->thread);
825 thread__zput(tidq->prev_packet_thread);
826 zfree(&tidq->event_buf);
827 zfree(&tidq->last_branch);
828 zfree(&tidq->last_branch_rb);
829 zfree(&tidq->prev_packet);
830 zfree(&tidq->packet);
831 zfree(&tidq);
975 struct cs_etm_traceid_queue *tidq;
982 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
983 if (!tidq)
997 assert(tidq->el == ocsd_EL1 || tidq->el == ocsd_EL0);
999 assert(tidq->el == ocsd_EL2);
1001 assert(tidq->el == ocsd_EL3);
1004 cpumode = cs_etm__cpu_mode(etmq, address, tidq->el);
1006 if (!thread__find_map(tidq->thread, cpumode, address, &al))
1021 len = dso__data_read_offset(dso, maps__machine(thread__maps(tidq->thread)),
1201 struct cs_etm_traceid_queue *tidq)
1203 struct branch_stack *bs_src = tidq->last_branch_rb;
1204 struct branch_stack *bs_dst = tidq->last_branch;
1224 nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
1226 &bs_src->entries[tidq->last_branch_pos],
1239 sizeof(struct branch_entry) * tidq->last_branch_pos);
1244 void cs_etm__reset_last_branch_rb(struct cs_etm_traceid_queue *tidq)
1246 tidq->last_branch_pos = 0;
1247 tidq->last_branch_rb->nr = 0;
1305 struct cs_etm_traceid_queue *tidq)
1307 struct branch_stack *bs = tidq->last_branch_rb;
1316 if (!tidq->last_branch_pos)
1317 tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;
1319 tidq->last_branch_pos -= 1;
1321 be = &bs->entries[tidq->last_branch_pos];
1322 be->from = cs_etm__last_executed_instr(tidq->prev_packet);
1323 be->to = cs_etm__first_executed_instr(tidq->packet);
1387 struct cs_etm_traceid_queue *tidq, pid_t tid,
1393 thread__zput(tidq->thread);
1394 tidq->thread = machine__find_thread(machine, -1, tid);
1398 if (!tidq->thread)
1399 tidq->thread = machine__idle_thread(machine);
1401 tidq->el = el;
1407 struct cs_etm_traceid_queue *tidq;
1409 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
1410 if (!tidq)
1413 cs_etm__set_thread(etmq, tidq, tid, el);
1462 struct cs_etm_traceid_queue *tidq)
1465 struct cs_etm_packet_queue *packet_queue = &tidq->packet_queue;
1474 struct cs_etm_traceid_queue *tidq,
1479 union perf_event *event = tidq->event_buf;
1483 event->sample.header.misc = cs_etm__cpu_mode(etmq, addr, tidq->el);
1487 sample.time = cs_etm__resolve_sample_time(etmq, tidq);
1490 sample.pid = thread__pid(tidq->thread);
1491 sample.tid = thread__tid(tidq->thread);
1495 sample.cpu = tidq->packet->cpu;
1496 sample.flags = tidq->prev_packet->flags;
1499 cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->packet, &sample);
1502 sample.branch_stack = tidq->last_branch;
1526 struct cs_etm_traceid_queue *tidq)
1531 union perf_event *event = tidq->event_buf;
1539 ip = cs_etm__last_executed_instr(tidq->prev_packet);
1543 tidq->prev_packet_el);
1547 sample.time = cs_etm__resolve_sample_time(etmq, tidq);
1550 sample.pid = thread__pid(tidq->prev_packet_thread);
1551 sample.tid = thread__tid(tidq->prev_packet_thread);
1552 sample.addr = cs_etm__first_executed_instr(tidq->packet);
1556 sample.cpu = tidq->packet->cpu;
1557 sample.flags = tidq->prev_packet->flags;
1560 cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->prev_packet,
1710 struct cs_etm_traceid_queue *tidq)
1714 u8 trace_chan_id = tidq->trace_chan_id;
1718 instrs_prev = tidq->period_instructions;
1720 tidq->period_instructions += tidq->packet->instr_count;
1727 tidq->prev_packet->sample_type == CS_ETM_RANGE &&
1728 tidq->prev_packet->last_instr_taken_branch)
1729 cs_etm__update_last_branch_rb(etmq, tidq);
1732 tidq->period_instructions >= etm->instructions_sample_period) {
1755 * tidq->packet->instr_count
1763 * tidq->packet->instr_count represents the number of
1777 * to tidq->period_instructions for next round calculation.
1790 cs_etm__copy_last_branch_rb(etmq, tidq);
1792 while (tidq->period_instructions >=
1801 tidq->packet, offset - 1);
1803 etmq, tidq, addr,
1809 tidq->period_instructions -=
1818 if (tidq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
1822 if (tidq->prev_packet->sample_type == CS_ETM_RANGE &&
1823 tidq->prev_packet->last_instr_taken_branch)
1827 ret = cs_etm__synth_branch_sample(etmq, tidq);
1833 cs_etm__packet_swap(etm, tidq);
1838 static int cs_etm__exception(struct cs_etm_traceid_queue *tidq)
1851 if (tidq->prev_packet->sample_type == CS_ETM_RANGE)
1852 tidq->prev_packet->last_instr_taken_branch = true;
1858 struct cs_etm_traceid_queue *tidq)
1864 if (tidq->prev_packet->sample_type == CS_ETM_EMPTY)
1869 tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1873 cs_etm__copy_last_branch_rb(etmq, tidq);
1882 addr = cs_etm__last_executed_instr(tidq->prev_packet);
1885 etmq, tidq, addr,
1886 tidq->period_instructions);
1890 tidq->period_instructions = 0;
1895 tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1896 err = cs_etm__synth_branch_sample(etmq, tidq);
1902 cs_etm__packet_swap(etm, tidq);
1906 cs_etm__reset_last_branch_rb(tidq);
1912 struct cs_etm_traceid_queue *tidq)
1927 tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1931 cs_etm__copy_last_branch_rb(etmq, tidq);
1937 addr = cs_etm__last_executed_instr(tidq->prev_packet);
1940 etmq, tidq, addr,
1941 tidq->period_instructions);
1945 tidq->period_instructions = 0;
2049 struct cs_etm_traceid_queue *tidq, u64 magic)
2051 u8 trace_chan_id = tidq->trace_chan_id;
2052 struct cs_etm_packet *packet = tidq->packet;
2053 struct cs_etm_packet *prev_packet = tidq->prev_packet;
2074 static bool cs_etm__is_async_exception(struct cs_etm_traceid_queue *tidq,
2077 struct cs_etm_packet *packet = tidq->packet;
2101 struct cs_etm_traceid_queue *tidq,
2104 u8 trace_chan_id = tidq->trace_chan_id;
2105 struct cs_etm_packet *packet = tidq->packet;
2106 struct cs_etm_packet *prev_packet = tidq->prev_packet;
2150 struct cs_etm_traceid_queue *tidq)
2152 struct cs_etm_packet *packet = tidq->packet;
2153 struct cs_etm_packet *prev_packet = tidq->prev_packet;
2154 u8 trace_chan_id = tidq->trace_chan_id;
2257 if (cs_etm__is_syscall(etmq, tidq, magic))
2265 else if (cs_etm__is_async_exception(tidq, magic))
2274 else if (cs_etm__is_sync_exception(etmq, tidq, magic))
2357 struct cs_etm_traceid_queue *tidq)
2362 packet_queue = &tidq->packet_queue;
2367 tidq->packet);
2382 ret = cs_etm__set_sample_flags(etmq, tidq);
2386 switch (tidq->packet->sample_type) {
2393 cs_etm__sample(etmq, tidq);
2402 cs_etm__exception(tidq);
2409 cs_etm__flush(etmq, tidq);
2430 struct cs_etm_traceid_queue *tidq;
2435 tidq = etmq->traceid_queues[idx];
2438 cs_etm__process_traceid_queue(etmq, tidq);
2444 cs_etm__flush(etmq, tidq);
2451 struct cs_etm_traceid_queue *tidq;
2453 tidq = cs_etm__etmq_get_traceid_queue(etmq, CS_ETM_PER_THREAD_TRACEID);
2454 if (!tidq)
2474 err = cs_etm__process_traceid_queue(etmq, tidq);
2480 err = cs_etm__end_block(etmq, tidq);
2489 struct cs_etm_traceid_queue *tidq;
2513 tidq = etmq->traceid_queues[idx];
2514 cs_etm__process_traceid_queue(etmq, tidq);
2520 tidq = etmq->traceid_queues[idx];
2522 err = cs_etm__end_block(etmq, tidq);
2540 struct cs_etm_traceid_queue *tidq;
2546 tidq = cs_etm__etmq_get_traceid_queue(
2549 if (!tidq)
2552 if (tid == -1 || thread__tid(tidq->thread) == tid)
2569 struct cs_etm_traceid_queue *tidq;
2602 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
2603 if (!tidq) {
2617 ret = cs_etm__process_traceid_queue(etmq, tidq);
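
The recurring pattern in the matches above is that each per-trace-ID queue holds a current and a previous packet and exchanges the two pointers rather than copying packet contents (see the cs_etm__packet_swap() lines around 630-635, the allocations around 490-495, and the teardown around 826-831). Below is a minimal, self-contained sketch of that allocate/swap/free lifecycle; the struct and function names (packet, traceid_queue, packet_swap, ...) are simplified stand-ins for illustration, not the real cs_etm_* definitions from perf.

/* Sketch only: mirrors the pointer-swap idiom, not the actual perf code. */
#include <stdlib.h>

struct packet {                         /* stand-in for struct cs_etm_packet */
        int sample_type;
        unsigned long instr_count;
};

struct traceid_queue {                  /* stand-in for struct cs_etm_traceid_queue */
        struct packet *packet;
        struct packet *prev_packet;
};

static int traceid_queue_init(struct traceid_queue *tidq)
{
        /* Allocate both packet slots up front, as the init path above does. */
        tidq->packet = calloc(1, sizeof(*tidq->packet));
        tidq->prev_packet = calloc(1, sizeof(*tidq->prev_packet));
        if (!tidq->packet || !tidq->prev_packet) {
                free(tidq->packet);
                free(tidq->prev_packet);
                return -1;
        }
        return 0;
}

static void packet_swap(struct traceid_queue *tidq)
{
        /* O(1) pointer swap: the current packet becomes the previous one. */
        struct packet *tmp = tidq->packet;

        tidq->packet = tidq->prev_packet;
        tidq->prev_packet = tmp;
}

static void traceid_queue_exit(struct traceid_queue *tidq)
{
        /* Release both slots, matching the teardown order in the listing. */
        free(tidq->prev_packet);
        free(tidq->packet);
        tidq->prev_packet = tidq->packet = NULL;
}

int main(void)
{
        struct traceid_queue tidq;

        if (traceid_queue_init(&tidq))
                return 1;

        tidq.packet->instr_count = 4;   /* pretend we decoded a packet */
        packet_swap(&tidq);             /* it is now the "previous" packet */

        traceid_queue_exit(&tidq);
        return 0;
}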