Lines Matching refs:ipt_dev
Cross-reference hits for ipt_dev, the per-device state of this Intel Processor Trace (insntrace) driver. Each hit below is prefixed with its line number in the source file; short annotated sketches follow each cluster of hits.

137 static zx_status_t x86_pt_free(insntrace_device_t* ipt_dev);
213 static void make_topa(insntrace_device_t* ipt_dev, ipt_per_trace_state_t* per_trace) {
287 static uint32_t compute_topa_entry_count(insntrace_device_t* ipt_dev,
303 static size_t compute_capture_size(insntrace_device_t* ipt_dev,
309 zxlogf(DEBUG1, "IPT: compute_capture_size: trace %tu\n", per_trace - ipt_dev->per_trace_state);
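
A side note on the DEBUG1 log at line 309: the trace index is recovered by subtracting the array base from the element pointer, and the resulting ptrdiff_t is printed with the %tu length modifier. A minimal standalone illustration of that idiom (types here are invented for the example):

    #include <stdio.h>

    typedef struct { int allocated; } slot_t;

    int main(void) {
        slot_t slots[4];
        slot_t* cur = &slots[2];
        /* Index recovered by pointer subtraction, printed with %tu,
         * mirroring the driver's log format at line 309. */
        printf("trace %tu\n", cur - slots);
        return 0;
    }
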
336 static zx_status_t x86_pt_alloc_buffer1(insntrace_device_t* ipt_dev,
352 status = io_buffer_init_aligned(&per_trace->chunks[i], ipt_dev->bti,
377 uint32_t entry_count = compute_topa_entry_count(ipt_dev, per_trace);
398 status = io_buffer_init(&per_trace->topas[i], ipt_dev->bti,
409 make_topa(ipt_dev, per_trace);
414 static void x86_pt_free_buffer1(insntrace_device_t* ipt_dev, ipt_per_trace_state_t* per_trace) {
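
Lines 213-414 cover output-buffer setup and teardown: x86_pt_alloc_buffer1 allocates the trace chunks (io_buffer_init_aligned) and the ToPA pages (io_buffer_init), then make_topa fills each Table of Physical Addresses with entries pointing at the chunks. Below is a simplified sketch of such a fill loop; the entry layout follows the Intel SDM's ToPA format, and all types and names here are stand-ins rather than the driver's actual definitions:

    #include <stddef.h>
    #include <stdint.h>

    #define TOPA_ENTRY_END   (1ULL << 0)  /* last entry: links to a ToPA table */
    #define TOPA_SIZE_SHIFT  6            /* region size field: 4K << size     */

    typedef struct {
        uint64_t* table;            /* virtual address of the ToPA page   */
        uint64_t table_pa;          /* physical address of the same page  */
        const uint64_t* chunk_pas;  /* physical addresses of trace chunks */
        size_t num_chunks;
        uint32_t chunk_order;       /* log2(chunk size / 4K)              */
    } topa_sketch_t;

    static void make_topa_sketch(topa_sketch_t* t) {
        for (size_t i = 0; i < t->num_chunks; ++i) {
            t->table[i] = t->chunk_pas[i] |
                          ((uint64_t)t->chunk_order << TOPA_SIZE_SHIFT);
        }
        /* Final END entry points back at this table's own physical base,
         * so hardware output wraps to the first chunk when the last fills. */
        t->table[t->num_chunks] = t->table_pa | TOPA_ENTRY_END;
    }
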
434 static zx_status_t x86_pt_alloc_buffer(insntrace_device_t* ipt_dev,
506 for (descriptor = 0; descriptor < ipt_dev->num_traces; ++descriptor) {
507 if (!ipt_dev->per_trace_state[descriptor].allocated)
510 if (descriptor == ipt_dev->num_traces)
513 ipt_per_trace_state_t* per_trace = &ipt_dev->per_trace_state[descriptor];
515 zx_status_t status = x86_pt_alloc_buffer1(ipt_dev, per_trace,
518 x86_pt_free_buffer1(ipt_dev, per_trace);
535 static zx_status_t x86_pt_assign_thread_buffer(insntrace_device_t* ipt_dev,
543 static zx_status_t x86_pt_release_thread_buffer(insntrace_device_t* ipt_dev,
551 static zx_status_t x86_pt_free_buffer(insntrace_device_t* ipt_dev,
553 if (ipt_dev->active)
555 if (descriptor >= ipt_dev->num_traces)
557 assert(ipt_dev->per_trace_state);
558 ipt_per_trace_state_t* per_trace = &ipt_dev->per_trace_state[descriptor];
561 x86_pt_free_buffer1(ipt_dev, per_trace);
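
Lines 434-561 show the descriptor scheme: a buffer descriptor is simply an index into the per_trace_state array, handed out by a first-fit scan (lines 506-510) and bounds-checked against num_traces on every later use (line 555). A sketch of that allocate/free pattern, with stand-in types and error codes (the driver returns ZX_ERR_* values):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        bool allocated;
        /* chunk/ToPA bookkeeping elided */
    } per_trace_sketch_t;

    typedef struct {
        per_trace_sketch_t* per_trace_state;
        uint32_t num_traces;
        bool active;
    } ipt_dev_sketch_t;

    /* First-fit scan, as at lines 506-510. */
    static int alloc_descriptor_sketch(ipt_dev_sketch_t* dev, uint32_t* out) {
        uint32_t d;
        for (d = 0; d < dev->num_traces; ++d) {
            if (!dev->per_trace_state[d].allocated)
                break;
        }
        if (d == dev->num_traces)
            return -1;                 /* no free slot */
        dev->per_trace_state[d].allocated = true;
        *out = d;
        return 0;
    }

    /* Free path, as at lines 551-561: refuse while tracing is active
     * (ZX_ERR_BAD_STATE in the driver) and bounds-check the descriptor. */
    static int free_descriptor_sketch(ipt_dev_sketch_t* dev, uint32_t d) {
        if (dev->active || d >= dev->num_traces)
            return -1;
        dev->per_trace_state[d].allocated = false;
        return 0;
    }
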
601 insntrace_device_t* ipt_dev = calloc(1, sizeof(*dev->insntrace));
602 if (!ipt_dev)
605 ipt_dev->num_traces = zx_system_get_num_cpus();
606 ipt_dev->bti = dev->bti;
608 ipt_dev->per_trace_state = calloc(ipt_dev->num_traces, sizeof(ipt_dev->per_trace_state[0]));
609 if (!ipt_dev->per_trace_state) {
610 free(ipt_dev);
619 free(ipt_dev->per_trace_state);
620 free(ipt_dev);
624 ipt_dev->mode = internal_mode;
625 dev->insntrace = ipt_dev;
630 insntrace_device_t* ipt_dev = dev->insntrace;
631 if (ipt_dev->active)
634 for (uint32_t i = 0; i < ipt_dev->num_traces; ++i) {
635 ipt_per_trace_state_t* per_trace = &ipt_dev->per_trace_state[i];
637 x86_pt_free_buffer1(ipt_dev, per_trace);
648 free(ipt_dev->per_trace_state);
649 free(ipt_dev);
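
Lines 601-649 are the device lifecycle: init allocates the insntrace_device_t, sizes per_trace_state to one slot per CPU (the driver uses zx_system_get_num_cpus()), and unwinds the partial allocations on failure; cleanup frees any still-allocated per-trace buffers (x86_pt_free_buffer1) before releasing the arrays. A sketch of the calloc/unwind pattern, with hypothetical types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct { bool allocated; } slot_sketch_t;

    typedef struct {
        slot_sketch_t* per_trace_state;
        uint32_t num_traces;
    } insntrace_sketch_t;

    /* Two-stage allocation with unwind on failure, as at lines 601-620. */
    static insntrace_sketch_t* insntrace_init_sketch(uint32_t num_cpus) {
        insntrace_sketch_t* dev = calloc(1, sizeof(*dev));
        if (!dev)
            return NULL;
        dev->num_traces = num_cpus;
        dev->per_trace_state =
            calloc(dev->num_traces, sizeof(dev->per_trace_state[0]));
        if (!dev->per_trace_state) {
            free(dev);                 /* unwind in reverse order */
            return NULL;
        }
        return dev;
    }

    /* Teardown mirrors init, as at lines 630-649; the driver first bails
     * if tracing is active and frees each per-trace buffer before this. */
    static void insntrace_free_sketch(insntrace_sketch_t* dev) {
        free(dev->per_trace_state);
        free(dev);
    }
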
654 static zx_status_t ipt_get_trace_config(insntrace_device_t* ipt_dev,
661 switch (ipt_dev->mode) {
676 static zx_status_t ipt_alloc_buffer(insntrace_device_t* ipt_dev,
688 zx_status_t status = x86_pt_alloc_buffer(ipt_dev, &config, &descriptor);
696 static zx_status_t ipt_assign_thread_buffer(insntrace_device_t* ipt_dev,
703 return x86_pt_assign_thread_buffer(ipt_dev, assign.descriptor, assign.thread);
706 static zx_status_t ipt_release_thread_buffer(insntrace_device_t* ipt_dev,
713 return x86_pt_release_thread_buffer(ipt_dev, assign.descriptor, assign.thread);
716 static zx_status_t ipt_get_buffer_config(insntrace_device_t* ipt_dev,
729 if (descriptor >= ipt_dev->num_traces)
731 const ipt_per_trace_state_t* per_trace = &ipt_dev->per_trace_state[descriptor];
748 static zx_status_t ipt_get_buffer_info(insntrace_device_t* ipt_dev,
760 if (ipt_dev->active)
764 if (descriptor >= ipt_dev->num_traces)
766 const ipt_per_trace_state_t* per_trace = &ipt_dev->per_trace_state[descriptor];
771 data.capture_end = compute_capture_size(ipt_dev, per_trace);
777 static zx_status_t ipt_get_chunk_handle(insntrace_device_t* ipt_dev,
790 if (req.descriptor >= ipt_dev->num_traces)
792 const ipt_per_trace_state_t* per_trace = &ipt_dev->per_trace_state[req.descriptor];
806 static zx_status_t ipt_free_buffer(insntrace_device_t* ipt_dev,
813 x86_pt_free_buffer(ipt_dev, descriptor);
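
Lines 654-813 are the per-operation ioctl helpers. Their signatures show the shared shape of the old Zircon ioctl convention: parse a fixed-size request from cmd/cmdlen, bounds-check the descriptor against num_traces, write a fixed-size reply, and report the written size through out_actual. A hedged sketch of that shape; the request and reply fields are invented for illustration, and the stand-in status codes correspond to ZX_ERR_INVALID_ARGS, ZX_ERR_BUFFER_TOO_SMALL, and ZX_OK in the driver:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    enum { SK_OK = 0, SK_ERR_INVALID_ARGS = -1, SK_ERR_BUFFER_TOO_SMALL = -2 };

    typedef struct { uint32_t descriptor; } req_sketch_t;
    typedef struct { uint32_t num_chunks; uint32_t chunk_order; } cfg_sketch_t;

    static int get_buffer_config_sketch(uint32_t num_traces,
                                        const void* cmd, size_t cmdlen,
                                        void* reply, size_t replymax,
                                        size_t* out_actual) {
        req_sketch_t req;
        if (cmdlen != sizeof(req))           /* request must be exact size */
            return SK_ERR_INVALID_ARGS;
        memcpy(&req, cmd, sizeof(req));
        if (req.descriptor >= num_traces)    /* as at lines 729, 764, 790 */
            return SK_ERR_INVALID_ARGS;
        if (replymax < sizeof(cfg_sketch_t)) /* caller's buffer too small */
            return SK_ERR_BUFFER_TOO_SMALL;
        cfg_sketch_t cfg = { .num_chunks = 0, .chunk_order = 0 };
        memcpy(reply, &cfg, sizeof(cfg));
        *out_actual = sizeof(cfg);           /* bytes written back        */
        return SK_OK;
    }
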
818 static zx_status_t ipt_start(insntrace_device_t* ipt_dev) {
819 if (ipt_dev->active)
821 if (ipt_dev->mode != IPT_TRACE_CPUS)
823 assert(ipt_dev->per_trace_state);
829 for (uint32_t cpu = 0; cpu < ipt_dev->num_traces; ++cpu) {
830 const ipt_per_trace_state_t* per_trace = &ipt_dev->per_trace_state[cpu];
835 for (uint32_t cpu = 0; cpu < ipt_dev->num_traces; ++cpu) {
836 const ipt_per_trace_state_t* per_trace = &ipt_dev->per_trace_state[cpu];
861 ipt_dev->active = true;
866 static zx_status_t ipt_stop(insntrace_device_t* ipt_dev) {
867 if (!ipt_dev->active)
869 assert(ipt_dev->per_trace_state);
878 ipt_dev->active = false;
880 for (uint32_t cpu = 0; cpu < ipt_dev->num_traces; ++cpu) {
881 ipt_per_trace_state_t* per_trace = &ipt_dev->per_trace_state[cpu];
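
Lines 818-881 gate the actual tracing. ipt_start refuses a double start and only operates in IPT_TRACE_CPUS mode, then makes two passes over the CPUs (the loops at lines 829 and 835) before setting active; ipt_stop clears active and walks the CPUs again to collect per-trace state. A structural sketch with the per-CPU register programming elided (types and pass contents are assumptions):

    #include <stdbool.h>
    #include <stdint.h>

    typedef enum { SK_TRACE_CPUS, SK_TRACE_THREADS } sk_mode_t;

    typedef struct {
        bool active;
        sk_mode_t mode;
        uint32_t num_traces;
    } sk_dev_t;

    /* Guards as at lines 818-823: no double start, CPU mode only. */
    static int ipt_start_sketch(sk_dev_t* dev) {
        if (dev->active)
            return -1;             /* ZX_ERR_BAD_STATE in the driver */
        if (dev->mode != SK_TRACE_CPUS)
            return -1;
        for (uint32_t cpu = 0; cpu < dev->num_traces; ++cpu) {
            /* pass 1: stage each CPU's trace configuration */
        }
        for (uint32_t cpu = 0; cpu < dev->num_traces; ++cpu) {
            /* pass 2: start tracing on each CPU */
        }
        dev->active = true;        /* set only after both passes complete */
        return 0;
    }

    /* ipt_stop (lines 866-881) inverts this: bail if not active, clear
     * the flag, then walk the CPUs to collect per-trace state. */
    static int ipt_stop_sketch(sk_dev_t* dev) {
        if (!dev->active)
            return -1;
        dev->active = false;
        for (uint32_t cpu = 0; cpu < dev->num_traces; ++cpu) {
            /* read back buffer cursors / status for each CPU */
        }
        return 0;
    }
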
914 insntrace_device_t* ipt_dev = dev->insntrace;
916 if (!ipt_dev)
934 return ipt_get_trace_config(ipt_dev, reply, replymax, out_actual);
937 return ipt_alloc_buffer(ipt_dev, cmd, cmdlen, reply, replymax, out_actual);
942 return ipt_assign_thread_buffer(ipt_dev, cmd, cmdlen);
947 return ipt_release_thread_buffer(ipt_dev, cmd, cmdlen);
950 return ipt_get_buffer_config(ipt_dev, cmd, cmdlen, reply, replymax, out_actual);
953 return ipt_get_buffer_info(ipt_dev, cmd, cmdlen, reply, replymax, out_actual);
956 return ipt_get_chunk_handle(ipt_dev, cmd, cmdlen, reply, replymax, out_actual);
961 return ipt_free_buffer(ipt_dev, cmd, cmdlen);
966 return ipt_start(ipt_dev);
971 return ipt_stop(ipt_dev);
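
Lines 914-971 are the ioctl dispatcher: it resolves dev->insntrace once, fails if the trace state was never set up (line 916), and fans out to the helpers above. A stand-in sketch of that switch; the opcode names are hypothetical, not the driver's real IOCTL constants:

    typedef enum {
        SK_OP_GET_TRACE_CONFIG,
        SK_OP_ALLOC_BUFFER,
        SK_OP_FREE_BUFFER,
        SK_OP_START,
        SK_OP_STOP,
    } sk_op_t;

    struct sk_insntrace;  /* opaque stand-in for insntrace_device_t */

    static int ipt_ioctl_sketch(struct sk_insntrace* ipt_dev, sk_op_t op) {
        if (!ipt_dev)
            return -1;    /* trace state never initialized (line 916) */
        switch (op) {
        case SK_OP_GET_TRACE_CONFIG: /* ipt_get_trace_config(...) */ return 0;
        case SK_OP_ALLOC_BUFFER:     /* ipt_alloc_buffer(...)     */ return 0;
        case SK_OP_FREE_BUFFER:      /* ipt_free_buffer(...)      */ return 0;
        case SK_OP_START:            /* ipt_start(ipt_dev)        */ return 0;
        case SK_OP_STOP:             /* ipt_stop(ipt_dev)         */ return 0;
        }
        return -1;        /* unknown opcode */
    }
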