Searched refs:event (Results 176 - 200 of 2972) sorted by relevance

/linux-master/drivers/perf/
fsl_imx9_ddr_perf.c
127 return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
137 /* counter0 cycles event */
245 PMU_FORMAT_ATTR(event, "config:0-7");
312 * The performance monitor must be reset before event counting
351 /* Freeze counter disabled, condition enabled, and program event.*/
367 int event, counter; local
369 event = cfg & 0x000000FF;
374 if (counter == 2 && event == 73)
376 else if (counter == 2 && event != 73)
379 if (counter == 3 && event
399 ddr_perf_event_update(struct perf_event *event) argument
413 ddr_perf_event_init(struct perf_event *event) argument
451 ddr_perf_event_start(struct perf_event *event, int flags) argument
463 ddr_perf_event_add(struct perf_event *event, int flags) argument
488 ddr_perf_event_stop(struct perf_event *event, int flags) argument
500 ddr_perf_event_del(struct perf_event *event, int flags) argument
551 struct perf_event *event; local
[all...]
alibaba_uncore_drw_pmu.c
111 #define GET_DRW_EVENTID(event) FIELD_GET(DRW_CONFIG_EVENTID, (event)->attr.config)
124 * PMU event attributes
209 ALI_DRW_PMU_FORMAT_ATTR(event, "config:0-7"),
265 /* find a counter for event, then in add func, hw.idx will equal to counter */
266 static int ali_drw_get_counter_idx(struct perf_event *event) argument
268 struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
280 static u64 ali_drw_pmu_read_counter(struct perf_event *event) argument
282 struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
285 if (GET_DRW_EVENTID(event)
297 ali_drw_pmu_event_update(struct perf_event *event) argument
316 ali_drw_pmu_event_set_period(struct perf_event *event) argument
334 ali_drw_pmu_enable_counter(struct perf_event *event) argument
352 ali_drw_pmu_disable_counter(struct perf_event *event) argument
379 struct perf_event *event; local
525 ali_drw_pmu_event_init(struct perf_event *event) argument
572 ali_drw_pmu_start(struct perf_event *event, int flags) argument
597 ali_drw_pmu_stop(struct perf_event *event, int flags) argument
613 ali_drw_pmu_add(struct perf_event *event, int flags) argument
642 ali_drw_pmu_del(struct perf_event *event, int flags) argument
659 ali_drw_pmu_read(struct perf_event *event) argument
[all...]
arm-cci.c
77 * @fixed_hw_cntrs - Number of fixed event counters
78 * @num_hw_cntrs - Maximum number of programmable event counters
79 * @cntr_size - Size of an event counter mapping
160 * Instead of an event id to monitor CCI cycles, a dedicated counter is
162 * make use of this event in hardware.
172 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
173 * ports and bits 4:0 are event codes. There are different event codes
180 * the different revisions and are used to validate the event to be monitored.
188 #define CCI400_PMU_EVENT_SOURCE(event) \
645 struct perf_event *event = cci_hw->events[i]; local
733 pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event) argument
786 pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event) argument
804 pmu_map_event(struct perf_event *event) argument
862 pmu_read_counter(struct perf_event *event) argument
889 struct perf_event *event = cci_hw->events[i]; local
951 struct perf_event *event = cci_pmu->hw_events.events[i]; local
970 pmu_event_update(struct perf_event *event) argument
988 pmu_read(struct perf_event *event) argument
993 pmu_event_set_period(struct perf_event *event) argument
1030 struct perf_event *event = events->events[idx]; local
1070 hw_perf_event_destroy(struct perf_event *event) argument
1119 cci_pmu_start(struct perf_event *event, int pmu_flags) argument
1153 cci_pmu_stop(struct perf_event *event, int pmu_flags) argument
1176 cci_pmu_add(struct perf_event *event, int flags) argument
1201 cci_pmu_del(struct perf_event *event, int flags) argument
1215 validate_event(struct pmu *cci_pmu, struct cci_pmu_hw_events *hw_events, struct perf_event *event) argument
1239 validate_group(struct perf_event *event) argument
1267 __hw_perf_event_init(struct perf_event *event) argument
1303 cci_pmu_event_init(struct perf_event *event) argument
[all...]
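
The arm-cci.c comments above describe the CCI-400 event id as an 8-bit value whose bits 7:5 select one of 8 ports/sources and whose bits 4:0 carry the event code. Below is a minimal standalone sketch of that decoding; the macro names and sample value are illustrative, not the driver's (the real CCI400_PMU_EVENT_* helpers are truncated in the snippet).

#include <stdio.h>
#include <stdint.h>

/* Event-id layout per the arm-cci.c comment: bits 7:5 = source/port, bits 4:0 = code. */
#define CCI_EVENT_SOURCE(ev)  (((ev) >> 5) & 0x7)
#define CCI_EVENT_CODE(ev)    ((ev) & 0x1f)

int main(void)
{
    uint8_t ev = 0x6a;   /* example id: source 3, code 0x0a */
    printf("source=%u code=0x%02x\n",
           (unsigned)CCI_EVENT_SOURCE(ev), (unsigned)CCI_EVENT_CODE(ev));
    return 0;
}
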
qcom_l2_pmu.c
75 #define L2_EVT_CODE(event) (((event) & L2_EVT_CODE_MASK) >> L2_EVT_CODE_SHIFT)
76 #define L2_EVT_GROUP(event) (((event) & L2_EVT_GRP_MASK) >> L2_EVT_GRP_SHIFT)
132 * In addition there is a cycle counter event specified by L2CYCLE_CTR_RAW_CODE
297 static void l2_cache_event_update(struct perf_event *event) argument
299 struct hw_perf_event *hwc = &event->hw;
316 local64_add(delta, &event->count);
340 struct perf_event *event)
342 struct hw_perf_event *hwc = &event
339 l2_cache_get_event_idx(struct cluster_pmu *cluster, struct perf_event *event) argument
374 l2_cache_clear_event_idx(struct cluster_pmu *cluster, struct perf_event *event) argument
397 struct perf_event *event = cluster->events[idx]; local
438 l2_cache_event_init(struct perf_event *event) argument
541 l2_cache_event_start(struct perf_event *event, int flags) argument
572 l2_cache_event_stop(struct perf_event *event, int flags) argument
588 l2_cache_event_add(struct perf_event *event, int flags) argument
615 l2_cache_event_del(struct perf_event *event, int flags) argument
630 l2_cache_event_read(struct perf_event *event) argument
[all...]
arm-ccn.c
154 struct perf_event *event; member in struct:arm_ccn_dt::__anon136
226 static CCN_FORMAT_ATTR(event, "config:16-23");
259 u32 event; member in struct:arm_ccn_pmu_event
278 .type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \
284 .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
289 .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
293 .type = CCN_TYPE_HNF, .event = _event, }
296 .type = CCN_TYPE_XP, .event = _event, \
302 * as they all share the same event types.
305 .type = CCN_TYPE_RNI_3P, .event
318 struct arm_ccn_pmu_event *event = container_of(attr, local
363 struct arm_ccn_pmu_event *event = container_of(dev_attr, local
622 arm_ccn_pmu_event_alloc(struct perf_event *event) argument
680 arm_ccn_pmu_event_release(struct perf_event *event) argument
704 arm_ccn_pmu_event_init(struct perf_event *event) argument
859 arm_ccn_pmu_event_update(struct perf_event *event) argument
875 arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable) argument
908 arm_ccn_pmu_event_start(struct perf_event *event, int flags) argument
921 arm_ccn_pmu_event_stop(struct perf_event *event, int flags) argument
934 arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event) argument
982 arm_ccn_pmu_xp_event_config(struct perf_event *event) argument
1003 arm_ccn_pmu_node_event_config(struct perf_event *event) argument
1040 arm_ccn_pmu_event_config(struct perf_event *event) argument
1084 arm_ccn_pmu_event_add(struct perf_event *event, int flags) argument
1113 arm_ccn_pmu_event_del(struct perf_event *event, int flags) argument
1125 arm_ccn_pmu_event_read(struct perf_event *event) argument
1161 struct perf_event *event = dt->pmu_counters[idx].event; local
[all...]
marvell_cn10k_ddr_pmu.c
36 /* 8 Generic event counter + 2 fixed event counters */
44 /* Generic event counter registers */
48 /* Two dedicated event counters for DDR reads and writes */
53 * programmable events IDs in programmable event counters.
54 * DO NOT change these event-id numbers, they are used to
55 * program event bitmap in h/w.
108 /* Fixed event counter enable/disable register */
113 /* Fixed event counter control register */
118 /* Fixed event counte
289 cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu, struct perf_event *event) argument
323 cn10k_ddr_perf_event_init(struct perf_event *event) argument
406 cn10k_ddr_perf_event_update(struct perf_event *event) argument
422 cn10k_ddr_perf_event_start(struct perf_event *event, int flags) argument
435 cn10k_ddr_perf_event_add(struct perf_event *event, int flags) argument
481 cn10k_ddr_perf_event_stop(struct perf_event *event, int flags) argument
495 cn10k_ddr_perf_event_del(struct perf_event *event, int flags) argument
552 struct perf_event *event; local
[all...]
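
The marvell_cn10k_ddr_pmu.c snippet above notes 8 generic event counters plus 2 fixed counters dedicated to DDR reads and writes, with an allocation routine (cn10k_ddr_perf_alloc_counter) picking a slot per event. Below is a rough, self-contained sketch of one such allocation policy; the event ids, slot numbers and first-free strategy are assumptions for illustration, not the driver's actual encoding.

#include <stdio.h>
#include <stdbool.h>

#define NUM_GENERIC   8
#define IDX_READ      8          /* fixed slot for the DDR-read event  (assumed) */
#define IDX_WRITE     9          /* fixed slot for the DDR-write event (assumed) */
#define EV_DDR_READ   0x100      /* illustrative event ids */
#define EV_DDR_WRITE  0x101

static bool generic_used[NUM_GENERIC];

static int alloc_counter(unsigned int event)
{
    /* the two dedicated events always land on their fixed counters */
    if (event == EV_DDR_READ)
        return IDX_READ;
    if (event == EV_DDR_WRITE)
        return IDX_WRITE;
    /* everything else takes the first free generic counter */
    for (int i = 0; i < NUM_GENERIC; i++)
        if (!generic_used[i]) {
            generic_used[i] = true;
            return i;
        }
    return -1;                   /* all generic counters busy */
}

int main(void)
{
    printf("event 0x2 -> counter %d\n", alloc_counter(0x2));
    printf("DDR write -> counter %d\n", alloc_counter(EV_DDR_WRITE));
    printf("event 0x5 -> counter %d\n", alloc_counter(0x5));
    return 0;
}
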
/linux-master/arch/powerpc/perf/
mpc7450-pmu.c
13 #define MAX_ALT 3 /* Maximum number of event alternative codes */
16 * Bits in event code for MPC7450 family
33 * -1: event code is invalid
37 static int mpc7450_classify_event(u32 event) argument
41 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
47 event &= PM_PMCSEL_MSK;
48 if (event <= 1)
50 if (event <= 7)
52 if (event <= 13)
54 if (event <
77 mpc7450_threshold_use(u32 event) argument
150 mpc7450_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, u64 event_config1 __maybe_unused) argument
208 find_alternative(u32 event) argument
222 mpc7450_get_alternatives(u64 event, unsigned int flags, u64 alt[]) argument
259 mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], struct mmcr_regs *mmcr, struct perf_event *pevents[], u32 flags __maybe_unused) argument
[all...]
ppc970-pmu.c
15 * Bits in event code for PPC970
23 #define PM_BYTE_SH 4 /* Byte number of event bus to use */
115 * 28-31: Byte 0 event source 0xf000_0000
116 * Encoding as for the event code
119 * 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources
142 * Returns 1 if event counts things relating to marked instructions
145 static int p970_marked_instr_event(u64 event) argument
150 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
151 psel = event & PM_PMCSEL_MSK;
164 byte = (event >> PM_BYTE_S
192 p970_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, u64 event_config1 __maybe_unused) argument
244 p970_get_alternatives(u64 event, unsigned int flags, u64 alt[]) argument
257 p970_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], struct mmcr_regs *mmcr, struct perf_event *pevents[], u32 flags __maybe_unused) argument
[all...]
power6-pmu.c
16 * Bits in event code for POWER6
21 #define PM_UNIT_SH 16 /* Unit event comes (TTMxSEL encoding) */
26 #define PM_BYTE_SH 12 /* Byte of event bus to use */
28 #define PM_SUBUNIT_SH 8 /* Subunit event comes from (NEST_SEL enc.) */
55 * top 4 bits say what sort of event:
56 * 0 = direct marked event,
57 * 1 = byte decode event,
58 * 4 = add/and event (PMC1 -> bits 0 & 4),
59 * 5 = add/and event (PMC1 -> bits 1 & 5),
60 * 6 = add/and event (PMC
137 power6_marked_instr_event(u64 event) argument
175 p6_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], struct mmcr_regs *mmcr, struct perf_event *pevents[], u32 flags __maybe_unused) argument
269 p6_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, u64 event_config1 __maybe_unused) argument
303 p6_limited_pmc_event(u64 event) argument
338 find_alternatives_list(u64 event) argument
373 p6_get_alternatives(u64 event, unsigned int flags, u64 alt[]) argument
[all...]
hv-24x7.c
109 /* Domains for which more than one result element are returned for each event. */
280 * Otherwise return the address of the byte just following the event.
353 * Each event we find in the catalog, will have a sysfs entry. Format the
354 * data for this sysfs entry based on the event's domain.
388 static char *event_fmt(struct hv_24x7_event_data *event, unsigned int domain) argument
416 be16_to_cpu(event->event_counter_offs) +
417 be16_to_cpu(event->event_group_record_offs),
470 * Allocate and initialize strings representing event attributes.
510 struct hv_24x7_event_data *event,
519 pr_warn("catalog event
509 event_to_attr(unsigned int ix, struct hv_24x7_event_data *event, unsigned int domain, int nonce) argument
551 event_to_desc_attr(struct hv_24x7_event_data *event, int nonce) argument
566 event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce) argument
579 event_data_to_attrs(unsigned int ix, struct attribute **attrs, struct hv_24x7_event_data *event, int nonce) argument
689 catalog_event_len_validate(struct hv_24x7_event_data *event, size_t event_idx, size_t event_data_bytes, size_t event_entry_count, size_t offset, void *end) argument
770 struct hv_24x7_event_data *event; local
1212 add_event_to_24x7_request(struct perf_event *event, struct hv_24x7_request_buffer *request_buffer) argument
1275 get_count_from_result(struct perf_event *event, struct hv_24x7_data_result_buffer *resb, struct hv_24x7_result *res, u64 *countp, struct hv_24x7_result **next) argument
1342 single_24x7_request(struct perf_event *event, u64 *count) argument
1375 h_24x7_event_init(struct perf_event *event) argument
1441 h_24x7_get_value(struct perf_event *event) argument
1452 update_event_count(struct perf_event *event, u64 now) argument
1460 h_24x7_event_read(struct perf_event *event) argument
1507 h_24x7_event_start(struct perf_event *event, int flags) argument
1513 h_24x7_event_stop(struct perf_event *event, int flags) argument
1518 h_24x7_event_add(struct perf_event *event, int flags) argument
1606 struct perf_event *event = h24x7hw->events[res->result_ix]; local
[all...]
/linux-master/tools/testing/selftests/bpf/progs/
pyperf.h
172 Event *event; member in struct:process_frame_ctx
184 Event *event = ctx->event; local
206 event->stack[i] = *symbol_id;
208 event->stack_len = i + 1;
231 Event* event = bpf_map_lookup_elem(&eventmap, &zero); local
232 if (!event)
235 event->pid = pid;
237 event->tid = (pid_t)pid_tgid;
238 bpf_get_current_comm(&event
[all...]
/linux-master/tools/perf/util/
ordered-events.c
36 * last event might point to some random place in the list as it's
37 * the last queued event. We expect that the new event is close to
65 union perf_event *event)
70 new_event = memdup(event, event->header.size);
72 oe->cur_alloc_size += event->header.size;
79 union perf_event *event)
81 return oe->copy_on_queue ? __dup_event(oe, event) : event;
64 __dup_event(struct ordered_events *oe, union perf_event *event) argument
78 dup_event(struct ordered_events *oe, union perf_event *event) argument
84 __free_dup_event(struct ordered_events *oe, union perf_event *event) argument
92 free_dup_event(struct ordered_events *oe, union perf_event *event) argument
99 alloc_event(struct ordered_events *oe, union perf_event *event) argument
172 ordered_events__new_event(struct ordered_events *oe, u64 timestamp, union perf_event *event) argument
186 ordered_events__delete(struct ordered_events *oe, struct ordered_event *event) argument
194 ordered_events__queue(struct ordered_events *oe, union perf_event *event, u64 timestamp, u64 file_offset, const char *file_path) argument
352 struct ordered_event *event; local
[all...]
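
The ordered-events.c snippet above shows that, with copy_on_queue set, __dup_event() duplicates the incoming perf event with memdup() using its header size and adds that size to cur_alloc_size; otherwise the caller's pointer is queued directly. A trimmed-down sketch of that idea, using stand-in types rather than perf's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sample_event {
    size_t size;                 /* stand-in for event->header.size */
    char payload[32];
};

struct queue {
    int copy_on_queue;
    size_t cur_alloc_size;       /* bytes currently held in queued copies */
};

static void *memdup(const void *src, size_t len)
{
    void *p = malloc(len);
    if (p)
        memcpy(p, src, len);
    return p;
}

static struct sample_event *dup_event(struct queue *q, struct sample_event *ev)
{
    if (!q->copy_on_queue)
        return ev;               /* caller guarantees the event's lifetime */
    struct sample_event *copy = memdup(ev, ev->size);
    if (copy)
        q->cur_alloc_size += ev->size;
    return copy;
}

int main(void)
{
    struct queue q = { .copy_on_queue = 1 };
    struct sample_event ev = { .size = sizeof(ev), .payload = "sample" };
    struct sample_event *queued = dup_event(&q, &ev);
    printf("queued=%s alloc=%zu\n", queued ? queued->payload : "(oom)", q.cur_alloc_size);
    if (queued != &ev)
        free(queued);
    return 0;
}
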
dlfilter.h
31 union perf_event *event; member in struct:dlfilter
59 union perf_event *event,
70 union perf_event *event,
79 return dlfilter__do_filter_event(d, event, sample, evsel, machine, al, addr_al, false);
83 union perf_event *event,
92 return dlfilter__do_filter_event(d, event, sample, evsel, machine, al, addr_al, true);
69 dlfilter__filter_event(struct dlfilter *d, union perf_event *event, struct perf_sample *sample, struct evsel *evsel, struct machine *machine, struct addr_location *al, struct addr_location *addr_al) argument
82 dlfilter__filter_event_early(struct dlfilter *d, union perf_event *event, struct perf_sample *sample, struct evsel *evsel, struct machine *machine, struct addr_location *al, struct addr_location *addr_al) argument
/linux-master/arch/x86/events/
core.c
111 * Propagate event elapsed time into the generic event.
112 * Can only be executed on the CPU where the event is active.
115 u64 x86_perf_event_update(struct perf_event *event) argument
117 struct hw_perf_event *hwc = &event->hw;
126 * Careful: an NMI might modify the previous event value.
130 * count to the generic event atomically:
141 * (event-)time and add that to the generic event.
149 local64_add(delta, &event
158 x86_pmu_extra_regs(u64 config, struct perf_event *event) argument
340 hw_perf_event_destroy(struct perf_event *event) argument
346 hw_perf_lbr_event_destroy(struct perf_event *event) argument
360 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event) argument
474 x86_setup_perfctr(struct perf_event *event) argument
519 precise_br_compat(struct perf_event *event) argument
561 x86_pmu_hw_config(struct perf_event *event) argument
652 __x86_pmu_event_init(struct perf_event *event) argument
749 is_x86_event(struct perf_event *event) argument
786 int event; /* event index */ member in struct:sched_state
1114 add_nr_metric_event(struct cpu_hw_events *cpuc, struct perf_event *event) argument
1127 del_nr_metric_event(struct cpu_hw_events *cpuc, struct perf_event *event) argument
1134 collect_event(struct cpu_hw_events *cpuc, struct perf_event *event, int max_count, int n) argument
1162 struct perf_event *event; local
1212 x86_assign_hw_event(struct perf_event *event, struct cpu_hw_events *cpuc, int i) argument
1265 x86_perf_rdpmc_index(struct perf_event *event) argument
1286 struct perf_event *event; local
1365 x86_perf_event_set_period(struct perf_event *event) argument
1424 x86_pmu_enable_event(struct perf_event *event) argument
1437 x86_pmu_add(struct perf_event *event, int flags) argument
1495 x86_pmu_start(struct perf_event *event, int flags) argument
1582 x86_pmu_stop(struct perf_event *event, int flags) argument
1605 x86_pmu_del(struct perf_event *event, int flags) argument
1669 struct perf_event *event; local
1958 x86_event_sysfs_show(char *page, u64 config, u64 event) argument
2036 _x86_pmu_read(struct perf_event *event) argument
2203 x86_pmu_read(struct perf_event *event) argument
2347 validate_event(struct perf_event *event) argument
2381 validate_group(struct perf_event *event) argument
2433 x86_pmu_event_init(struct perf_event *event) argument
2496 x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm) argument
2517 x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm) argument
2526 x86_pmu_event_idx(struct perf_event *event) argument
2641 x86_pmu_check_period(struct perf_event *event, u64 value) argument
2656 x86_pmu_aux_output_match(struct perf_event *event) argument
2707 arch_perf_update_userpage(struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) argument
[all...]
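
The x86 core.c snippet above ("Propagate event elapsed time into the generic event ... count to the generic event atomically") updates a perf event by taking the difference between the previous and current raw counter values and adding it to the 64-bit software count. Hardware counters are narrower than 64 bits, so both samples are shifted up to full width before subtracting, which keeps the delta correct across a counter wrap. A minimal userspace sketch of just that arithmetic; the NMI-safe compare-and-exchange loop in the real code is omitted and names are illustrative.

#include <stdio.h>
#include <stdint.h>

/* Width-limited counter delta: shift both samples up to 64 bits, subtract,
 * then shift back with an arithmetic shift so sign and magnitude survive a
 * wrap of the narrow hardware counter. */
static int64_t counter_delta(uint64_t prev, uint64_t now, unsigned int width)
{
    unsigned int shift = 64 - width;
    int64_t delta = (int64_t)((now << shift) - (prev << shift));
    return delta >> shift;
}

int main(void)
{
    /* a 48-bit counter that wrapped between the two reads */
    uint64_t prev = 0xFFFFFFFFFF00ULL, now = 0x100ULL;
    printf("delta=%lld\n", (long long)counter_delta(prev, now, 48));  /* 512 */
    return 0;
}
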
perf_event.h
89 static inline bool is_topdown_count(struct perf_event *event) argument
91 return event->hw.flags & PERF_X86_EVENT_TOPDOWN;
94 static inline bool is_metric_event(struct perf_event *event) argument
96 u64 config = event->attr.config;
103 static inline bool is_slots_event(struct perf_event *event) argument
105 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;
108 static inline bool is_topdown_event(struct perf_event *event) argument
110 return is_metric_event(event) || is_slots_event(event);
113 static inline bool is_branch_counters_group(struct perf_event *event) argument
570 unsigned int event; member in struct:extra_reg
630 u64 event:8, member in struct:x86_pmu_config::__anon101
1185 x86_pmu_disable_event(struct perf_event *event) argument
1359 amd_pmu_brs_add(struct perf_event *event) argument
1371 amd_pmu_brs_del(struct perf_event *event) argument
1392 amd_brs_hw_config(struct perf_event *event) argument
1398 amd_pmu_brs_add(struct perf_event *event) argument
1402 amd_pmu_brs_del(struct perf_event *event) argument
1441 is_pebs_pt(struct perf_event *event) argument
1448 intel_pmu_has_bts_period(struct perf_event *event, u64 period) argument
1462 intel_pmu_has_bts(struct perf_event *event) argument
[all...]
/linux-master/drivers/s390/net/
fsm.h
57 int event; member in struct:__anon8034
79 * Description of a state-event combination
127 fsm_record_history(fsm_instance *fi, int state, int event);
131 * Emits an event to a FSM.
132 * If an action function is defined for the current state/event combination,
135 * @param fi Pointer to FSM which should receive the event.
136 * @param event The event do be delivered.
140 * 1 if current state or event is out of range
141 * !0 if state and event i
144 fsm_event(fsm_instance *fi, int event, void *arg) argument
[all...]
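
The fsm.h snippet above documents fsm_event(): deliver an event to a finite state machine, call the action function registered for the current state/event combination if one is defined, and report out-of-range state or event values. A standalone sketch of that dispatch pattern follows; the states, events, actions and the simplified signature (no fsm_instance) are invented for illustration and are not the s390 implementation.

#include <stdio.h>
#include <stddef.h>

enum state { ST_IDLE, ST_RUNNING, NR_STATES };
enum event { EV_START, EV_STOP, NR_EVENTS };

typedef void (*fsm_action)(int *state, void *arg);

static void do_start(int *state, void *arg) { (void)arg; *state = ST_RUNNING; puts("started"); }
static void do_stop(int *state, void *arg)  { (void)arg; *state = ST_IDLE;    puts("stopped"); }

/* action[state][event]; a NULL slot means the event is ignored in that state */
static const fsm_action action[NR_STATES][NR_EVENTS] = {
    [ST_IDLE]    = { [EV_START] = do_start },
    [ST_RUNNING] = { [EV_STOP]  = do_stop  },
};

/* returns 0 on success, 1 if the state or event is out of range */
static int fsm_deliver(int *state, int event, void *arg)
{
    if (*state < 0 || *state >= NR_STATES || event < 0 || event >= NR_EVENTS)
        return 1;
    if (action[*state][event])
        action[*state][event](state, arg);
    return 0;
}

int main(void)
{
    int st = ST_IDLE;
    fsm_deliver(&st, EV_START, NULL);
    fsm_deliver(&st, EV_STOP, NULL);
    return 0;
}
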
/linux-master/tools/virtio/ringtest/
ring.c
6 * Simple descriptor-based ring. virtio 0.9 compatible event index is used for
16 * Prev - "Next" value when event triggered previously.
17 * Event - Peer requested event after writing this entry.
19 static inline bool need_event(unsigned short event, argument
23 return (unsigned short)(next - event - 1) < (unsigned short)(next - prev);
44 struct event { struct
57 struct event *event; variable in typeref:struct:event
87 event = calloc(1, sizeof(*event));
[all...]
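
The ring.c snippet above quotes the virtio 0.9 event-index test verbatim: notify the peer only when the index it asked to be woken at ("event") falls inside the window of entries written since the previous notification ("prev" up to "next"), with all arithmetic done modulo 2^16. The small program below simply exercises that comparison with a few sample values; the surrounding ring structures are omitted.

#include <stdio.h>
#include <stdbool.h>

/* Comparison taken from the snippet: true iff "event" lies in the
 * half-open window (prev, next], computed modulo 2^16. */
static bool need_event(unsigned short event, unsigned short next,
                       unsigned short prev)
{
    return (unsigned short)(next - event - 1) < (unsigned short)(next - prev);
}

int main(void)
{
    printf("%d\n", need_event(5, 6, 4));  /* just crossed the requested index -> 1 */
    printf("%d\n", need_event(9, 6, 4));  /* not reached yet                  -> 0 */
    printf("%d\n", need_event(3, 6, 4));  /* already covered by a past notify -> 0 */
    return 0;
}
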
/linux-master/drivers/s390/cio/
vfio_ccw_trace.h
23 int event),
24 TP_ARGS(schid, mask, event),
31 __field(int, event)
39 __entry->event = event;
42 TP_printk("schid=%x.%x.%04x mask=0x%x event=%d",
47 __entry->event)
81 TP_PROTO(struct subchannel_id schid, int state, int event),
82 TP_ARGS(schid, state, event),
89 __field(int, event)
[all...]
/linux-master/kernel/trace/
trace_output.h
23 struct trace_event *event);
30 int flags, struct trace_event *event);
35 extern int __unregister_trace_event(struct trace_event *event);
/linux-master/drivers/fpga/
dfl-fme-perf.c
165 * @event_init: callback invoked during event init.
166 * @event_destroy: callback invoked during event destroy.
170 int (*event_init)(struct fme_perf_priv *priv, u32 event, u32 portid);
171 void (*event_destroy)(struct fme_perf_priv *priv, u32 event,
173 u64 (*read_counter)(struct fme_perf_priv *priv, u32 event, u32 portid);
217 PMU_FORMAT_ATTR(event, "config:0-11");
288 static int basic_event_init(struct fme_perf_priv *priv, u32 event, u32 portid) argument
290 if (event <= BASIC_EVNT_MAX && is_portid_root(portid))
297 u32 event, u32 portid)
304 static int cache_event_init(struct fme_perf_priv *priv, u32 event, u3 argument
296 basic_read_event_counter(struct fme_perf_priv *priv, u32 event, u32 portid) argument
313 cache_read_event_counter(struct fme_perf_priv *priv, u32 event, u32 portid) argument
349 is_fabric_event_supported(struct fme_perf_priv *priv, u32 event, u32 portid) argument
363 fabric_event_init(struct fme_perf_priv *priv, u32 event, u32 portid) argument
414 fabric_event_destroy(struct fme_perf_priv *priv, u32 event, u32 portid) argument
422 fabric_read_event_counter(struct fme_perf_priv *priv, u32 event, u32 portid) argument
444 vtd_event_init(struct fme_perf_priv *priv, u32 event, u32 portid) argument
453 vtd_read_event_counter(struct fme_perf_priv *priv, u32 event, u32 portid) argument
477 vtd_sip_event_init(struct fme_perf_priv *priv, u32 event, u32 portid) argument
486 vtd_sip_read_event_counter(struct fme_perf_priv *priv, u32 event, u32 portid) argument
786 fme_perf_event_destroy(struct perf_event *event) argument
795 fme_perf_event_init(struct perf_event *event) argument
842 fme_perf_event_update(struct perf_event *event) argument
856 fme_perf_event_start(struct perf_event *event, int flags) argument
867 fme_perf_event_stop(struct perf_event *event, int flags) argument
872 fme_perf_event_add(struct perf_event *event, int flags) argument
880 fme_perf_event_del(struct perf_event *event, int flags) argument
885 fme_perf_event_read(struct perf_event *event) argument
[all...]
/linux-master/tools/testing/selftests/ftrace/test.d/event/
event-pid.tc
3 # description: event tracing - restricts events based on pid
10 echo 0 > options/event-fork
20 echo 0 > options/event-fork
49 echo 1 > options/event-fork
/linux-master/tools/testing/selftests/powerpc/pmu/event_code_tests/
blacklisted_events_test.c
9 #include "../event.h"
94 * event code should cause event_open to fail in power9
99 struct event event; local
116 event_init(&event, blacklist_events_dd21[i]);
117 FAIL_IF(!event_open(&event));
121 event_init(&event, blacklist_events_dd22[i]);
122 FAIL_IF(!event_open(&event));
/linux-master/drivers/hwtracing/coresight/
coresight-etm-perf.c
32 * An ETM context for a running event includes the perf aux handle
34 * the trace path and the sink configuration. The event data is accessible
41 * until "free_aux()", which cannot happen as long as the event is active on
151 static void etm_event_read(struct perf_event *event) {} argument
153 static int etm_addr_filters_alloc(struct perf_event *event) argument
156 int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
162 if (event->parent)
163 memcpy(filters, event->parent->hw.addr_filters,
166 event
171 etm_event_destroy(struct perf_event *event) argument
177 etm_event_init(struct perf_event *event) argument
306 etm_setup_aux(struct perf_event *event, void **pages, int nr_pages, bool overwrite) argument
447 etm_event_start(struct perf_event *event, int flags) argument
538 etm_event_stop(struct perf_event *event, int mode) argument
634 etm_event_add(struct perf_event *event, int mode) argument
650 etm_event_del(struct perf_event *event, int mode) argument
694 etm_addr_filters_sync(struct perf_event *event) argument
[all...]
/linux-master/drivers/perf/hisilicon/
hns3_pmu.c
77 * Each hardware event contains two registers (counter and ext_counter) for
79 * be triggered to run at the same when a hardware event is enabled. The meaning
80 * of counter and ext_counter of different event type are different, their
84 * | event type | counter | ext_counter |
98 * Performance of each hardware event is calculated by: counter / ext_counter.
101 * ext_counter as a separate event for userspace and use bit 16 to indicate it.
102 * For example, event 0x00001 and 0x10001 are actually one event for hardware
103 * because bit 0-15 are same. If the bit 16 of one event is 0 means to read
224 /* filter mode supported by each bandwidth event */
297 u32 event; member in struct:hns3_pmu_event_attr
369 struct hns3_pmu_event_attr *event; local
382 struct hns3_pmu_event_attr *event; local
717 hns3_pmu_get_event(struct perf_event *event) argument
724 hns3_pmu_get_real_event(struct perf_event *event) argument
766 hns3_pmu_cmp_event(struct perf_event *target, struct perf_event *event) argument
772 hns3_pmu_find_related_event_idx(struct hns3_pmu *hns3_pmu, struct perf_event *event) argument
880 hns3_pmu_get_pmu_event(u32 event) argument
901 hns3_pmu_set_func_mode(struct perf_event *event, struct hns3_pmu *hns3_pmu) argument
915 hns3_pmu_set_func_queue_mode(struct perf_event *event, struct hns3_pmu *hns3_pmu) argument
936 hns3_pmu_is_enabled_global_mode(struct perf_event *event, struct hns3_pmu_event_attr *pmu_event) argument
947 hns3_pmu_is_enabled_func_mode(struct perf_event *event, struct hns3_pmu_event_attr *pmu_event) argument
962 hns3_pmu_is_enabled_func_queue_mode(struct perf_event *event, struct hns3_pmu_event_attr *pmu_event) argument
976 hns3_pmu_is_enabled_port_mode(struct perf_event *event, struct hns3_pmu_event_attr *pmu_event) argument
988 hns3_pmu_is_enabled_port_tc_mode(struct perf_event *event, struct hns3_pmu_event_attr *pmu_event) argument
1000 hns3_pmu_is_enabled_func_intr_mode(struct perf_event *event, struct hns3_pmu *hns3_pmu, struct hns3_pmu_event_attr *pmu_event) argument
1012 hns3_pmu_select_filter_mode(struct perf_event *event, struct hns3_pmu *hns3_pmu) argument
1054 hns3_pmu_validate_event_group(struct perf_event *event) argument
1101 hns3_pmu_get_filter_condition(struct perf_event *event) argument
1128 hns3_pmu_config_filter(struct perf_event *event) argument
1210 hns3_pmu_read_counter(struct perf_event *event) argument
1217 hns3_pmu_write_counter(struct perf_event *event, u64 value) argument
1226 hns3_pmu_init_counter(struct perf_event *event) argument
1234 hns3_pmu_event_init(struct perf_event *event) argument
1278 hns3_pmu_read(struct perf_event *event) argument
1293 hns3_pmu_start(struct perf_event *event, int flags) argument
1312 hns3_pmu_stop(struct perf_event *event, int flags) argument
1331 hns3_pmu_add(struct perf_event *event, int flags) argument
1364 hns3_pmu_del(struct perf_event *event, int flags) argument
[all...]
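
The hns3_pmu.c comments above explain that every hardware event drives a counter/ext_counter pair started together, that the reported performance is counter divided by ext_counter, and that bit 16 of the userspace event id selects which of the pair is read while bits 0-15 name the hardware event (so 0x00001 and 0x10001 refer to the same hardware event). A small sketch of that decoding; the macro names are illustrative.

#include <stdio.h>
#include <stdint.h>

#define HNS3_HW_EVENT(cfg)   ((uint32_t)(cfg) & 0xFFFF)       /* bits 0-15: hardware event id */
#define HNS3_WANT_EXT(cfg)   (((uint32_t)(cfg) >> 16) & 0x1)  /* bit 16: read ext_counter     */

int main(void)
{
    uint32_t ids[] = { 0x00001, 0x10001 };
    for (unsigned int i = 0; i < 2; i++)
        printf("config=0x%05x -> hw event 0x%04x, %s counter\n",
               (unsigned)ids[i], (unsigned)HNS3_HW_EVENT(ids[i]),
               HNS3_WANT_EXT(ids[i]) ? "extended" : "main");
    return 0;
}
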
/linux-master/arch/x86/events/amd/
core.c
246 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
247 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
344 * AMD64 events are detected based on their event codes.
364 static int amd_core_hw_config(struct perf_event *event) argument
366 if (event->attr.exclude_host && event->attr.exclude_guest)
372 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
374 else if (event->attr.exclude_host)
375 event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
376 else if (event
400 amd_pmu_hw_config(struct perf_event *event) argument
421 __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) argument
478 __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, struct event_constraint *c) argument
746 amd_pmu_enable_event(struct perf_event *event) argument
767 amd_pmu_v2_enable_event(struct perf_event *event) argument
793 amd_pmu_disable_event(struct perf_event *event) argument
831 amd_pmu_add_event(struct perf_event *event) argument
839 amd_pmu_del_event(struct perf_event *event) argument
946 struct perf_event *event; local
1035 amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) argument
1047 amd_put_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) argument
1144 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) argument
1219 amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) argument
1230 amd_put_event_constraints_f17h(struct cpu_hw_events *cpuc, struct perf_event *event) argument
1260 amd_get_event_constraints_f19h(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) argument
1286 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) | local
1292 amd_pmu_limit_period(struct perf_event *event, s64 *left) argument
[all...]
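
The amd/core.c snippet above filters events by host/guest context: if both exclude_host and exclude_guest are set, the user/kernel enable bits are cleared (nothing is left to count), and excluding the host sets a guest-only bit. The remaining branch is truncated in the snippet, so the sketch below assumes a symmetric host-only bit; all bit positions here are made up for illustration.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define EVENTSEL_USR        (1u << 16)   /* count user mode   (illustrative) */
#define EVENTSEL_OS         (1u << 17)   /* count kernel mode (illustrative) */
#define EVENTSEL_GUESTONLY  (1u << 20)   /* assumed guest-only bit */
#define EVENTSEL_HOSTONLY   (1u << 21)   /* assumed host-only bit  */

static uint32_t apply_hv_filter(uint32_t config, bool exclude_host, bool exclude_guest)
{
    if (exclude_host && exclude_guest)
        config &= ~(EVENTSEL_USR | EVENTSEL_OS);   /* nothing left to count */
    else if (exclude_host)
        config |= EVENTSEL_GUESTONLY;
    else if (exclude_guest)
        config |= EVENTSEL_HOSTONLY;               /* assumed counterpart branch */
    return config;
}

int main(void)
{
    printf("0x%x\n", (unsigned)apply_hv_filter(EVENTSEL_USR | EVENTSEL_OS, true, false));
    return 0;
}
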

Completed in 412 milliseconds
