Searched refs:ctx (Results 1 - 25 of 3527) sorted by last modified time


/linux-master/net/bridge/
br_device.c:383 static int br_fill_forward_path(struct net_device_path_ctx *ctx, argument
390 if (netif_is_bridge_port(ctx->dev))
393 br = netdev_priv(ctx->dev);
395 br_vlan_fill_forward_path_pvid(br, ctx, path);
397 f = br_fdb_find_rcu(br, ctx->daddr, path->bridge.vlan_id);
410 ctx->dev = dst->dev;
414 if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan))
416 ctx->vlan[ctx
[all...]
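
The br_fill_forward_path() hits above show a recurring kernel idiom: bounds-check the context's fixed-size array before appending to it. A minimal user-space sketch of that idiom follows; the types and error return are simplified stand-ins for struct net_device_path_ctx and -ENOSPC:

    /* Append a VLAN entry only if the fixed array has room, as
     * br_fill_forward_path() does with ctx->vlan / ctx->num_vlans. */
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct path_ctx {
        unsigned int num_vlans;
        struct { unsigned short id, proto; } vlan[4];
    };

    static int push_vlan(struct path_ctx *ctx, unsigned short id, unsigned short proto)
    {
        if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan))
            return -1;                      /* the kernel returns -ENOSPC here */
        ctx->vlan[ctx->num_vlans].id = id;
        ctx->vlan[ctx->num_vlans].proto = proto;
        ctx->num_vlans++;
        return 0;
    }

    int main(void)
    {
        struct path_ctx ctx = { 0 };

        for (unsigned int id = 1; id <= 5; id++)
            if (push_vlan(&ctx, (unsigned short)id, 0x8100))
                printf("vlan %u rejected: ctx full\n", id);
        printf("stored %u vlans\n", ctx.num_vlans);
        return 0;
    }
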
/linux-master/mm/
userfaultfd.c:34 if (!dst_vma->vm_userfaultfd_ctx.ctx)
492 struct userfaultfd_ctx *ctx,
517 up_read(&ctx->map_changing_lock);
560 down_read(&ctx->map_changing_lock);
562 if (atomic_read(&ctx->mmap_changing))
606 up_read(&ctx->map_changing_lock);
635 up_read(&ctx->map_changing_lock);
648 extern ssize_t mfill_atomic_hugetlb(struct userfaultfd_ctx *ctx,
700 static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx, argument
706 struct mm_struct *dst_mm = ctx
491 mfill_atomic_hugetlb( struct userfaultfd_ctx *ctx, struct vm_area_struct *dst_vma, unsigned long dst_start, unsigned long src_start, unsigned long len, uffd_flags_t flags) argument
861 mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start, unsigned long src_start, unsigned long len, uffd_flags_t flags) argument
869 mfill_atomic_zeropage(struct userfaultfd_ctx *ctx, unsigned long start, unsigned long len) argument
877 mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long start, unsigned long len, uffd_flags_t flags) argument
893 mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start, unsigned long len, uffd_flags_t flags) argument
929 mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start, unsigned long len, bool enable_wp) argument
1364 validate_move_areas(struct userfaultfd_ctx *ctx, struct vm_area_struct *src_vma, struct vm_area_struct *dst_vma) argument
1595 move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start, unsigned long src_start, unsigned long len, __u64 mode) argument
[all...]
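
A pattern repeats through the mfill_atomic*() hits above: take ctx->map_changing_lock for reading, bail out with -EAGAIN if ctx->mmap_changing is set, and drop the lock on every exit path. A hedged user-space sketch of that locking shape, with a pthread rwlock and C11 atomics standing in for the kernel primitives:

    /* build: cc sketch.c -pthread */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <errno.h>

    struct uffd_ctx {
        pthread_rwlock_t map_changing_lock;
        atomic_int mmap_changing;
    };

    static int mfill_atomic_sketch(struct uffd_ctx *ctx)
    {
        int err = 0;

        pthread_rwlock_rdlock(&ctx->map_changing_lock);
        if (atomic_load(&ctx->mmap_changing)) {
            err = -EAGAIN;  /* layout is changing; caller is expected to retry */
        } else {
            /* ... safe to fill pages while the mapping is stable ... */
        }
        pthread_rwlock_unlock(&ctx->map_changing_lock);
        return err;
    }

    int main(void)
    {
        struct uffd_ctx ctx = { PTHREAD_RWLOCK_INITIALIZER, 0 };

        printf("first try: %d\n", mfill_atomic_sketch(&ctx));
        atomic_store(&ctx.mmap_changing, 1);
        printf("while changing: %d\n", mfill_atomic_sketch(&ctx));
        return 0;
    }
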
shmem.c:3931 struct shmem_options *ctx = fc->fs_private; local
3954 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3955 ctx->seen |= SHMEM_SEEN_BLOCKS;
3958 ctx->blocks = memparse(param->string, &rest);
3959 if (*rest || ctx->blocks > LONG_MAX)
3961 ctx->seen |= SHMEM_SEEN_BLOCKS;
3964 ctx->inodes = memparse(param->string, &rest);
3965 if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE)
3967 ctx->seen |= SHMEM_SEEN_INODES;
3970 ctx
4149 struct shmem_options *ctx = fc->fs_private; local
4332 struct shmem_options *ctx = fc->fs_private; local
4445 struct shmem_options *ctx = fc->fs_private; local
4630 struct shmem_options *ctx; local
[all...]
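
The shmem.c hits show the usual mount-option parsing shape: memparse() a size string, reject trailing characters or out-of-range values, then record the option in ctx->seen. The sketch below is a user-space stand-in; this memparse() is a simplified assumption (the kernel helper handles more suffixes), and SEEN_BLOCKS models SHMEM_SEEN_BLOCKS:

    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    #define SEEN_BLOCKS 0x1

    struct shmem_opts {
        unsigned long long blocks;
        unsigned int seen;
    };

    /* Minimal stand-in for the kernel's memparse(): number + K/M/G suffix. */
    static unsigned long long memparse(const char *s, char **rest)
    {
        unsigned long long val = strtoull(s, rest, 0);

        switch (**rest) {
        case 'G': val <<= 10; /* fall through */
        case 'M': val <<= 10; /* fall through */
        case 'K': val <<= 10; (*rest)++;
        }
        return val;
    }

    static int parse_nr_blocks(struct shmem_opts *ctx, const char *param)
    {
        char *rest;

        ctx->blocks = memparse(param, &rest);
        if (*rest || ctx->blocks > LONG_MAX)    /* trailing junk or overflow */
            return -1;
        ctx->seen |= SEEN_BLOCKS;
        return 0;
    }

    int main(void)
    {
        struct shmem_opts ctx = { 0 };

        printf("ok=%d blocks=%llu\n", parse_nr_blocks(&ctx, "1G"), ctx.blocks);
        printf("bad=%d\n", parse_nr_blocks(&ctx, "10Q"));
        return 0;
    }
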
ksm.c:370 static inline unsigned long prev_scan_time(struct advisor_ctx *ctx, argument
373 return ctx->scan_time ? ctx->scan_time : scan_time;
gup.c:612 struct follow_page_context *ctx)
634 ctx->page_mask = (1U << huge_page_order(h)) - 1;
653 struct follow_page_context *ctx)
688 int flags, struct follow_page_context *ctx)
721 ctx->pgmap = get_dev_pagemap(pfn, ctx->pgmap);
722 if (!ctx->pgmap)
736 ctx->page_mask = HPAGE_PUD_NR - 1;
782 struct follow_page_context *ctx)
819 ctx
609 follow_hugepd(struct vm_area_struct *vma, hugepd_t hugepd, unsigned long addr, unsigned int pdshift, unsigned int flags, struct follow_page_context *ctx) argument
650 follow_hugepd(struct vm_area_struct *vma, hugepd_t hugepd, unsigned long addr, unsigned int pdshift, unsigned int flags, struct follow_page_context *ctx) argument
686 follow_huge_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pudp, int flags, struct follow_page_context *ctx) argument
779 follow_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags, struct follow_page_context *ctx) argument
825 follow_huge_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pudp, int flags, struct follow_page_context *ctx) argument
832 follow_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags, struct follow_page_context *ctx) argument
1012 follow_pmd_mask(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, unsigned int flags, struct follow_page_context *ctx) argument
1067 follow_pud_mask(struct vm_area_struct *vma, unsigned long address, p4d_t *p4dp, unsigned int flags, struct follow_page_context *ctx) argument
1098 follow_p4d_mask(struct vm_area_struct *vma, unsigned long address, pgd_t *pgdp, unsigned int flags, struct follow_page_context *ctx) argument
1143 follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct follow_page_context *ctx) argument
1172 struct follow_page_context ctx = { NULL }; local
1525 struct follow_page_context ctx = { NULL }; local
[all...]
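
Several gup.c hits set ctx->page_mask from a huge-page order, e.g. (1U << huge_page_order(h)) - 1 or HPAGE_PUD_NR - 1. The arithmetic is easy to check in isolation; order 9 below is an illustrative value (2 MiB huge pages on 4 KiB base pages), not taken from the source:

    #include <stdio.h>

    int main(void)
    {
        unsigned int order = 9;                  /* 2^9 base pages per huge page */
        unsigned int page_mask = (1U << order) - 1;
        unsigned long pfn = 0x12345;

        printf("mask=0x%x\n", page_mask);
        printf("index within huge page: %lu\n", pfn & page_mask);
        printf("first pfn of huge page: 0x%lx\n", pfn & ~(unsigned long)page_mask);
        return 0;
    }
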
/linux-master/mm/damon/
H A Dsysfs-schemes.c1952 struct damon_ctx *ctx)
1957 damon_for_each_scheme(scheme, ctx) {
1978 struct damon_ctx *ctx)
1983 damon_for_each_scheme(scheme, ctx) {
2096 int damon_sysfs_set_schemes(struct damon_ctx *ctx, argument
2102 damon_for_each_scheme_safe(scheme, next, ctx) {
2116 damon_for_each_scheme_safe(scheme, next, ctx)
2120 damon_add_scheme(ctx, scheme);
2127 struct damon_ctx *ctx)
2132 damon_for_each_scheme(scheme, ctx) {
1951 damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes, struct damon_ctx *ctx) argument
1976 damos_sysfs_update_effective_quotas( struct damon_sysfs_schemes *sysfs_schemes, struct damon_ctx *ctx) argument
2125 damon_sysfs_schemes_update_stats( struct damon_sysfs_schemes *sysfs_schemes, struct damon_ctx *ctx) argument
2161 damon_sysfs_before_damos_apply(struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, struct damos *s) argument
2210 damos_sysfs_mark_finished_regions_updates(struct damon_ctx *ctx) argument
2229 damon_sysfs_schemes_clear_regions( struct damon_sysfs_schemes *sysfs_schemes, struct damon_ctx *ctx) argument
2251 damos_sysfs_nth_scheme(int n, struct damon_ctx *ctx) argument
2264 damos_tried_regions_init_upd_status( struct damon_sysfs_schemes *sysfs_schemes, struct damon_ctx *ctx) argument
2289 damon_sysfs_schemes_update_regions_start( struct damon_sysfs_schemes *sysfs_schemes, struct damon_ctx *ctx, bool total_bytes_only) argument
2322 damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx) argument
[all...]
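
damon_sysfs_set_schemes() above walks with damon_for_each_scheme_safe(), the variant that caches the next node so the current scheme can be unlinked or freed mid-walk. A self-contained sketch of why the "safe" form matters, using a plain singly linked list as a stand-in for the scheme list:

    #include <stdio.h>
    #include <stdlib.h>

    struct scheme {
        int id;
        struct scheme *next;
    };

    /* Cache 'n' before the body runs so 's' may be freed inside it. */
    #define for_each_scheme_safe(s, n, head) \
        for ((s) = (head); (s) && ((n) = (s)->next, 1); (s) = (n))

    int main(void)
    {
        struct scheme *head = NULL, *s, *next;

        for (int i = 3; i >= 1; i--) {          /* build 1 -> 2 -> 3 */
            s = malloc(sizeof(*s));
            s->id = i;
            s->next = head;
            head = s;
        }
        for_each_scheme_safe(s, next, head) {   /* free while walking */
            printf("removing scheme %d\n", s->id);
            free(s);
        }
        return 0;
    }
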
paddr.c:76 static void damon_pa_prepare_access_checks(struct damon_ctx *ctx) argument
81 damon_for_each_target(t, ctx) {
182 static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx) argument
188 damon_for_each_target(t, ctx) {
190 __damon_pa_check_access(r, &ctx->attrs);
328 static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx, argument
core.c:92 * @ctx: monitoring context to use the operations.
96 * @ctx to use it.
100 int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id) argument
111 ctx->ops = damon_registered_ops[id];
387 static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx) argument
389 unsigned long sample_interval = ctx->attrs.sample_interval ?
390 ctx->attrs.sample_interval : 1;
392 s->apply_interval_us : ctx->attrs.aggr_interval;
394 s->next_apply_sis = ctx->passed_sample_intervals +
398 void damon_add_scheme(struct damon_ctx *ctx, struc argument
449 damon_add_target(struct damon_ctx *ctx, struct damon_target *t) argument
454 damon_targets_empty(struct damon_ctx *ctx) argument
486 struct damon_ctx *ctx; local
514 damon_destroy_targets(struct damon_ctx *ctx) argument
527 damon_destroy_ctx(struct damon_ctx *ctx) argument
585 damon_update_monitoring_results(struct damon_ctx *ctx, struct damon_attrs *new_attrs) argument
618 damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs) argument
654 damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes, ssize_t nr_schemes) argument
681 damon_region_sz_limit(struct damon_ctx *ctx) argument
710 __damon_start(struct damon_ctx *ctx) argument
778 __damon_stop(struct damon_ctx *ctx) argument
934 __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, struct damos_filter *filter) argument
982 damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, struct damos *s) argument
1434 kdamond_split_regions(struct damon_ctx *ctx) argument
1466 kdamond_need_stop(struct damon_ctx *ctx) argument
1541 kdamond_wait_activation(struct damon_ctx *ctx) argument
1568 kdamond_init_intervals_sis(struct damon_ctx *ctx) argument
1592 struct damon_ctx *ctx = data; local
[all...]
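
damos_set_next_apply_sis() above is pure interval arithmetic: convert the scheme's apply interval (microseconds) into a count of sample intervals, fall back to the aggregation interval when the scheme has none, and guard against a zero sample interval. Worked through with made-up values:

    #include <stdio.h>

    int main(void)
    {
        /* Illustrative values; field names follow the hits above. */
        unsigned long sample_interval = 5000;       /* ctx->attrs.sample_interval, us */
        unsigned long aggr_interval = 100000;       /* ctx->attrs.aggr_interval, us */
        unsigned long apply_interval_us = 0;        /* s->apply_interval_us, 0 = unset */
        unsigned long passed_sample_intervals = 42; /* ctx->passed_sample_intervals */

        unsigned long sample = sample_interval ? sample_interval : 1;
        unsigned long apply = apply_interval_us ? apply_interval_us : aggr_interval;

        /* next_apply_sis: the sample-interval count at which to apply again */
        unsigned long next_apply_sis = passed_sample_intervals + apply / sample;

        printf("apply the scheme again at sample interval #%lu\n", next_apply_sis);
        return 0;
    }
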
/linux-master/kernel/trace/
trace_uprobe.c:685 struct traceprobe_parse_context ctx = { local
690 ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i], &ctx);
691 traceprobe_finish_parse(&ctx);
1078 enum uprobe_filter_ctx ctx,
1344 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1343 uprobe_perf_filter(struct uprobe_consumer *uc, enum uprobe_filter_ctx ctx, struct mm_struct *mm) argument
trace_probe.h:428 struct traceprobe_parse_context *ctx);
431 struct traceprobe_parse_context *ctx);
441 void traceprobe_finish_parse(struct traceprobe_parse_context *ctx);
trace_probe.c:291 struct traceprobe_parse_context *ctx)
296 head = trace_get_fields(ctx->event);
353 struct traceprobe_parse_context *ctx)
355 struct btf *btf = ctx->btf;
357 if (!btf || !ctx->last_type)
361 if (btf_type_is_char_array(btf, ctx->last_type))
365 if (btf_type_is_char_ptr(btf, ctx->last_type)) {
369 trace_probe_log_err(ctx->offset, TOO_MANY_OPS);
381 trace_probe_log_err(ctx->offset, BAD_TYPE4STR);
387 struct traceprobe_parse_context *ctx)
290 parse_trace_event_arg(char *arg, struct fetch_insn *code, struct traceprobe_parse_context *ctx) argument
351 check_prepare_btf_string_fetch(char *typename, struct fetch_insn **pcode, struct traceprobe_parse_context *ctx) argument
385 fetch_type_from_btf_type(struct btf *btf, const struct btf_type *type, struct traceprobe_parse_context *ctx) argument
439 query_btf_context(struct traceprobe_parse_context *ctx) argument
481 clear_btf_context(struct traceprobe_parse_context *ctx) argument
493 split_next_field(char *varname, char **next_field, struct traceprobe_parse_context *ctx) argument
522 parse_btf_field(char *fieldname, const struct btf_type *type, struct fetch_insn **pcode, struct fetch_insn *end, struct traceprobe_parse_context *ctx) argument
600 parse_btf_arg(char *varname, struct fetch_insn **pcode, struct fetch_insn *end, struct traceprobe_parse_context *ctx) argument
695 find_fetch_type_from_btf_type( struct traceprobe_parse_context *ctx) argument
707 parse_btf_bitfield(struct fetch_insn **pcode, struct traceprobe_parse_context *ctx) argument
730 clear_btf_context(struct traceprobe_parse_context *ctx) argument
735 query_btf_context(struct traceprobe_parse_context *ctx) argument
740 parse_btf_arg(char *varname, struct fetch_insn **pcode, struct fetch_insn *end, struct traceprobe_parse_context *ctx) argument
748 parse_btf_bitfield(struct fetch_insn **pcode, struct traceprobe_parse_context *ctx) argument
758 check_prepare_btf_string_fetch(char *typename, struct fetch_insn **pcode, struct traceprobe_parse_context *ctx) argument
874 parse_probe_vars(char *orig_arg, const struct fetch_type *t, struct fetch_insn **pcode, struct fetch_insn *end, struct traceprobe_parse_context *ctx) argument
1014 parse_probe_arg(char *arg, const struct fetch_type *type, struct fetch_insn **pcode, struct fetch_insn *end, struct traceprobe_parse_context *ctx) argument
1220 parse_probe_arg_type(char *arg, struct probe_arg *parg, struct traceprobe_parse_context *ctx) argument
1284 finalize_fetch_insn(struct fetch_insn *code, struct probe_arg *parg, char *type, int type_offset, struct traceprobe_parse_context *ctx) argument
1403 traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size, struct probe_arg *parg, struct traceprobe_parse_context *ctx) argument
1546 traceprobe_parse_probe_arg(struct trace_probe *tp, int i, const char *arg, struct traceprobe_parse_context *ctx) argument
1600 argv_has_var_arg(int argc, const char *argv[], int *args_idx, struct traceprobe_parse_context *ctx) argument
1636 sprint_nth_btf_arg(int idx, const char *type, char *buf, int bufsize, struct traceprobe_parse_context *ctx) argument
1661 traceprobe_expand_meta_args(int argc, const char *argv[], int *new_argc, char *buf, int bufsize, struct traceprobe_parse_context *ctx) argument
1803 traceprobe_finish_parse(struct traceprobe_parse_context *ctx) argument
[all...]
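
Across trace_uprobe.c, trace_kprobe.c and trace_fprobe.c the parse context follows the same lifecycle: initialize a stack ctx with flags, thread it through traceprobe_parse_probe_arg() per argument, then release its resources with traceprobe_finish_parse(). A user-space sketch of that init/use/finish shape; the heap buffer stands in for the BTF references the kernel ctx holds:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct parse_ctx {
        unsigned int flags;
        int offset;         /* for error reporting, as in ctx->offset */
        char *scratch;      /* stands in for ctx->btf and friends */
    };

    static int parse_probe_arg(const char *arg, struct parse_ctx *ctx)
    {
        if (!ctx->scratch)
            ctx->scratch = malloc(64);      /* acquired lazily on first use */
        ctx->offset += (int)strlen(arg) + 1;
        printf("parsed '%s' (flags=%#x, offset now %d)\n", arg, ctx->flags, ctx->offset);
        return 0;
    }

    static void finish_parse(struct parse_ctx *ctx)
    {
        free(ctx->scratch);                 /* mirrors traceprobe_finish_parse() */
        ctx->scratch = NULL;
    }

    int main(void)
    {
        struct parse_ctx ctx = { .flags = 0x1 /* e.g. a "kernel" flag */ };
        const char *argv[] = { "%di", "+8($stack)" };

        for (unsigned int i = 0; i < 2; i++)
            parse_probe_arg(argv[i], &ctx);
        finish_parse(&ctx);
        return 0;
    }
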
trace_kprobe.c:742 struct sym_count_ctx *ctx = data; local
744 if (strcmp(name, ctx->name) == 0)
745 ctx->count++;
752 struct sym_count_ctx ctx = { .count = 0, .name = func_name }; local
754 kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count);
756 module_kallsyms_on_each_symbol(NULL, count_mod_symbols, &ctx);
758 return ctx.count;
804 struct traceprobe_parse_context ctx = { .flags = TPARG_FL_KERNEL }; local
885 ctx.flags |= TPARG_FL_RETURN;
888 ctx
[all...]
trace_fprobe.c:1000 struct traceprobe_parse_context ctx = { local
1077 ctx.flags |= TPARG_FL_RETURN;
1079 ctx.flags |= TPARG_FL_FENTRY;
1082 ctx.flags |= TPARG_FL_TPOINT;
1089 ctx.funcname = kallsyms_lookup(
1093 ctx.funcname = symbol;
1097 abuf, MAX_BTF_ARGS_LEN, &ctx);
1129 ctx.offset = 0;
1130 ret = traceprobe_parse_probe_arg(&tf->tp, i, argv[i], &ctx);
1158 traceprobe_finish_parse(&ctx);
[all...]
/linux-master/kernel/sched/
fair.c:8316 static void set_cpus_allowed_fair(struct task_struct *p, struct affinity_context *ctx) argument
8318 set_cpus_allowed_common(p, ctx);
deadline.c:2576 struct affinity_context *ctx)
2591 if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {
2605 set_cpus_allowed_common(p, ctx);
2575 set_cpus_allowed_dl(struct task_struct *p, struct affinity_context *ctx) argument
core.c:2393 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2396 struct affinity_context *ctx);
2736 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx) argument
2738 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2739 p->cpus_ptr = ctx->new_mask;
2743 cpumask_copy(&p->cpus_mask, ctx->new_mask);
2744 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2749 if (ctx->flags & SCA_USER)
2750 swap(p->user_cpus_ptr, ctx->user_mask);
2754 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx) argument
3202 __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx) argument
3704 __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx) argument
8355 __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx) argument
[all...]
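
set_cpus_allowed_common() above copies the new mask into the task and caches cpumask_weight() so hot paths never recount bits. The same two steps with a plain bitmask, __builtin_popcountl standing in for cpumask_weight():

    #include <stdio.h>

    struct task {
        unsigned long cpus_mask;
        int nr_cpus_allowed;
    };

    struct affinity_ctx {
        unsigned long new_mask;
        unsigned int flags;
    };

    static void set_cpus_allowed_sketch(struct task *p, struct affinity_ctx *ctx)
    {
        p->cpus_mask = ctx->new_mask;                            /* cpumask_copy() */
        p->nr_cpus_allowed = __builtin_popcountl(ctx->new_mask); /* cpumask_weight() */
    }

    int main(void)
    {
        struct task t = { 0 };
        struct affinity_ctx ctx = { .new_mask = 0xf0 };  /* CPUs 4-7 */

        set_cpus_allowed_sketch(&t, &ctx);
        printf("mask=%#lx nr_cpus_allowed=%d\n", t.cpus_mask, t.nr_cpus_allowed);
        return 0;
    }
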
/linux-master/kernel/
kallsyms.c:817 struct bpf_iter__ksym ctx; local
826 ctx.meta = &meta;
827 ctx.ksym = m ? m->private : NULL;
828 return bpf_iter_run_prog(prog, &ctx);
/linux-master/kernel/events/
uprobes.c:867 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
869 return !uc->filter || uc->filter(uc, ctx, mm);
873 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
880 ret = consumer_filter(uc, ctx, mm);
1847 enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL; local
1849 while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
2173 bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, argument
866 consumer_filter(struct uprobe_consumer *uc, enum uprobe_filter_ctx ctx, struct mm_struct *mm) argument
872 filter_chain(struct uprobe *uprobe, enum uprobe_filter_ctx ctx, struct mm_struct *mm) argument
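
consumer_filter() above is a default-accept predicate: a consumer with no ->filter callback matches every context, otherwise its callback decides, and filter_chain() ORs the consumers together. A compact sketch; the int ctx parameter replaces enum uprobe_filter_ctx and the mm argument:

    #include <stdbool.h>
    #include <stdio.h>

    struct consumer {
        bool (*filter)(const struct consumer *uc, int ctx);
    };

    static bool consumer_filter(const struct consumer *uc, int ctx)
    {
        return !uc->filter || uc->filter(uc, ctx);  /* no filter = accept all */
    }

    static bool filter_chain(const struct consumer *ucs, int n, int ctx)
    {
        for (int i = 0; i < n; i++)
            if (consumer_filter(&ucs[i], ctx))
                return true;
        return false;
    }

    static bool only_ctx_7(const struct consumer *uc, int ctx)
    {
        (void)uc;
        return ctx == 7;
    }

    int main(void)
    {
        struct consumer ucs[] = { { only_ctx_7 } };

        printf("ctx=7 -> %d, ctx=3 -> %d\n",
               filter_chain(ucs, 1, 7), filter_chain(ucs, 1, 3));
        return 0;
    }
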
core.c:159 struct perf_event_context *ctx)
161 raw_spin_lock(&cpuctx->ctx.lock);
162 if (ctx)
163 raw_spin_lock(&ctx->lock);
167 struct perf_event_context *ctx)
169 if (ctx)
170 raw_spin_unlock(&ctx->lock);
171 raw_spin_unlock(&cpuctx->ctx.lock);
190 * On task ctx scheduling...
192 * When !ctx
158 perf_ctx_lock(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) argument
166 perf_ctx_unlock(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) argument
221 struct perf_event_context *ctx = event->ctx; local
265 struct perf_event_context *ctx = event->ctx; local
318 struct perf_event_context *ctx = event->ctx; local
688 perf_ctx_disable(struct perf_event_context *ctx, bool cgroup) argument
699 perf_ctx_enable(struct perf_event_context *ctx, bool cgroup) argument
821 struct perf_event_context *ctx = &cpuctx->ctx; local
968 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) argument
990 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) argument
1059 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) argument
1064 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) argument
1162 get_ctx(struct perf_event_context *ctx) argument
1183 struct perf_event_context *ctx; local
1189 put_ctx(struct perf_event_context *ctx) argument
1269 struct perf_event_context *ctx; local
1296 perf_event_ctx_unlock(struct perf_event *event, struct perf_event_context *ctx) argument
1309 unclone_ctx(struct perf_event_context *ctx) argument
1372 struct perf_event_context *ctx; local
1428 struct perf_event_context *ctx; local
1439 perf_unpin_context(struct perf_event_context *ctx) argument
1451 __update_context_time(struct perf_event_context *ctx, bool adv) argument
1473 update_context_time(struct perf_event_context *ctx) argument
1480 struct perf_event_context *ctx = event->ctx; local
1493 struct perf_event_context *ctx = event->ctx; local
1510 struct perf_event_context *ctx = event->ctx; local
1543 get_event_groups(struct perf_event *event, struct perf_event_context *ctx) argument
1685 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) argument
1711 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) argument
1775 list_add_event(struct perf_event *event, struct perf_event_context *ctx) argument
2006 list_del_event(struct perf_event *event, struct perf_event_context *ctx) argument
2064 struct perf_event_context *ctx = event->ctx; local
2164 struct perf_event_context *ctx = event->ctx; local
2254 event_sched_out(struct perf_event *event, struct perf_event_context *ctx) argument
2316 group_sched_out(struct perf_event *group_event, struct perf_event_context *ctx) argument
2345 __perf_remove_from_context(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx, void *info) argument
2409 struct perf_event_context *ctx = event->ctx; local
2433 __perf_event_disable(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx, void *info) argument
2475 struct perf_event_context *ctx = event->ctx; local
2498 struct perf_event_context *ctx; local
2518 event_sched_in(struct perf_event *event, struct perf_event_context *ctx) argument
2577 group_sched_in(struct perf_event *group_event, struct perf_event_context *ctx) argument
2654 add_event_to_ctx(struct perf_event *event, struct perf_event_context *ctx) argument
2661 task_ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type) argument
2675 perf_event_sched_in(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) argument
2765 struct perf_event_context *ctx = event->ctx; local
2830 perf_install_in_context(struct perf_event_context *ctx, struct perf_event *event, int cpu) argument
2941 __perf_event_enable(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx, void *info) argument
2994 struct perf_event_context *ctx = event->ctx; local
3031 struct perf_event_context *ctx; local
3164 struct perf_event_context *ctx; local
3244 struct perf_event_context *ctx = pmu_ctx->ctx; local
3283 ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type) argument
3420 perf_event_sync_stat(struct perf_event_context *ctx, struct perf_event_context *next_ctx) argument
3482 perf_ctx_sched_task_cb(struct perf_event_context *ctx, bool sched_in) argument
3498 struct perf_event_context *ctx = task->perf_event_ctxp; local
3732 visit_groups_merge(struct perf_event_context *ctx, struct perf_event_groups *groups, int cpu, struct pmu *pmu, int (*func)(struct perf_event *, void *), void *data) argument
3836 struct perf_event_context *ctx = event->ctx; local
3868 pmu_groups_sched_in(struct perf_event_context *ctx, struct perf_event_groups *groups, struct pmu *pmu) argument
3877 ctx_groups_sched_in(struct perf_event_context *ctx, struct perf_event_groups *groups, bool cgroup) argument
3890 __pmu_ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu) argument
3897 ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type) argument
3946 struct perf_event_context *ctx; local
4185 perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle) argument
4219 rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) argument
4345 struct perf_event_context *ctx; local
4363 event_enable_on_exec(struct perf_event *event, struct perf_event_context *ctx) argument
4382 perf_event_enable_on_exec(struct perf_event_context *ctx) argument
4433 perf_event_remove_on_exec(struct perf_event_context *ctx) argument
4502 struct perf_event_context *ctx = event->ctx; local
4703 struct perf_event_context *ctx = event->ctx; local
4734 __perf_event_init_context(struct perf_event_context *ctx) argument
4758 struct perf_event_context *ctx; local
4797 struct perf_event_context *ctx, *clone_ctx = NULL; local
4868 find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx, struct perf_event *event) argument
4965 struct perf_event_context *ctx = epc->ctx; local
5187 exclusive_event_installable(struct perf_event *event, struct perf_event_context *ctx) argument
5349 struct perf_event_context *ctx = event->ctx; local
5496 struct perf_event_context *ctx; local
5510 struct perf_event_context *ctx = leader->ctx; local
5590 struct perf_event_context *ctx = leader->ctx; local
5697 struct perf_event_context *ctx; local
5744 struct perf_event_context *ctx; local
5782 struct perf_event_context *ctx = event->ctx; local
5794 __perf_event_period(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx, void *info) argument
5860 struct perf_event_context *ctx; local
6011 struct perf_event_context *ctx; local
6050 struct perf_event_context *ctx; local
6066 struct perf_event_context *ctx; local
7967 perf_iterate_ctx(struct perf_event_context *ctx, perf_iterate_f output, void *data, bool all) argument
8017 struct perf_event_context *ctx; local
8077 struct perf_event_context *ctx; local
8905 struct perf_event_context *ctx; local
9563 struct bpf_perf_event_data_kern ctx = { local
10295 perf_tp_event_target_task(u64 count, void *record, struct pt_regs *regs, struct perf_sample_data *data, struct perf_event_context *ctx) argument
10358 struct perf_event_context *ctx; local
11030 struct perf_event_context *ctx = event->ctx; local
11662 struct perf_event_context *ctx = NULL; local
12439 struct perf_event_context *ctx; local
12841 struct perf_event_context *ctx; local
12930 __perf_pmu_remove(struct perf_event_context *ctx, int cpu, struct pmu *pmu, struct perf_event_groups *groups, struct list_head *events) argument
12950 __perf_pmu_install_event(struct pmu *pmu, struct perf_event_context *ctx, int cpu, struct perf_event *event) argument
12973 __perf_pmu_install(struct perf_event_context *ctx, int cpu, struct pmu *pmu, struct list_head *events) argument
13064 perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) argument
13209 perf_free_event(struct perf_event *event, struct perf_event_context *ctx) argument
13239 struct perf_event_context *ctx; local
13686 struct perf_event_context *ctx = __info; local
13699 struct perf_event_context *ctx; local
13722 struct perf_event_context *ctx; local
[all...]
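
perf_ctx_lock()/perf_ctx_unlock() at the top of the perf hits encode a fixed lock order: per-CPU context first, optional task context second, released in reverse, so the pair can never deadlock against itself. A sketch with pthread mutexes standing in for raw spinlocks:

    /* build: cc sketch.c -pthread */
    #include <pthread.h>
    #include <stdio.h>

    struct ctx { pthread_mutex_t lock; };

    static void perf_ctx_lock(struct ctx *cpuctx, struct ctx *ctx)
    {
        pthread_mutex_lock(&cpuctx->lock);  /* per-CPU context always first */
        if (ctx)
            pthread_mutex_lock(&ctx->lock); /* task context is optional */
    }

    static void perf_ctx_unlock(struct ctx *cpuctx, struct ctx *ctx)
    {
        if (ctx)
            pthread_mutex_unlock(&ctx->lock);
        pthread_mutex_unlock(&cpuctx->lock);
    }

    int main(void)
    {
        struct ctx cpuctx = { PTHREAD_MUTEX_INITIALIZER };
        struct ctx taskctx = { PTHREAD_MUTEX_INITIALIZER };

        perf_ctx_lock(&cpuctx, &taskctx);
        printf("both contexts locked, cpu ctx first\n");
        perf_ctx_unlock(&cpuctx, &taskctx);
        return 0;
    }
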
/linux-master/kernel/bpf/
syscall.c:2187 struct audit_context *ctx = NULL; local
2195 ctx = audit_context();
2196 ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
/linux-master/io_uring/
net.c:141 if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
151 struct io_ring_ctx *ctx = req->ctx; local
154 hdr = io_alloc_cache_get(&ctx->netmsg_cache);
320 if (unlikely(req->ctx->compat)) {
431 if (req->ctx->compat)
691 if (unlikely(req->ctx->compat)) {
802 if (req->ctx->compat)
1200 struct io_ring_ctx *ctx = req->ctx; local
[all...]
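
io_uring/net.c above pairs io_alloc_cache_get() with io_alloc_cache_put() around msghdr allocations. The cache internals aren't visible in these hits, so the one-slot cache below is only an assumed model of the get-before-alloc, put-before-free shape:

    #include <stdio.h>
    #include <stdlib.h>

    struct cache { void *slot; };

    static void *cache_get(struct cache *c, size_t size)
    {
        if (c->slot) {              /* cache hit: reuse the parked object */
            void *obj = c->slot;
            c->slot = NULL;
            return obj;
        }
        return malloc(size);        /* cold path: allocate fresh */
    }

    static void cache_put(struct cache *c, void *obj)
    {
        if (!c->slot)               /* cache accepted the object */
            c->slot = obj;
        else
            free(obj);              /* cache full: really free */
    }

    int main(void)
    {
        struct cache netmsg_cache = { NULL };
        void *hdr = cache_get(&netmsg_cache, 128);

        cache_put(&netmsg_cache, hdr);
        void *again = cache_get(&netmsg_cache, 128);
        printf("reused cached hdr: %s\n", again == hdr ? "yes" : "no");
        cache_put(&netmsg_cache, again);
        free(cache_get(&netmsg_cache, 128));    /* drain for a clean exit */
        return 0;
    }
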
memmap.c:198 struct io_ring_ctx *ctx = file->private_data; local
205 if (ctx->flags & IORING_SETUP_NO_MMAP)
207 return ctx->rings;
210 if (ctx->flags & IORING_SETUP_NO_MMAP)
212 return ctx->sq_sqes;
219 bl = io_pbuf_get_bl(ctx, bgid);
223 io_put_bl(ctx, bl);
231 int io_uring_mmap_pages(struct io_ring_ctx *ctx, struct vm_area_struct *vma, argument
244 struct io_ring_ctx *ctx = file->private_data; local
256 return io_uring_mmap_pages(ctx, vm
[all...]
/linux-master/include/net/
request_sock.h:212 struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */ member in struct:fastopen_queue
/linux-master/include/linux/
skbuff.h:540 * The ctx field is used to track device context.
555 void *ctx; member in struct:ubuf_info_msgzc::__anon426::__anon427
1514 u32 bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
mm_types.h:568 struct userfaultfd_ctx *ctx; member in struct:vm_userfaultfd_ctx

Completed in 3496 milliseconds
