Lines matching refs:tr in kernel/bpf/trampoline.c: every reference to the trampoline pointer tr, one match per line, each prefixed with its line number in the source file.

33 static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex);
37 struct bpf_trampoline *tr = ops->private;
42 * tr->mutex is already locked.
44 lockdep_assert_held_once(&tr->mutex);
51 if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
52 !(tr->flags & BPF_TRAMP_F_ORIG_STACK)) {
53 if (WARN_ON_ONCE(tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY))
56 tr->flags |= BPF_TRAMP_F_SHARE_IPMODIFY;
64 * tr->mutex => direct_mutex (ftrace.c) => ftrace_lock (ftrace.c)
72 * mutex_trylock(&tr->mutex) to avoid deadlock in race condition
75 if (!mutex_trylock(&tr->mutex)) {
76 /* sleep 1 ms to make sure whatever holding tr->mutex makes
85 tr->flags |= BPF_TRAMP_F_SHARE_IPMODIFY;
87 if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
88 !(tr->flags & BPF_TRAMP_F_ORIG_STACK))
89 ret = bpf_trampoline_update(tr, false /* lock_direct_mutex */);
92 tr->flags &= ~BPF_TRAMP_F_SHARE_IPMODIFY;
94 if (tr->flags & BPF_TRAMP_F_ORIG_STACK)
95 ret = bpf_trampoline_update(tr, false /* lock_direct_mutex */);
102 mutex_unlock(&tr->mutex);
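The callback above (bpf_tramp_ftrace_ops_func, recovering tr from ops->private at line 37) can be invoked by ftrace while direct_mutex is already held, so taking tr->mutex unconditionally would invert the documented lock order tr->mutex => direct_mutex => ftrace_lock. A minimal hedged sketch of the trylock-and-retry idiom reconstructed from these matches; the -EAGAIN return is an assumption based on the retry comment, and the flag handling is elided:

        if (!mutex_trylock(&tr->mutex)) {
                /* Sleep 1 ms so whatever holds tr->mutex can make
                 * progress, then ask ftrace to retry the command. */
                msleep(1);
                return -EAGAIN;                 /* assumed retry signal */
        }
        /* set or clear BPF_TRAMP_F_SHARE_IPMODIFY and, where required,
         * call bpf_trampoline_update(tr, false) without direct_mutex */
        mutex_unlock(&tr->mutex);
        return ret;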
136 struct bpf_trampoline *tr;
142 hlist_for_each_entry(tr, head, hlist) {
143 if (tr->key == key) {
144 refcount_inc(&tr->refcnt);
148 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
149 if (!tr)
152 tr->fops = kzalloc(sizeof(struct ftrace_ops), GFP_KERNEL);
153 if (!tr->fops) {
154 kfree(tr);
155 tr = NULL;
158 tr->fops->private = tr;
159 tr->fops->ops_func = bpf_tramp_ftrace_ops_func;
162 tr->key = key;
163 INIT_HLIST_NODE(&tr->hlist);
164 hlist_add_head(&tr->hlist, head);
165 refcount_set(&tr->refcnt, 1);
166 mutex_init(&tr->mutex);
168 INIT_HLIST_HEAD(&tr->progs_hlist[i]);
171 return tr;
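Taken together, lines 136-171 are a lookup-or-create over a refcounted hash table. A hedged sketch assembled from those matches; trampoline_mutex, trampoline_table, the hash function, and the BPF_TRAMP_MAX loop bound are assumed names that do not appear among the matched lines:

        mutex_lock(&trampoline_mutex);          /* assumed global lock */
        head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
        hlist_for_each_entry(tr, head, hlist) {
                if (tr->key == key) {
                        refcount_inc(&tr->refcnt);      /* hit: take a ref */
                        goto out;
                }
        }
        tr = kzalloc(sizeof(*tr), GFP_KERNEL);          /* miss: allocate */
        if (!tr)
                goto out;
        tr->fops = kzalloc(sizeof(struct ftrace_ops), GFP_KERNEL);
        if (!tr->fops) {
                kfree(tr);
                tr = NULL;
                goto out;
        }
        tr->fops->private = tr;
        tr->fops->ops_func = bpf_tramp_ftrace_ops_func;
        tr->key = key;
        INIT_HLIST_NODE(&tr->hlist);
        hlist_add_head(&tr->hlist, head);
        refcount_set(&tr->refcnt, 1);
        mutex_init(&tr->mutex);
        for (i = 0; i < BPF_TRAMP_MAX; i++)     /* assumed loop bound */
                INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
        mutex_unlock(&trampoline_mutex);
        return tr;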
174 static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
176 void *ip = tr->func.addr;
179 if (tr->func.ftrace_managed)
180 ret = unregister_ftrace_direct(tr->fops, (long)old_addr, false);
187 static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr,
190 void *ip = tr->func.addr;
193 if (tr->func.ftrace_managed) {
195 ret = modify_ftrace_direct(tr->fops, (long)new_addr);
197 ret = modify_ftrace_direct_nolock(tr->fops, (long)new_addr);
205 static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
207 void *ip = tr->func.addr;
213 if (!tr->fops)
215 tr->func.ftrace_managed = true;
218 if (tr->func.ftrace_managed) {
219 ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
220 ret = register_ftrace_direct(tr->fops, (long)new_addr);
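register_fentry(), modify_fentry() and unregister_fentry() wrap the ftrace direct-call API whenever the patch site is ftrace-managed. A hedged sketch of an image's lifecycle using only the calls visible above; first_image and second_image are placeholder names:

        /* attach: filter tr->fops to the target ip, then install the image */
        ftrace_set_filter_ip(tr->fops, (unsigned long)tr->func.addr, 0, 1);
        err = register_ftrace_direct(tr->fops, (long)first_image);

        /* update: swap in a new image; the nolock variant at line 197 is
         * used when the caller must not take direct_mutex */
        err = modify_ftrace_direct(tr->fops, (long)second_image);

        /* detach: remove the direct call, keeping the filter hash */
        err = unregister_ftrace_direct(tr->fops, (long)second_image, false);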
229 bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
242 tlinks[kind].nr_links = tr->progs_cnt[kind];
243 *total += tr->progs_cnt[kind];
246 hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
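bpf_trampoline_get_progs() snapshots, per attach kind, the links hanging off the trampoline so the update path can work on a stable view. A hedged sketch of the per-kind loop; BPF_TRAMP_MAX and the per-link body are assumptions, only the counting and iteration lines appear among the matches:

        for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
                tlinks[kind].nr_links = tr->progs_cnt[kind];
                *total += tr->progs_cnt[kind];
                hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
                        /* per-link bookkeeping elided, e.g. noting whether
                         * any program needs the function IP (ip_arg) */
                }
        }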
393 static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex)
397 u32 orig_flags = tr->flags;
401 tlinks = bpf_trampoline_get_progs(tr, &total, &ip_arg);
406 err = unregister_fentry(tr, tr->cur_image->image);
407 bpf_tramp_image_put(tr->cur_image);
408 tr->cur_image = NULL;
413 tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
420 tr->flags |= BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
422 tr->flags |= BPF_TRAMP_F_RESTORE_REGS;
426 tr->flags |= BPF_TRAMP_F_IP_ARG;
430 if ((tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY) &&
431 (tr->flags & BPF_TRAMP_F_CALL_ORIG))
432 tr->flags |= BPF_TRAMP_F_ORIG_STACK;
435 size = arch_bpf_trampoline_size(&tr->func.model, tr->flags,
436 tlinks, tr->func.addr);
447 im = bpf_tramp_image_alloc(tr->key, size);
454 &tr->func.model, tr->flags, tlinks,
455 tr->func.addr);
461 WARN_ON(tr->cur_image && total == 0);
462 if (tr->cur_image)
464 err = modify_fentry(tr, tr->cur_image->image, im->image, lock_direct_mutex);
467 err = register_fentry(tr, im->image);
476 tr->fops->func = NULL;
477 tr->fops->trampoline = 0;
487 if (tr->cur_image)
488 bpf_tramp_image_put(tr->cur_image);
489 tr->cur_image = im;
493 tr->flags = orig_flags;
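bpf_trampoline_update() is the heart of the file: it regenerates the trampoline image for the current set of links and splices it in. A hedged outline stitched together from the matched lines; error paths and several flag branches are elided, and im, total, size, err and orig_flags come from the elided declarations:

        tlinks = bpf_trampoline_get_progs(tr, &total, &ip_arg);

        if (total == 0) {
                /* last link gone: detach and drop the current image */
                err = unregister_fentry(tr, tr->cur_image->image);
                bpf_tramp_image_put(tr->cur_image);
                tr->cur_image = NULL;
                goto out;
        }

        /* recompute flags, keeping only the sticky bits */
        tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
        /* CALL_ORIG/SKIP_FRAME, RESTORE_REGS and IP_ARG set as needed */
        if ((tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY) &&
            (tr->flags & BPF_TRAMP_F_CALL_ORIG))
                tr->flags |= BPF_TRAMP_F_ORIG_STACK;

        size = arch_bpf_trampoline_size(&tr->func.model, tr->flags,
                                        tlinks, tr->func.addr);
        im = bpf_tramp_image_alloc(tr->key, size);
        /* arch_prepare_bpf_trampoline() fills im->image (call elided) */

        if (tr->cur_image)
                err = modify_fentry(tr, tr->cur_image->image, im->image,
                                    lock_direct_mutex);
        else
                err = register_fentry(tr, im->image);

        if (!err) {
                if (tr->cur_image)
                        bpf_tramp_image_put(tr->cur_image);
                tr->cur_image = im;             /* publish the new image */
        } else {
                tr->flags = orig_flags;         /* roll back flag changes */
        }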
524 static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
532 if (tr->extension_prog)
539 cnt += tr->progs_cnt[i];
545 tr->extension_prog = link->link.prog;
546 return bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
554 hlist_for_each_entry(link_exiting, &tr->progs_hlist[kind], tramp_hlist) {
561 hlist_add_head(&link->tramp_hlist, &tr->progs_hlist[kind]);
562 tr->progs_cnt[kind]++;
563 err = bpf_trampoline_update(tr, true /* lock_direct_mutex */);
566 tr->progs_cnt[kind]--;
571 int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
575 mutex_lock(&tr->mutex);
576 err = __bpf_trampoline_link_prog(link, tr);
577 mutex_unlock(&tr->mutex);
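Linking is optimistic: add the link, bump the per-kind count, regenerate the trampoline, and roll back on failure. The counter decrement at line 566 is the visible half of that rollback; the matching hlist_del_init() is an assumption. A hedged sketch, followed by the public wrapper, which merely serializes on tr->mutex:

        hlist_add_head(&link->tramp_hlist, &tr->progs_hlist[kind]);
        tr->progs_cnt[kind]++;
        err = bpf_trampoline_update(tr, true /* lock_direct_mutex */);
        if (err) {
                hlist_del_init(&link->tramp_hlist);     /* assumed rollback */
                tr->progs_cnt[kind]--;
        }
        return err;

        int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
                                     struct bpf_trampoline *tr)
        {
                int err;

                mutex_lock(&tr->mutex);
                err = __bpf_trampoline_link_prog(link, tr);
                mutex_unlock(&tr->mutex);
                return err;
        }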
581 static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
588 WARN_ON_ONCE(!tr->extension_prog);
589 err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
590 tr->extension_prog->bpf_func, NULL);
591 tr->extension_prog = NULL;
595 tr->progs_cnt[kind]--;
596 return bpf_trampoline_update(tr, true /* lock_direct_mutex */);
600 int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
604 mutex_lock(&tr->mutex);
605 err = __bpf_trampoline_unlink_prog(link, tr);
606 mutex_unlock(&tr->mutex);
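Unlinking mirrors this: for an extension program the jump installed at attach time is poked back out with bpf_arch_text_poke(), while for every other kind the link is dropped and the trampoline regenerated. A hedged sketch of __bpf_trampoline_unlink_prog(); the BPF_TRAMP_REPLACE check and the hlist_del_init() are assumptions, the rest follows the matches:

        if (kind == BPF_TRAMP_REPLACE) {        /* assumed kind check */
                WARN_ON_ONCE(!tr->extension_prog);
                err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
                                         tr->extension_prog->bpf_func, NULL);
                tr->extension_prog = NULL;
                return err;
        }
        hlist_del_init(&link->tramp_hlist);     /* assumed removal */
        tr->progs_cnt[kind]--;
        return bpf_trampoline_update(tr, true /* lock_direct_mutex */);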
616 /* paired with 'shim_link->trampoline = tr' in bpf_trampoline_link_cgroup_shim */
672 static struct bpf_shim_tramp_link *cgroup_shim_find(struct bpf_trampoline *tr,
679 hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
695 struct bpf_trampoline *tr;
710 tr = bpf_trampoline_get(key, &tgt_info);
711 if (!tr)
714 mutex_lock(&tr->mutex);
716 shim_link = cgroup_shim_find(tr, bpf_func);
721 mutex_unlock(&tr->mutex);
722 bpf_trampoline_put(tr); /* bpf_trampoline_get above */
734 err = __bpf_trampoline_link_prog(&shim_link->link, tr);
738 shim_link->trampoline = tr;
739 /* note, we're still holding tr refcnt from above */
741 mutex_unlock(&tr->mutex);
745 mutex_unlock(&tr->mutex);
750 /* have to release tr while _not_ holding its mutex */
751 bpf_trampoline_put(tr); /* bpf_trampoline_get above */
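bpf_trampoline_link_cgroup_shim() reuses the same machinery to attach a shim program, keeping the trampoline reference taken by bpf_trampoline_get() for as long as the shim stays linked. A hedged sketch of the control flow; shim allocation, the reuse path's refcounting, and the -ENOMEM code are assumptions or elisions:

        tr = bpf_trampoline_get(key, &tgt_info);
        if (!tr)
                return -ENOMEM;                 /* assumed error code */

        mutex_lock(&tr->mutex);
        shim_link = cgroup_shim_find(tr, bpf_func);
        if (shim_link) {
                /* reuse the existing shim (refcounting elided) and drop
                 * our extra trampoline reference */
                mutex_unlock(&tr->mutex);
                bpf_trampoline_put(tr);
                return 0;
        }
        /* allocate shim_link (elided) */
        err = __bpf_trampoline_link_prog(&shim_link->link, tr);
        if (err)
                goto err;
        shim_link->trampoline = tr;     /* still holding tr refcnt from above */
        mutex_unlock(&tr->mutex);
        return 0;
err:
        mutex_unlock(&tr->mutex);
        /* release tr while _not_ holding its mutex */
        bpf_trampoline_put(tr);
        return err;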
759 struct bpf_trampoline *tr;
767 tr = bpf_trampoline_lookup(key);
768 if (WARN_ON_ONCE(!tr))
771 mutex_lock(&tr->mutex);
772 shim_link = cgroup_shim_find(tr, bpf_func);
773 mutex_unlock(&tr->mutex);
778 bpf_trampoline_put(tr); /* bpf_trampoline_lookup above */
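Shim teardown is the mirror image: look the trampoline up again, find the shim under tr->mutex, and drop references. A hedged sketch; the bpf_link_put() call is an assumption about how the shim link is released:

        tr = bpf_trampoline_lookup(key);
        if (WARN_ON_ONCE(!tr))
                return;

        mutex_lock(&tr->mutex);
        shim_link = cgroup_shim_find(tr, bpf_func);
        mutex_unlock(&tr->mutex);

        if (shim_link)
                bpf_link_put(&shim_link->link.link);    /* assumed release */

        bpf_trampoline_put(tr);         /* pair the lookup above */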
785 struct bpf_trampoline *tr;
787 tr = bpf_trampoline_lookup(key);
788 if (!tr)
791 mutex_lock(&tr->mutex);
792 if (tr->func.addr)
795 memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
796 tr->func.addr = (void *)tgt_info->tgt_addr;
798 mutex_unlock(&tr->mutex);
799 return tr;
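bpf_trampoline_get() is thin: look up (or create) the trampoline, then initialize the target's function model and address exactly once under tr->mutex. A hedged reconstruction, nearly verbatim from the matches; only the goto label is assumed:

        struct bpf_trampoline *bpf_trampoline_get(u64 key,
                                                  struct bpf_attach_target_info *tgt_info)
        {
                struct bpf_trampoline *tr;

                tr = bpf_trampoline_lookup(key);
                if (!tr)
                        return NULL;

                mutex_lock(&tr->mutex);
                if (tr->func.addr)
                        goto out;       /* already set up by a prior get */

                memcpy(&tr->func.model, &tgt_info->fmodel,
                       sizeof(tgt_info->fmodel));
                tr->func.addr = (void *)tgt_info->tgt_addr;
        out:
                mutex_unlock(&tr->mutex);
                return tr;
        }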
802 void bpf_trampoline_put(struct bpf_trampoline *tr)
806 if (!tr)
809 if (!refcount_dec_and_test(&tr->refcnt))
811 WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
814 if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
823 hlist_del(&tr->hlist);
824 if (tr->fops) {
825 ftrace_free_filter(tr->fops);
826 kfree(tr->fops);
828 kfree(tr);
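bpf_trampoline_put() tears the object down only on the final reference drop, and deliberately leaks rather than frees if any programs are still linked. A hedged sketch; the trampoline_mutex locking and the BPF_TRAMP_MAX bound are assumptions not shown among the matches:

        void bpf_trampoline_put(struct bpf_trampoline *tr)
        {
                int i;

                if (!tr)
                        return;
                mutex_lock(&trampoline_mutex);          /* assumed global lock */
                if (!refcount_dec_and_test(&tr->refcnt))
                        goto out;
                WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
                for (i = 0; i < BPF_TRAMP_MAX; i++)     /* assumed bound */
                        if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
                                goto out;       /* leak rather than free in use */
                hlist_del(&tr->hlist);
                if (tr->fops) {
                        ftrace_free_filter(tr->fops);
                        kfree(tr->fops);
                }
                kfree(tr);
        out:
                mutex_unlock(&trampoline_mutex);
        }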
1005 void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
1007 percpu_ref_get(&tr->pcref);
1010 void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
1012 percpu_ref_put(&tr->pcref);
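Note that in __bpf_tramp_enter()/__bpf_tramp_exit() the parameter named tr is a struct bpf_tramp_image, not a struct bpf_trampoline: every task executing inside a trampoline pins its image with a per-CPU reference, so an old image can only be freed once all in-flight enter/exit pairs have drained. A hedged sketch of the release side, which is not among the matches; the callback name and the percpu_ref_kill() call are assumptions consistent with the percpu_ref API:

        /* hypothetical release callback wired up at image allocation time */
        static void bpf_tramp_image_release(struct percpu_ref *pcref)
        {
                struct bpf_tramp_image *im;

                im = container_of(pcref, struct bpf_tramp_image, pcref);
                /* free im->image and im: no task can be inside it now */
        }

        /* on bpf_tramp_image_put(): stop new enters from pinning the image;
         * the callback above runs once the last __bpf_tramp_exit() drops
         * its reference */
        percpu_ref_kill(&im->pcref);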