Lines matching refs:kprobe

68 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance);
84 * 'kprobe::ainsn.insn' points to the copy of the instruction to be
292 * Check whether the given address is on a page of kprobe instruction slots.
360 static inline void set_kprobe_instance(struct kprobe *kp)
376 struct kprobe *get_kprobe(void *addr)
379 struct kprobe *p;
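
For context, get_kprobe() (line 376 above) is the central lookup used throughout this file: it hashes the probed address into 'kprobe_table' and walks the bucket under RCU. A minimal sketch of that lookup, assuming the hash-table details of recent kernels:

    struct kprobe *get_kprobe(void *addr)
    {
        struct hlist_head *head;
        struct kprobe *p;

        /* Hash the address into one of the kprobe_table buckets. */
        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu(p, head, hlist,
                                 lockdep_is_held(&kprobe_mutex)) {
            if (p->addr == addr)
                return p;
        }
        return NULL;
    }
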
392 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
395 static inline bool kprobe_aggrprobe(struct kprobe *p)
401 static inline bool kprobe_unused(struct kprobe *p)
407 /* Keep all fields in the kprobe consistent. */
408 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
419 * Call every 'kprobe::pre_handler' on the list, but ignore their return values.
422 void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
424 struct kprobe *kp;
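
opt_pre_handler() (line 422) walks the aggregated 'kprobe::list' and calls each enabled pre_handler while tracking the per-CPU kprobe_instance, discarding return values; aggr_pre_handler() (lines 392/1197) is the variant that propagates a nonzero return. A sketch of the loop, per recent kernels:

    void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
            if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
                set_kprobe_instance(kp);
                kp->pre_handler(kp, regs);    /* return value ignored */
            }
            reset_kprobe_instance();
        }
    }
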
437 static void free_aggr_kprobe(struct kprobe *p)
447 /* Return true if the kprobe is ready for optimization. */
448 static inline int kprobe_optready(struct kprobe *p)
460 /* Return true if the kprobe is disarmed. Note: 'p' must be on the hash list */
461 bool kprobe_disarmed(struct kprobe *p)
465 /* If the kprobe is not an aggr/opt probe, just return whether it is disabled */
475 static bool kprobe_queued(struct kprobe *p)
488 * Return an optimized kprobe whose optimizing code replaces
491 static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr)
494 struct kprobe *p = NULL;
561 /* Loop on 'freeing_list' for disarming and removing from kprobe hash list */
589 * This must not happen, but if there is a kprobe
675 /* Optimize kprobe if p is ready to be optimized */
676 static void optimize_kprobe(struct kprobe *p)
680 /* Check if the kprobe is disabled or not ready for optimization. */
724 /* Unoptimize a kprobe if p is optimized */
725 static void unoptimize_kprobe(struct kprobe *p, bool force)
741 * Forcibly unoptimize the kprobe here, and queue it
755 /* Optimized kprobe case */
766 static int reuse_unused_kprobe(struct kprobe *ap)
771 * Unused kprobe MUST be on the way of delayed unoptimizing (means
787 static void kill_optimized_kprobe(struct kprobe *p)
799 * Unused kprobe is on unoptimizing or freeing list. We move it
801 * the kprobe hash list and free it.
812 void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
819 static void prepare_optimized_kprobe(struct kprobe *p)
828 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
843 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
847 * NOTE: 'p' must be a normal registered kprobe.
849 static void try_to_optimize_kprobe(struct kprobe *p)
851 struct kprobe *ap;
854 /* Impossible to optimize an ftrace-based kprobe. */
869 /* If setting up optimization failed, fall back to a regular kprobe. */
887 struct kprobe *p;
904 pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n");
913 struct kprobe *p;
937 pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n");
980 static void __arm_kprobe(struct kprobe *p)
982 struct kprobe *_p;
989 /* Fall back to an unoptimized kprobe */
993 optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */
997 static void __disarm_kprobe(struct kprobe *p, bool reopt)
999 struct kprobe *_p;
1008 /* If another kprobe was blocked, re-optimize it. */
1033 static int reuse_unused_kprobe(struct kprobe *ap)
1036 * If the optimized kprobe is NOT supported, the aggr kprobe is
1037 * released at the same time that the last aggregated kprobe is
1039 * Thus there should be no chance to reuse an unused kprobe.
1045 static void free_aggr_kprobe(struct kprobe *p)
1051 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
1053 return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
1072 static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
1080 if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret))
1085 if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret))
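
The ftrace-based arming path (lines 1072-1085) reduces to two ftrace calls: route the probe address into the shared ops with ftrace_set_filter_ip(), then register the ops for its first user. A condensed sketch, error handling abridged:

    static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
                                   int *cnt)
    {
        /* Route this address to the kprobe ftrace handler. */
        int ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);

        if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n",
                      p->addr, ret))
            return ret;

        if (*cnt == 0) {
            /* First probe on this ops: start ftrace dispatch. */
            ret = register_ftrace_function(ops);
            if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret)) {
                ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
                return ret;
            }
        }
        (*cnt)++;
        return 0;
    }
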
1101 static int arm_kprobe_ftrace(struct kprobe *p)
1110 static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
1119 if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error %d)\n", ret))
1126 WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n",
1131 static int disarm_kprobe_ftrace(struct kprobe *p)
1145 static inline int arm_kprobe_ftrace(struct kprobe *p)
1150 static inline int disarm_kprobe_ftrace(struct kprobe *p)
1156 static int prepare_kprobe(struct kprobe *p)
1165 static int arm_kprobe(struct kprobe *kp)
1179 static int disarm_kprobe(struct kprobe *kp, bool reopt)
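
arm_kprobe() (line 1165) picks between the two mechanisms: an ftrace-located probe goes through arm_kprobe_ftrace(), anything else patches kernel text under 'text_mutex' with CPU hotplug locked out. Roughly, per recent kernels:

    static int arm_kprobe(struct kprobe *kp)
    {
        if (unlikely(kprobe_ftrace(kp)))
            return arm_kprobe_ftrace(kp);

        cpus_read_lock();
        mutex_lock(&text_mutex);
        __arm_kprobe(kp);    /* insert breakpoint, maybe queue optimization */
        mutex_unlock(&text_mutex);
        cpus_read_unlock();
        return 0;
    }
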
1195 * take care of invoking the individual kprobe handlers on p->list
1197 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1199 struct kprobe *kp;
1213 static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1216 struct kprobe *kp;
1229 void kprobes_inc_nmissed_count(struct kprobe *p)
1231 struct kprobe *kp;
1242 static struct kprobe kprobe_busy = {
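
'kprobe_busy' (line 1242) is a dummy probe that kprobe_busy_begin()/kprobe_busy_end() install as the per-CPU current kprobe, so that code such as kretprobe cleanup can mark the CPU as "inside a kprobe" and suppress reentrant probes. The begin side looks roughly like:

    void kprobe_busy_begin(void)
    {
        struct kprobe_ctlblk *kcb;

        preempt_disable();
        __this_cpu_write(current_kprobe, &kprobe_busy);
        kcb = get_kprobe_ctlblk();
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
    }
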
1263 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1266 unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */
1276 * Fill in the required fields of the aggregator kprobe. Replace the
1277 * earlier kprobe in the hlist with the aggregator kprobe.
1279 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1287 /* We don't care about a kprobe which has already gone. */
1299 * This registers the second or subsequent kprobe at the same address.
1301 static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
1304 struct kprobe *ap = orig_p;
1492 static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1499 * Check that 'p' is valid and return the aggregator kprobe
1502 static struct kprobe *__get_valid_kprobe(struct kprobe *p)
1504 struct kprobe *ap, *list_p;
1515 /* kprobe p is a valid probe */
1524 * Warn and return an error if the kprobe is being re-registered, since
1527 static inline int warn_kprobe_rereg(struct kprobe *p)
1539 static int check_ftrace_location(struct kprobe *p)
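
check_ftrace_location() (line 1539) decides whether the probe lands exactly on an ftrace-managed fentry/mcount site; if so, the probe must use the ftrace mechanism, or registration fails when CONFIG_KPROBES_ON_FTRACE is off. A sketch per recent kernels:

    static int check_ftrace_location(struct kprobe *p)
    {
        unsigned long addr = (unsigned long)p->addr;

        if (ftrace_location(addr) == addr) {
    #ifdef CONFIG_KPROBES_ON_FTRACE
            p->flags |= KPROBE_FLAG_FTRACE;
    #else
            return -EINVAL;
    #endif
        }
        return 0;
    }
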
1564 static int check_kprobe_address_safe(struct kprobe *p,
1625 int register_kprobe(struct kprobe *p)
1628 struct kprobe *old_p;
1686 /* Try to optimize kprobe */
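
register_kprobe() (line 1625) is the public entry point: it resolves 'symbol_name'/'offset' to an address, checks the blacklist and ftrace constraints, inserts the probe into the hash table, and arms it. A minimal module using it; the target function here is only an example, and any kallsyms-visible, non-blacklisted function would do:

    #include <linux/module.h>
    #include <linux/kprobes.h>

    static int my_pre(struct kprobe *p, struct pt_regs *regs)
    {
        pr_info("hit %s\n", p->symbol_name);
        return 0;    /* 0: let the probed instruction run as usual */
    }

    static struct kprobe my_kp = {
        .symbol_name = "do_sys_openat2",    /* example target */
        .pre_handler = my_pre,
    };

    static int __init my_init(void)
    {
        return register_kprobe(&my_kp);
    }

    static void __exit my_exit(void)
    {
        unregister_kprobe(&my_kp);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");
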
1699 static bool aggr_kprobe_disabled(struct kprobe *ap)
1701 struct kprobe *kp;
1716 static struct kprobe *__disable_kprobe(struct kprobe *p)
1718 struct kprobe *orig_p;
1723 /* Get an original kprobe for return */
1756 * Unregister a kprobe without a scheduler synchronization.
1758 static int __unregister_kprobe_top(struct kprobe *p)
1760 struct kprobe *ap, *list_p;
1762 /* Disable kprobe. This will disarm it if needed. */
1769 * This probe is an independent (and non-optimized) kprobe
1791 * For the kprobe-on-ftrace case, we keep the
1818 static void __unregister_kprobe_bottom(struct kprobe *p)
1820 struct kprobe *ap;
1823 /* This is an independent kprobe */
1827 ap = list_entry(p->list.next, struct kprobe, list);
1834 int register_kprobes(struct kprobe **kps, int num)
1852 void unregister_kprobe(struct kprobe *p)
1858 void unregister_kprobes(struct kprobe **kps, int num)
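
register_kprobes()/unregister_kprobes() (lines 1834/1858) take an array of kprobe pointers; registration is all-or-nothing, rolling back the already-registered probes if any entry fails. Usage is symmetric ('kp_a'/'kp_b' are hypothetical probes defined elsewhere):

    static struct kprobe *kps[] = { &kp_a, &kp_b };

    ret = register_kprobes(kps, ARRAY_SIZE(kps));    /* 0, or first error */
    ...
    unregister_kprobes(kps, ARRAY_SIZE(kps));
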
2066 struct kprobe *prev = kprobe_running();
2099 * This kprobe pre_handler is registered with every kretprobe. When probe
2102 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2128 * This kprobe pre_handler is registered with every kretprobe. When probe
2131 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2355 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
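
pre_handler_kretprobe() (three config-dependent variants above) is the entry-side glue that every kretprobe installs; users never call it directly but register a 'struct kretprobe' instead. A minimal sketch, with a hypothetical handler and an example target:

    #include <linux/kprobes.h>

    static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
    {
        pr_info("returned %ld\n", (long)regs_return_value(regs));
        return 0;
    }

    static struct kretprobe my_krp = {
        .kp.symbol_name = "kernel_clone",    /* example target */
        .handler        = my_ret,            /* runs at function return */
        .maxactive      = 20,                /* concurrent instances to track */
    };

    /* register_kretprobe(&my_krp); ... unregister_kretprobe(&my_krp); */
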
2363 /* Set the kprobe gone and remove its instruction buffer. */
2364 static void kill_kprobe(struct kprobe *p)
2366 struct kprobe *kp;
2371 * The module is going away. We should disarm the kprobe which
2396 /* Disable one kprobe */
2397 int disable_kprobe(struct kprobe *kp)
2400 struct kprobe *p;
2404 /* Disable this kprobe */
2414 /* Enable one kprobe */
2415 int enable_kprobe(struct kprobe *kp)
2418 struct kprobe *p;
2430 /* This kprobe has gone, we couldn't enable it. */
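
disable_kprobe()/enable_kprobe() (lines 2397/2415) toggle a probe without unregistering it: disabling disarms the probe when no other user shares the address, and enabling re-arms it unless the probe has gone away. Typical use, reusing the hypothetical 'my_kp' from the earlier sketch:

    ret = disable_kprobe(&my_kp);    /* stops firing, stays registered */
    ...
    ret = enable_kprobe(&my_kp);     /* -EINVAL if the probe has gone */
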
2454 void dump_kprobe(struct kprobe *kp)
2456 pr_err("Dump kprobe:\n.symbol_name = %s, .offset = %x, .addr = %pS\n",
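
dump_kprobe() (line 2454) is a small debugging aid that logs the probe's symbol, offset, and resolved address; a natural call site is an error path, e.g.:

    if (ret < 0) {
        pr_err("probe setup failed: %d\n", ret);
        dump_kprobe(&my_kp);    /* logs .symbol_name, .offset, .addr */
    }
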
2481 /* Add all symbols in the given area to the kprobe blacklist */
2529 * since a kprobe need not be at the beginning
2562 /* Remove all symbols in the given area from the kprobe blacklist */
2632 struct kprobe *p;
2698 struct kprobe *p;
2764 * Enable kprobe optimization - this kicks the optimizer which
2776 static void report_probe(struct seq_file *pi, struct kprobe *p,
2777 const char *sym, int offset, char *modname, struct kprobe *pp)
2828 struct kprobe *p, *kp;
2904 struct kprobe *p;
2920 /* Arming kprobes doesn't optimize the kprobe itself */
2950 struct kprobe *p;