/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>


/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);

static int nr_slots[TYPE_MAX];

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}

static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
	if (bp->attr.bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}

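/*
 * Illustrative example (hypothetical values, not part of the algorithm):
 * with nr_slots[TYPE_DATA] == 4 and tsk_pinned == { 1, 0, 1, 0 } on this
 * cpu, one task pins a single data breakpoint and another pins three, so
 * max_task_bp_pinned() returns 3.  tsk_pinned[i] counts the tasks that pin
 * i + 1 breakpoints; see toggle_bp_task_slot() below.
 */
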
/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
{
	struct perf_event_context *ctx = bp->ctx;
	struct perf_event *iter;
	int count = 0;

	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->ctx == ctx && find_slot_idx(iter) == type)
			count += hw_breakpoint_weight(iter);
	}

	return count;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			slots->pinned += max_task_bp_pinned(cpu, type);
		else
			slots->pinned += task_bp_pinned(bp, type);
		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(bp, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible[type], cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * in the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
	slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
				enum bp_type_idx type, int weight)
{
	unsigned int *tsk_pinned;
	int old_count = 0;
	int old_idx = 0;
	int idx = 0;

	old_count = task_bp_pinned(bp, type);
	old_idx = old_count - 1;
	idx = old_idx + weight;

	/* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
	if (enable) {
		tsk_pinned[idx]++;
		if (old_count > 0)
			tsk_pinned[old_idx]--;
	} else {
		tsk_pinned[idx]--;
		if (old_count > 0)
			tsk_pinned[old_idx]++;
	}
}

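/*
 * Illustrative example (hypothetical values): enabling a second breakpoint
 * of weight 1 for a task that already owns one gives old_count = 1,
 * old_idx = 0 and idx = 1, so tsk_pinned[0] is decremented and
 * tsk_pinned[1] is incremented: the task moves from the "one breakpoint"
 * bucket to the "two breakpoints" bucket.
 */
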
/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter cpu profiling */
	if (!tsk) {

		if (enable)
			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
		else
			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
		return;
	}

	/* Pinned counter task profiling */

	if (!enable)
		list_del(&bp->hw.bp_list);

	if (cpu >= 0) {
		toggle_bp_task_slot(bp, cpu, enable, type, weight);
	} else {
		for_each_online_cpu(cpu)
			toggle_bp_task_slot(bp, cpu, enable, type, weight);
	}

	if (enable)
		list_add_tail(&bp->hw.bp_list, &bp_task_head);
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per-task
 *          breakpoints (for this cpu) plus the number of per-cpu breakpoints
 *          (for this cpu) doesn't use up all the registers.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per-cpu
 *          bp for every cpu and we keep the max one. Same for the per-task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must
 *          keep at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	/*
	 * Simulate the addition of this breakpoint to the constraints
	 * and see the result.
	 */
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	toggle_bp_slot(bp, true, type, weight);

	return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
	enum bp_type_idx type;
	int weight;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
}

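/*
 * Illustrative example of the check in __reserve_bp_slot() (hypothetical
 * values): with nr_slots[type] == 4, three pinned breakpoints and one
 * flexible one already accounted, adding a pinned breakpoint of weight 1
 * yields slots.pinned == 4 and slots.flexible == 1, so
 * 4 + !!1 == 5 > 4 and the request fails with -ENOSPC, keeping one
 * register available for the flexible counter.  Without any flexible
 * counter the same request would succeed (4 + 0 == 4).
 */
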
/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp);

	return 0;
}

static int validate_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = arch_validate_hwbkpt_settings(bp);
	if (ret)
		return ret;

	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	ret = validate_hw_breakpoint(bp);

	/* if validate_hw_breakpoint() fails then release the bp slot */
	if (ret)
		release_bp_slot(bp);

	return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk),
						triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = validate_hw_breakpoint(bp);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

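/*
 * Typical caller-side usage of the per-task API above (an illustrative
 * sketch, not part of this file; 'watched', 'trigger_fn' and 'tsk' are
 * hypothetical caller-provided names):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)&watched;
 *	attr.bp_len = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, trigger_fn, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 *	...
 *	unregister_hw_breakpoint(bp);
 */
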
/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered)
{
	struct perf_event * __percpu *cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}
	put_online_cpus();

	return cpu_events;

fail:
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	put_online_cpus();

	free_percpu(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
	unsigned int **task_bp_pinned;
	int cpu, err_cpu;
	int i;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
			*task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
						  GFP_KERNEL);
			if (!*task_bp_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;

	return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
	/*
	 * Free everything allocated so far, including the partially
	 * initialized cpu we failed on (kfree(NULL) is a no-op for the
	 * types that were never allocated).
	 */
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
		if (err_cpu == cpu)
			break;
	}

	return -ENOMEM;
}
core_initcall(init_hw_breakpoint);


struct pmu perf_ops_bp = {
	.enable		= arch_install_hw_breakpoint,
	.disable	= arch_uninstall_hw_breakpoint,
	.read		= hw_breakpoint_pmu_read,
};

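/*
 * Illustrative caller-side sketch for the "wide" per-cpu API above (not part
 * of this file; 'watched' and 'trigger_fn' are hypothetical caller-provided
 * names, loosely modelled on in-tree users such as samples/hw_breakpoint):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wide_bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)&watched;
 *	attr.bp_len = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *	wide_bp = register_wide_hw_breakpoint(&attr, trigger_fn);
 *	if (IS_ERR((void __force *)wide_bp))
 *		return PTR_ERR((void __force *)wide_bp);
 *	...
 *	unregister_wide_hw_breakpoint(wide_bp);
 */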