/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#ifdef CONFIG_HARDWARE_DEBUG_API

#include <string.h>
#include <util.h>
#include <arch/model/statedata.h>
#include <arch/machine/debug.h>
#include <arch/machine/debug_conf.h>
#include <arch/kernel/vspace.h>
#include <arch/machine/registerset.h>
#include <armv/debug.h>
#include <mode/machine/debug.h>
#include <sel4/constants.h> /* seL4_NumExclusiveBreakpoints/Watchpoints */

/* DBGDSCR: monitor/halting debug enables and user-mode access control. */
#define DBGDSCR_MDBGEN (BIT(15))
#define DBGDSCR_HDBGEN (BIT(14))
#define DBGDSCR_USER_ACCESS_DISABLE (BIT(12))

/* DBGLSR software lock status. Bit 0 is always RAO. */
#define DBGLSR_LOCK_IMPLEMENTED (BIT(0))
#define DBGLSR_LOCK_ENABLED (BIT(1))
/* Key value that must be written to DBGLAR to release the software lock. */
#define DBGLAR_UNLOCK_VALUE (0xC5ACCE55u)

/* Key value for the OS lock access register (DBGOSLAR). */
#define DBGOSLAR_LOCK_VALUE (0xC5ACCE55u)

/* DBGOSLSR.OSLM is split across two non-adjacent bits (bit 3 and bit 0);
 * this macro recombines them into a 2-bit lock-model value.
 */
#define DBGOSLSR_GET_OSLOCK_MODEL(v) ((((v) >> 2u) & 0x2u) | ((v) & 0x1u))
#define DBGOSLSR_LOCK_MODEL_NO_OSLOCK (0u)
#define DBGOSLSR_LOCK_MODEL_OSLOCK_AND_OSSR (1u)
#define DBGOSLSR_LOCK_MODEL_OSLOCK_ONLY (2u)

/* DBGPRSR lock status bits. */
#define DBGPRSR_OSLOCK (BIT(5))
#define DBGPRSR_OS_DLOCK (BIT(6))

/* DBGOSDLR: OS double-lock enable. */
#define DBGOSDLR_LOCK_ENABLE (BIT(0))

/* DBGAUTHSTATUS: invasive-debug authentication status, for non-secure (NSI)
 * and secure (SI) invasive debug respectively.
 */
#define DBGAUTHSTATUS_NSI_IMPLEMENTED (BIT(1))
#define DBGAUTHSTATUS_NSI_ENABLED (BIT(0))
#define DBGAUTHSTATUS_SI_IMPLEMENTED (BIT(5))
#define DBGAUTHSTATUS_SI_ENABLED (BIT(4))

/* Low two bits of DBGDRAR/DBGDSAR indicate whether the address is valid. */
#define DBGDRAR_VALID (MASK(2))
#define DBGDSAR_VALID (MASK(2))

/* DBGSDER: allows invasive debug in secure user mode. */
#define DBGSDER_ENABLE_SECURE_USER_INVASIVE_DEBUG (BIT(0))

/* ARMv7 Manuals, c3.3.1:
 * "Breakpoint debug events are synchronous. That is, the debug event acts
 * like an exception that cancels the breakpointed instruction."
 *
 * ARMv7 Manuals, c3.4.1:
 * "Watchpoint debug events are precise and can be synchronous or asynchronous:
 * a synchronous Watchpoint debug event acts like a synchronous abort
 * exception on the memory access instruction itself.
An asynchronous
 * Watchpoint debug event acts like a precise asynchronous abort exception that
 * cancels a later instruction."
 */

/* Encodings for BCR[2:1] (privilege level at which the breakpoint matches). */
enum breakpoint_privilege /* BCR[2:1] */ {
    DBGBCR_PRIV_RESERVED = 0u,
    DBGBCR_PRIV_PRIVILEGED = 1u,
    DBGBCR_PRIV_USER = 2u,
    /* Use either when doing context linking, because the linked WVR or BVR that
     * specifies the vaddr, overrides the context-programmed BCR privilege.
     */
    DBGBCR_BCR_PRIV_EITHER = 3u
};

/* Encodings for WCR[2:1] (privilege level at which the watchpoint matches). */
enum watchpoint_privilege /* WCR[2:1] */ {
    DBGWCR_PRIV_RESERVED = 0u,
    DBGWCR_PRIV_PRIVILEGED = 1u,
    DBGWCR_PRIV_USER = 2u,
    DBGWCR_PRIV_EITHER = 3u
};

/* Encodings for WCR[4:3] (access type that triggers the watchpoint). */
enum watchpoint_access /* WCR[4:3] */ {
    DBGWCR_ACCESS_RESERVED = 0u,
    DBGWCR_ACCESS_LOAD = 1u,
    DBGWCR_ACCESS_STORE = 2u,
    DBGWCR_ACCESS_EITHER = 3u
};

/** Describes the availability and level of support for the debug features on
 * a particular CPU. Currently a static local singleton instance, but for
 * multiprocessor adaptation, just make it per-CPU.
 *
 * The majority of the writing to the debug coprocessor is done when a thread
 * is being context-switched to, so the code in this file always executes on
 * the target CPU. MP adaptation should come with few surprises.
 */
typedef struct debug_state {
    bool_t is_available, coprocessor_is_baseline_only, watchpoint_8b_supported,
           non_secure_invasive_unavailable, secure_invasive_unavailable,
           cpu_is_in_secure_mode, single_step_supported, breakpoints_supported,
           watchpoints_supported;
    uint8_t debug_armv;         /* debug arch version, e.g. 0x71 for v7.1 */
    uint8_t didr_version, oem_variant, oem_revision;
} debug_state_t;
/* Singleton instance; see note above about per-CPU adaptation. */
static debug_state_t dbg;

/* Returns whether the CPU supports the full 8-bit BAS field (8-byte
 * watchpoints); cached at init time in dbg.watchpoint_8b_supported.
 */
bool_t byte8WatchpointsSupported(void)
{
    return dbg.watchpoint_8b_supported;
}

/* CP15/CP14 operand strings used with the MRC/MCR macros. The exact string
 * contents are consumed by inline assembly and must not be altered.
 */
#define SCR "p15, 0, %0, c1, c1, 0"
#define DBGDIDR "p14,0,%0,c0,c0,0"
/* Not guaranteed in v7, only v7.1+ */
#define DBGDRCR ""
#define DBGVCR "p15, 0, %0, c0, c7, 0"

#define DBGDRAR_32 "p14,0,%0,c1,c0,0"
#define DBGDRAR_64 "p14,0,%Q0,%R0,c1"
#define DBGDSAR_32 "p14,0,%0,c2,c0,0"
#define DBGDSAR_64 "p14,0,%Q0,%R0,c2"

/* ARMv7 manual C11.11.41:
 * "This register is required in all implementations."
 * "In v7.1 DBGPRSR is not visible in the CP14 interface."
 */
#define DBGPRSR "p14, 0, %0, c1, c5, 4"

#define DBGOSLAR "p14,0,%0,c1,c0,4"
/* ARMv7 manual: C11.11.32:
 * "In any implementation, software can read this register to detect whether
 * the OS Save and Restore mechanism is implemented. If it is not implemented
 * the read of DBGOSLSR.OSLM returns zero."
 */
#define DBGOSLSR "p14,0,%0,c1,c1,4"

/* ARMv7 manual: C11.11.30:
 * "This register is only visible in the CP14 interface."
 * "In v7 Debug, this register is not implemented."
 * "In v7.1 Debug, this register is required in all implementations."
 */
#define DBGOSDLR "p14, 0, %0, c1, c3, 4"

#define DBGDEVID2 "p14,0,%0,c7,c0,7"
#define DBGDEVID1 "p14,0,%0,c7,c1,7"
#define DBGDEVID "p14,0,%0,c7,c2,7"
#define DBGDEVTYPE ""

/* ARMv7 manual: C11.11.1: DBGAUTHSTATUS:
 * "This register is required in all implementations."
 * However, in v7, it is only visible in the memory mapped interface.
 * However, in the v6 manual, this register is not mentioned at all and doesn't
 * exist.
 */
#define DBGAUTHSTATUS "p14,0,%0,c7,c14,6"

#endif /* CONFIG_HARDWARE_DEBUG_API */

#ifdef ARM_BASE_CP14_SAVE_AND_RESTORE

/* Enable bits for the breakpoint/watchpoint control registers. */
#define DBGBCR_ENABLE (BIT(0))

#define DBGWCR_ENABLE (BIT(0))

/* Builds the CP14 operand string for the MRC/MCR macros. The breakpoint/
 * watchpoint register number is encoded in the opcode (crm), so each register
 * index needs its own string — hence the generated switch statements below.
 */
#define MAKE_P14(crn, crm, opc2) "p14, 0, %0, c" #crn ", c" #crm ", " #opc2
#define MAKE_DBGBVR(num) MAKE_P14(0, num, 4)
#define MAKE_DBGBCR(num) MAKE_P14(0, num, 5)
#define MAKE_DBGWVR(num) MAKE_P14(0, num, 6)
#define MAKE_DBGWCR(num) MAKE_P14(0, num, 7)
#define MAKE_DBGXVR(num) MAKE_P14(1, num, 1)

/** Generates read functions for the CP14 control and value registers.
 *
 * The register index must be a compile-time constant in the instruction
 * encoding, so a runtime bp_num is dispatched through a switch. Indices
 * greater than 15 fall into the default case, which asserts bp_num == 0.
 */
#define DEBUG_GENERATE_READ_FN(_name, _reg) \
static word_t \
_name(uint16_t bp_num) \
{ \
    word_t ret; \
 \
    switch (bp_num) { \
    case 1: \
        MRC(MAKE_ ## _reg(1), ret); \
        return ret; \
    case 2: \
        MRC(MAKE_ ## _reg(2), ret); \
        return ret; \
    case 3: \
        MRC(MAKE_ ## _reg(3), ret); \
        return ret; \
    case 4: \
        MRC(MAKE_ ## _reg(4), ret); \
        return ret; \
    case 5: \
        MRC(MAKE_ ## _reg(5), ret); \
        return ret; \
    case 6: \
        MRC(MAKE_ ## _reg(6), ret); \
        return ret; \
    case 7: \
        MRC(MAKE_ ## _reg(7), ret); \
        return ret; \
    case 8: \
        MRC(MAKE_ ## _reg(8), ret); \
        return ret; \
    case 9: \
        MRC(MAKE_ ## _reg(9), ret); \
        return ret; \
    case 10: \
        MRC(MAKE_ ## _reg(10), ret); \
        return ret; \
    case 11: \
        MRC(MAKE_ ## _reg(11), ret); \
        return ret; \
    case 12: \
        MRC(MAKE_ ## _reg(12), ret); \
        return ret; \
    case 13: \
        MRC(MAKE_ ## _reg(13), ret); \
        return ret; \
    case 14: \
        MRC(MAKE_ ## _reg(14), ret); \
        return ret; \
    case 15: \
        MRC(MAKE_ ## _reg(15), ret); \
        return ret; \
    default: \
        assert(bp_num == 0); \
        MRC(MAKE_ ## _reg(0), ret); \
        return ret; \
    } \
}

/** Generates write functions for the CP14 control and value registers.
 *
 * Mirror of DEBUG_GENERATE_READ_FN; see the comment there for why a switch
 * is required.
 */
#define DEBUG_GENERATE_WRITE_FN(_name, _reg) \
static void \
_name(uint16_t bp_num, word_t val) \
{ \
    switch (bp_num) { \
    case 1: \
        MCR(MAKE_ ## _reg(1), val); \
        return; \
    case 2: \
        MCR(MAKE_ ## _reg(2), val); \
        return; \
    case 3: \
        MCR(MAKE_ ## _reg(3), val); \
        return; \
    case 4: \
        MCR(MAKE_ ## _reg(4), val); \
        return; \
    case 5: \
        MCR(MAKE_ ## _reg(5), val); \
        return; \
    case 6: \
        MCR(MAKE_ ## _reg(6), val); \
        return; \
    case 7: \
        MCR(MAKE_ ## _reg(7), val); \
        return; \
    case 8: \
        MCR(MAKE_ ## _reg(8), val); \
        return; \
    case 9: \
        MCR(MAKE_ ## _reg(9), val); \
        return; \
    case 10: \
        MCR(MAKE_ ## _reg(10), val); \
        return; \
    case 11: \
        MCR(MAKE_ ## _reg(11), val); \
        return; \
    case 12: \
        MCR(MAKE_ ## _reg(12), val); \
        return; \
    case 13: \
        MCR(MAKE_ ## _reg(13), val); \
        return; \
    case 14: \
        MCR(MAKE_ ## _reg(14), val); \
        return; \
    case 15: \
        MCR(MAKE_ ## _reg(15), val); \
        return; \
    default: \
        assert(bp_num == 0); \
        MCR(MAKE_ ## _reg(0), val); \
        return; \
    } \
}

DEBUG_GENERATE_READ_FN(readBcrCp, DBGBCR)
DEBUG_GENERATE_READ_FN(readBvrCp, DBGBVR)
DEBUG_GENERATE_READ_FN(readWcrCp, DBGWCR)
DEBUG_GENERATE_READ_FN(readWvrCp, DBGWVR)
DEBUG_GENERATE_WRITE_FN(writeBcrCp, DBGBCR)
DEBUG_GENERATE_WRITE_FN(writeBvrCp, DBGBVR)
DEBUG_GENERATE_WRITE_FN(writeWcrCp, DBGWCR)
DEBUG_GENERATE_WRITE_FN(writeWvrCp, DBGWVR)

/* These next few functions (read*Context()/write*Context()) read from TCB
 * context and not from the hardware registers.
302 */ 303static word_t 304readBcrContext(tcb_t *t, uint16_t index) 305{ 306 assert(index < seL4_NumExclusiveBreakpoints); 307 return t->tcbArch.tcbContext.breakpointState.breakpoint[index].cr; 308} 309 310static word_t readBvrContext(tcb_t *t, uint16_t index) 311{ 312 assert(index < seL4_NumExclusiveBreakpoints); 313 return t->tcbArch.tcbContext.breakpointState.breakpoint[index].vr; 314} 315 316static word_t readWcrContext(tcb_t *t, uint16_t index) 317{ 318 assert(index < seL4_NumExclusiveWatchpoints); 319 return t->tcbArch.tcbContext.breakpointState.watchpoint[index].cr; 320} 321 322static word_t readWvrContext(tcb_t *t, uint16_t index) 323{ 324 assert(index < seL4_NumExclusiveWatchpoints); 325 return t->tcbArch.tcbContext.breakpointState.watchpoint[index].vr; 326} 327 328static void writeBcrContext(tcb_t *t, uint16_t index, word_t val) 329{ 330 assert(index < seL4_NumExclusiveBreakpoints); 331 t->tcbArch.tcbContext.breakpointState.breakpoint[index].cr = val; 332} 333 334static void writeBvrContext(tcb_t *t, uint16_t index, word_t val) 335{ 336 assert(index < seL4_NumExclusiveBreakpoints); 337 t->tcbArch.tcbContext.breakpointState.breakpoint[index].vr = val; 338} 339 340static void writeWcrContext(tcb_t *t, uint16_t index, word_t val) 341{ 342 assert(index < seL4_NumExclusiveWatchpoints); 343 t->tcbArch.tcbContext.breakpointState.watchpoint[index].cr = val; 344} 345 346static void writeWvrContext(tcb_t *t, uint16_t index, word_t val) 347{ 348 assert(index < seL4_NumExclusiveWatchpoints); 349 t->tcbArch.tcbContext.breakpointState.watchpoint[index].vr = val; 350} 351 352#endif /* ARM_BASE_CP14_SAVE_AND_RESTORE */ 353 354#ifdef CONFIG_HARDWARE_DEBUG_API 355 356/** For debugging: prints out the debug register pair values as returned by the 357 * coprocessor. 358 * 359 * @param nBp Number of breakpoint reg pairs to print, starting at BP #0. 360 * @param nBp Number of watchpoint reg pairs to print, starting at WP #0. 
361 */ 362UNUSED static void dumpBpsAndWpsCp(int nBp, int nWp) 363{ 364 int i; 365 366 for (i = 0; i < nBp; i++) { 367 userError("CP BP %d: Bcr %lx, Bvr %lx", i, readBcrCp(i), readBvrCp(i)); 368 } 369 370 for (i = 0; i < nWp; i++) { 371 userError("CP WP %d: Wcr %lx, Wvr %lx", i, readWcrCp(i), readWvrCp(i)); 372 } 373} 374 375/** Print a thread's saved debug context. For debugging. This differs from 376 * dumpBpsAndWpsCp in that it reads from a thread's saved register context, and 377 * not from the hardware coprocessor registers. 378 * 379 * @param at arch_tcb_t where the thread's reg context is stored. 380 * @param nBp Number of BP regs to print, beginning at BP #0. 381 * @param mWp Number of WP regs to print, beginning at WP #0. 382 */ 383UNUSED static void dumpBpsAndWpsContext(tcb_t *t, int nBp, int nWp) 384{ 385 int i; 386 387 for (i = 0; i < nBp; i++) { 388 userError("Ctxt BP %d: Bcr %lx, Bvr %lx", i, readBcrContext(t, i), readBvrContext(t, i)); 389 } 390 391 for (i = 0; i < nWp; i++) { 392 userError("Ctxt WP %d: Wcr %lx, Wvr %lx", i, readWcrContext(t, i), readWvrContext(t, i)); 393 } 394} 395 396/* ARM allows watchpoint trigger on load, load-exclusive, and "swap" accesses. 397 * store, store-exclusive and "swap" accesses. All accesses. 398 * 399 * The mask defines which bits are EXCLUDED from the comparison. 400 * Always program the DBGDWVR with a WORD aligned address, and use the BAS to 401 * state which bits form part of the match. 402 * 403 * It seems the BAS works as a bitmask of bytes to select in the range. 404 * 405 * To detect support for the 8-bit BAS field: 406 * * If the 8-bit BAS is unsupported, then BAS[7:4] is RAZ/WI. 407 * 408 * When using an 8-byte watchpoint that is not dword aligned, the result is 409 * undefined. You should program it as the aligned base of the range, and select 410 * only the relevant bytes then. 
411 * 412 * You cannot do sparse byte selection: you either select a single byte in the 413 * BAS or you select a contiguous range. ARM has deprecated sparse byte 414 * selection. 415 */ 416 417/** Convert a watchpoint size (0, 1, 2, 4 or 8 bytes) into the arch specific 418 * register encoding. 419 */ 420static word_t convertSizeToArch(word_t size) 421{ 422 switch (size) { 423 case 1: 424 return 0x1; 425 case 2: 426 return 0x3; 427 case 8: 428 return 0xFF; 429 default: 430 assert(size == 4); 431 return 0xF; 432 } 433} 434 435/** Convert an arch specific encoded watchpoint size back into a simple integer 436 * representation. 437 */ 438static word_t convertArchToSize(word_t archsize) 439{ 440 switch (archsize) { 441 case 0x1: 442 return 1; 443 case 0x3: 444 return 2; 445 case 0xFF: 446 return 8; 447 default: 448 assert(archsize == 0xF); 449 return 4; 450 } 451} 452 453/** Convert an access perms API value (seL4_BreakOnRead, etc) into the register 454 * encoding that matches it. 455 */ 456static word_t convertAccessToArch(word_t access) 457{ 458 switch (access) { 459 case seL4_BreakOnRead: 460 return DBGWCR_ACCESS_LOAD; 461 case seL4_BreakOnWrite: 462 return DBGWCR_ACCESS_STORE; 463 default: 464 assert(access == seL4_BreakOnReadWrite); 465 return DBGWCR_ACCESS_EITHER; 466 } 467} 468 469/** Convert an arch-specific register encoding back into an API access perms 470 * value. 
471 */ 472static word_t convertArchToAccess(word_t archaccess) 473{ 474 switch (archaccess) { 475 case DBGWCR_ACCESS_LOAD: 476 return seL4_BreakOnRead; 477 case DBGWCR_ACCESS_STORE: 478 return seL4_BreakOnWrite; 479 default: 480 assert(archaccess == DBGWCR_ACCESS_EITHER); 481 return seL4_BreakOnReadWrite; 482 } 483} 484 485static uint16_t getBpNumFromType(uint16_t bp_num, word_t type) 486{ 487 assert(type == seL4_InstructionBreakpoint || type == seL4_DataBreakpoint 488 || type == seL4_SingleStep); 489 490 switch (type) { 491 case seL4_InstructionBreakpoint: 492 case seL4_SingleStep: 493 return bp_num; 494 default: /* seL4_DataBreakpoint: */ 495 assert(type == seL4_DataBreakpoint); 496 return bp_num + seL4_NumExclusiveBreakpoints; 497 } 498} 499 500/** Extracts the "Method of Entry" bits from DBGDSCR. 501 * 502 * Used to determine what type of debug exception has occurred. 503 */ 504static inline word_t getMethodOfEntry(void) 505{ 506 dbg_dscr_t dscr; 507 508 dscr.words[0] = readDscrCp(); 509 return dbg_dscr_get_methodOfEntry(dscr); 510} 511 512/** Sets up the requested hardware breakpoint register. 513 * 514 * Acts as the backend for seL4_TCB_SetBreakpoint. Doesn't actually operate 515 * on the hardware coprocessor, but just modifies the thread's debug register 516 * context. The thread will pop off the updated register context when it is 517 * popping its context the next time it runs. 518 * 519 * On ARM the hardware breakpoints are consumed by all operations, including 520 * single-stepping, unlike x86, where single-stepping doesn't require the use 521 * of an actual hardware breakpoint register (just uses the EFLAGS.TF bit). 522 * 523 * @param at arch_tcb_t that points to the register context of the thread we 524 * want to modify. 525 * @param bp_num The hardware register we want to set up. 526 * @params vaddr, type, size, rw: seL4 API values for seL4_TCB_SetBreakpoint. 527 * All documented in the seL4 API Manuals. 
528 */ 529void setBreakpoint(tcb_t *t, 530 uint16_t bp_num, 531 word_t vaddr, word_t type, word_t size, word_t rw) 532{ 533 bp_num = convertBpNumToArch(bp_num); 534 535 /* C3.3.4: "A debugger can use either byte address selection or address range 536 * masking, if it is implemented. However, it must not attempt to use both at 537 * the same time" 538 * 539 * "v7 Debug and v7.1 Debug deprecate any use of the DBGBCR.MASK field." 540 * ^ So prefer to use DBGBCR.BAS instead. When using masking, you must set 541 * BAS to all 1s, and when using BAS you must set the MASK field to all 0s. 542 * 543 * To detect support for BPAddrMask: 544 * * When it's unsupported: DBGBCR.MASK is always RAZ/WI, and EITHER: 545 * * DBGIDR.DEVID_tmp is RAZ 546 * * OR DBGIDR.DEVID_tmp is RAO and DBGDEVID.{CIDMask, BPAddrMask} are RAZ. 547 * * OR: 548 * * DBGDEVID.BPAddrMask indicates whether addr masking is supported. 549 * * DBGBCR.MASK is UNK/SBZP. 550 * 551 * Setting BAS to 0b0000 makes the cpu break on every instruction. 552 * Be aware that the processor checks the MASK before the BAS. 553 * You must set BAS to 0b1111 for all context match comparisons. 554 */ 555 if (type == seL4_InstructionBreakpoint) { 556 dbg_bcr_t bcr; 557 558 writeBvrContext(t, bp_num, vaddr); 559 560 /* Preserve reserved bits. 
*/ 561 bcr.words[0] = readBcrContext(t, bp_num); 562 bcr = dbg_bcr_set_enabled(bcr, 1); 563 bcr = dbg_bcr_set_linkedBrp(bcr, 0); 564 bcr = dbg_bcr_set_supervisorAccess(bcr, DBGBCR_PRIV_USER); 565 bcr = dbg_bcr_set_byteAddressSelect(bcr, convertSizeToArch(4)); 566 bcr = Arch_setupBcr(bcr, true); 567 writeBcrContext(t, bp_num, bcr.words[0]); 568 } else { 569 dbg_wcr_t wcr; 570 571 writeWvrContext(t, bp_num, vaddr); 572 573 /* Preserve reserved bits */ 574 wcr.words[0] = readWcrContext(t, bp_num); 575 wcr = dbg_wcr_set_enabled(wcr, 1); 576 wcr = dbg_wcr_set_supervisorAccess(wcr, DBGWCR_PRIV_USER); 577 wcr = dbg_wcr_set_byteAddressSelect(wcr, convertSizeToArch(size)); 578 wcr = dbg_wcr_set_loadStore(wcr, convertAccessToArch(rw)); 579 wcr = dbg_wcr_set_enableLinking(wcr, 0); 580 wcr = dbg_wcr_set_linkedBrp(wcr, 0); 581 wcr = Arch_setupWcr(wcr); 582 writeWcrContext(t, bp_num, wcr.words[0]); 583 } 584} 585 586/** Retrieves the current configuration of a hardware breakpoint for a given 587 * thread. 588 * 589 * Doesn't modify the configuration of that thread's breakpoints. 590 * 591 * @param at arch_tcb_t that holds the register context for the thread you wish 592 * to query. 593 * @param bp_num Hardware breakpoint ID. 594 * @return A struct describing the current configuration of the requested 595 * breakpoint. 
596 */ 597getBreakpoint_t getBreakpoint(tcb_t *t, uint16_t bp_num) 598{ 599 getBreakpoint_t ret; 600 601 ret.type = getTypeFromBpNum(bp_num); 602 bp_num = convertBpNumToArch(bp_num); 603 604 if (ret.type == seL4_InstructionBreakpoint) { 605 dbg_bcr_t bcr; 606 607 bcr.words[0] = readBcrContext(t, bp_num); 608 if (Arch_breakpointIsMismatch(bcr) == true) { 609 ret.type = seL4_SingleStep; 610 }; 611 ret.size = 0; 612 ret.rw = seL4_BreakOnRead; 613 ret.vaddr = readBvrContext(t, bp_num); 614 ret.is_enabled = dbg_bcr_get_enabled(bcr); 615 } else { 616 dbg_wcr_t wcr; 617 618 wcr.words[0] = readWcrContext(t, bp_num); 619 ret.size = convertArchToSize(dbg_wcr_get_byteAddressSelect(wcr)); 620 ret.rw = convertArchToAccess(dbg_wcr_get_loadStore(wcr)); 621 ret.vaddr = readWvrContext(t, bp_num); 622 ret.is_enabled = dbg_wcr_get_enabled(wcr); 623 } 624 return ret; 625} 626 627/** Disables and clears the configuration of a hardware breakpoint. 628 * 629 * @param at arch_tcb_t holding the reg context for the target thread. 630 * @param bp_num The hardware breakpoint you want to disable+clear. 631 */ 632void unsetBreakpoint(tcb_t *t, uint16_t bp_num) 633{ 634 word_t type; 635 636 type = getTypeFromBpNum(bp_num); 637 bp_num = convertBpNumToArch(bp_num); 638 639 if (type == seL4_InstructionBreakpoint) { 640 dbg_bcr_t bcr; 641 642 bcr.words[0] = readBcrContext(t, bp_num); 643 bcr = dbg_bcr_set_enabled(bcr, 0); 644 writeBcrContext(t, bp_num, bcr.words[0]); 645 writeBvrContext(t, bp_num, 0); 646 } else { 647 dbg_wcr_t wcr; 648 649 wcr.words[0] = readWcrContext(t, bp_num); 650 wcr = dbg_wcr_set_enabled(wcr, 0); 651 writeWcrContext(t, bp_num, wcr.words[0]); 652 writeWvrContext(t, bp_num, 0); 653 } 654} 655 656/** Initiates or halts single-stepping on the target process. 657 * 658 * @param at arch_tcb_t for the target process to be configured. 659 * @param bp_num The hardware ID of the breakpoint register to be used. 660 * @param n_instr The number of instructions to step over. 
661 */ 662bool_t configureSingleStepping(tcb_t *t, 663 uint16_t bp_num, 664 word_t n_instr, 665 bool_t is_reply) 666{ 667 /* ARMv7 manual, section D13.3.1: 668 * "v6.1 Debug introduces instruction address mismatch comparisons. 669 * v6 Debug does not support these comparisons." 670 * 671 * ^ The above line means that single-stepping is not supported on v6 debug. 672 * I.e, the KZM cannot use single-stepping. 673 */ 674 675 if (is_reply) { 676 bp_num = t->tcbArch.tcbContext.breakpointState.single_step_hw_bp_num; 677 } else { 678 bp_num = convertBpNumToArch(bp_num); 679 } 680 681 /* On ARM single-stepping is emulated using breakpoint mismatches. So you 682 * would basically set the breakpoint to mismatch everything, and this will 683 * cause an exception to be triggered on every instruction. 684 * 685 * We use NULL as the mismatch address since no code should be trying to 686 * execute NULL, so it's a perfect address to use as the mismatch 687 * criterion. An alternative might be to use an address in the kernel's 688 * high vaddrspace, since that's an address that it's impossible for 689 * userspace to be executing at. 690 */ 691 dbg_bcr_t bcr; 692 693 bcr.words[0] = readBcrContext(t, bp_num); 694 695 /* If the user calls us with n_instr == 0, allow them to configure, but 696 * leave it disabled. 
697 */ 698 if (n_instr > 0) { 699 bcr = dbg_bcr_set_enabled(bcr, 1); 700 t->tcbArch.tcbContext.breakpointState.single_step_enabled = true; 701 } else { 702 bcr = dbg_bcr_set_enabled(bcr, 0); 703 t->tcbArch.tcbContext.breakpointState.single_step_enabled = false; 704 } 705 706 bcr = dbg_bcr_set_linkedBrp(bcr, 0); 707 bcr = dbg_bcr_set_supervisorAccess(bcr, DBGBCR_PRIV_USER); 708 bcr = dbg_bcr_set_byteAddressSelect(bcr, convertSizeToArch(1)); 709 bcr = Arch_setupBcr(bcr, false); 710 711 writeBvrContext(t, bp_num, 0); 712 writeBcrContext(t, bp_num, bcr.words[0]); 713 714 t->tcbArch.tcbContext.breakpointState.n_instructions = n_instr; 715 t->tcbArch.tcbContext.breakpointState.single_step_hw_bp_num = bp_num; 716 return true; 717} 718 719/** Using the DBGDIDR register, detects the debug architecture version, and 720 * does a preliminary check for the level of support for our debug API. 721 * 722 * Reads DBGDIDR, which is guaranteed to be read safely. Then 723 * determine whether or not we can or should proceed. 724 * 725 * The majority of the debug setup is concerned with trying to tell which 726 * registers are safe to access on this CPU. The debug architecture is wildly 727 * different across different CPUs and platforms, so genericity is fairly 728 * challenging. 
729 */ 730BOOT_CODE static void initVersionInfo(void) 731{ 732 dbg_didr_t didr; 733 734 didr.words[0] = getDIDR(); 735 dbg.oem_revision = dbg_didr_get_revision(didr); 736 dbg.oem_variant = dbg_didr_get_variant(didr); 737 dbg.didr_version = dbg_didr_get_version(didr); 738 dbg.coprocessor_is_baseline_only = true; 739 dbg.breakpoints_supported = dbg.watchpoints_supported = 740 dbg.single_step_supported = true; 741 742 switch (dbg.didr_version) { 743 case 0x1: 744 dbg.debug_armv = 0x60; 745 dbg.single_step_supported = false; 746 break; 747 case 0x2: 748 dbg.debug_armv = 0x61; 749 break; 750 case 0x3: 751 dbg.debug_armv = 0x70; 752 dbg.coprocessor_is_baseline_only = false; 753 break; 754 case 0x4: 755 dbg.debug_armv = 0x70; 756 break; 757 case 0x5: 758 dbg.debug_armv = 0x71; 759 dbg.coprocessor_is_baseline_only = false; 760 break; 761 case 0x6: 762 dbg.debug_armv = 0x80; 763 dbg.coprocessor_is_baseline_only = false; 764 break; 765 default: 766 dbg.is_available = false; 767 dbg.debug_armv = 0; 768 return; 769 } 770 771 dbg.is_available = true; 772} 773 774/** Load an initial, all-disabled setup state for the registers. 775 */ 776BOOT_CODE static void disableAllBpsAndWps(void) 777{ 778 int i; 779 780 for (i = 0; i < seL4_NumExclusiveBreakpoints; i++) { 781 writeBvrCp(i, 0); 782 writeBcrCp(i, readBcrCp(i) & ~DBGBCR_ENABLE); 783 } 784 for (i = 0; i < seL4_NumExclusiveWatchpoints; i++) { 785 writeWvrCp(i, 0); 786 writeWcrCp(i, readWcrCp(i) & ~DBGWCR_ENABLE); 787 } 788 789 isb(); 790} 791 792/** Guides the debug hardware initialization sequence. 793 * 794 * In short, there is a small set of registers, the "baseline" registers, which 795 * are guaranteed to be available on all ARM debug architecture implementations. 796 * Aside from those, the rest are a *COMPLETE* toss-up, and detection is 797 * difficult, because if you access any particular register which is 798 * unavailable on an implementation, you trigger an #UNDEFINED exception. 
And
 * there is little uniformity or consistency.
 *
 * In addition, there are as many as 3 lock registers, all of which have
 * effects on which registers you can access...and only one of them is
 * consistently implemented. The others may or may not be implemented, and well,
 * you have to grope in the dark to determine whether or not they are...but
 * if they are implemented, their effect on software is still upheld, of course.
 *
 * Much of this sequence is catering for the different versions and determining
 * which registers and locks are implemented, and creating a common register
 * environment for the rest of the API code.
 *
 * There are several conditions which will cause the code to exit and give up.
 * For the most part, most implementations give you the baseline registers and
 * some others. When an implementation only supports the baseline registers and
 * nothing more, you're told so, and that basically means you can't do anything
 * with it because you have no reliable access to the debug registers.
 *
 * @return true if the debug hardware is usable; false (with dbg.is_available
 *         cleared) otherwise.
 */
BOOT_CODE bool_t Arch_initHardwareBreakpoints(void)
{
    word_t dbgosdlr, dbgoslsr;

    /* The functioning of breakpoints on ARM requires that certain external
     * pin signals be enabled. If these are not enabled, there is nothing
     * that can be done from software. If these are enabled, we can then
     * select the debug-mode we want by programming the CP14 interface.
     *
     * Of the four modes available, we want monitor mode, because only monitor
     * mode delivers breakpoint and watchpoint events to the kernel as
     * exceptions. The other modes cause a break into "debug mode" or ignore
     * debug events.
     */
    memset(&dbg, 0, sizeof(dbg));

    initVersionInfo();
    if (dbg.is_available == false) {
        printf("Debug architecture not implemented.\n");
        return false;
    }

    printf("DIDRv: %x, armv %x, coproc baseline only? %s.\n",
           dbg.didr_version, dbg.debug_armv,
           ((dbg.coprocessor_is_baseline_only) ? "yes" : "no"));

    if (dbg.debug_armv > 0x61) {
        if (dbg.coprocessor_is_baseline_only) {
            /* Baseline-only CP14: no reliable register access; give up. */
            printf("ARMDBG: No reliable access to DBG regs.\n");
            return dbg.is_available = false;
        }

        /* Interestingly, since the debug features have so many bits that
         * behave differently pending the state of secure-mode, ARM had to
         * expose a bit in the debug coprocessor that reveals whether or not the
         * CPU is in secure mode, or else it would be semi-impossible to program
         * this feature.
         */
        dbg.cpu_is_in_secure_mode = !(readDscrCp() & DBGDSCR_SECURE_MODE_DISABLED);
        if (dbg.cpu_is_in_secure_mode) {
            word_t sder;

            printf("CPU is in secure mode. Enabling debugging in secure user mode.\n");
            MRC(DBGSDER, sder);
            MCR(DBGSDER, sder
                | DBGSDER_ENABLE_SECURE_USER_INVASIVE_DEBUG);
        }

        /* Deal with OS Double-lock: */
        if (dbg.debug_armv == 0x71) {
            /* ARMv7 manuals, C11.11.30:
             * "In v7.1 Debug, this register is required in all implementations."
             */
            MRC(DBGOSDLR, dbgosdlr);
            MCR(DBGOSDLR, dbgosdlr & ~DBGOSDLR_LOCK_ENABLE);
        } else if (dbg.debug_armv == 0x70) {
            /* ARMv7 manuals, C11.11.30:
             * "In v7 Debug, this register is not implemented."
             *
             * So no need to do anything for debug v7.0.
             */
        }

        /* Now deal with OS lock: ARMv7 manual, C11.11.32:
         * "In any implementation, software can read this register to detect
         * whether the OS Save and Restore mechanism is implemented. If it is
         * not implemented the read of DBGOSLSR.OSLM returns zero."
         *
         * NOTE(review): writing a non-key value (~DBGOSLAR_LOCK_VALUE) to
         * DBGOSLAR is presumably intended to clear the OS lock — confirm
         * against ARM ARM C11.11.31 (only the key value sets the lock).
         */
        MRC(DBGOSLSR, dbgoslsr);
        if (DBGOSLSR_GET_OSLOCK_MODEL(dbgoslsr) != DBGOSLSR_LOCK_MODEL_NO_OSLOCK) {
            MCR(DBGOSLAR, ~DBGOSLAR_LOCK_VALUE);
        }

        disableAllBpsAndWps();
        if (!enableMonitorMode()) {
            return dbg.is_available = false;
        }
    } else {
        /* On v6 you have to enable monitor mode first.
         */
        if (!enableMonitorMode()) {
            return dbg.is_available = false;
        }
        disableAllBpsAndWps();
    }

    /* Finally, also pre-load some initial register state that can be used
     * for all new threads so that their initial saved debug register state
     * is valid when it's first loaded onto the CPU.
     */
    for (int i = 0; i < seL4_NumExclusiveBreakpoints; i++) {
        armKSNullBreakpointState.breakpoint[i].cr = readBcrCp(i) & ~DBGBCR_ENABLE;
    }
    for (int i = 0; i < seL4_NumExclusiveWatchpoints; i++) {
        armKSNullBreakpointState.watchpoint[i].cr = readWcrCp(i) & ~DBGWCR_ENABLE;
    }

    dbg.watchpoint_8b_supported = watchpoint8bSupported();
    return true;
}

/** Determines which breakpoint or watchpoint register caused the debug
 * exception to be triggered.
 *
 * Checks to see which hardware breakpoint was triggered, and saves
 * the ID of that breakpoint.
 * There is no short way to do this on ARM. On x86 there is a status
 * register that tells you which watchpoint has been triggered. On ARM
 * there is no such register, so you have to manually check each to see which
 * one was triggered.
 *
 * The arguments also work a bit differently from x86 as well. On x86 the
 * 2 arguments are dummy values, while on ARM, they contain useful information.
 *
 * @param vaddr The virtual address stored in the IFSR/DFSR register, which
 *              is either the watchpoint address or breakpoint address.
 * @param reason The presumed reason for the exception, which is based on
 *               whether it was a prefetch or data abort.
 * @return Struct with a member "bp_num", which is a non-negative integer if we
 *         successfully detected which debug register triggered the exception.
 *         "Bp_num" will be negative otherwise.
937 */ 938static int getAndResetActiveBreakpoint(word_t vaddr, word_t reason) 939{ 940 word_t align_mask; 941 int i, ret = -1; 942 943 if (reason == seL4_InstructionBreakpoint) { 944 for (i = 0; i < seL4_NumExclusiveBreakpoints; i++) { 945 dbg_bcr_t bcr; 946 word_t bvr = readBvrCp(i); 947 948 bcr.words[0] = readBcrCp(i); 949 /* The actual trigger address may be an unaligned sub-byte of the 950 * range, which means it's not guaranteed to match the aligned value 951 * that was programmed into the address register. 952 */ 953 align_mask = convertArchToSize(dbg_bcr_get_byteAddressSelect(bcr)); 954 align_mask = ~(align_mask - 1); 955 956 if (bvr != (vaddr & align_mask) || !dbg_bcr_get_enabled(bcr)) { 957 continue; 958 } 959 960 ret = i; 961 return ret; 962 } 963 } 964 965 if (reason == seL4_DataBreakpoint) { 966 for (i = 0; i < seL4_NumExclusiveWatchpoints; i++) { 967 dbg_wcr_t wcr; 968 word_t wvr = readWvrCp(i); 969 970 wcr.words[0] = readWcrCp(i); 971 align_mask = convertArchToSize(dbg_wcr_get_byteAddressSelect(wcr)); 972 align_mask = ~(align_mask - 1); 973 974 if (wvr != (vaddr & align_mask) || !dbg_wcr_get_enabled(wcr)) { 975 continue; 976 } 977 978 ret = i; 979 return ret; 980 } 981 } 982 983 return ret; 984} 985 986/** Abstract wrapper around the IFSR/DFSR fault status values. 987 * 988 * Format of the FSR bits is different for long and short descriptors, so 989 * extract the FSR bits and accompany them with a boolean. 990 */ 991typedef struct fault_status { 992 uint8_t status; 993 bool_t is_long_desc_format; 994} fault_status_t; 995 996static fault_status_t getFaultStatus(word_t hsr_or_fsr) 997{ 998 fault_status_t ret; 999 1000 /* Hyp mode uses the HSR, Hype syndrome register. */ 1001#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT 1002 /* the HSR only uses the long descriptor format. */ 1003 ret.is_long_desc_format = true; 1004 /* FSR[5:0]. 
*/ 1005 ret.status = hsr_or_fsr & 0x3F; 1006#else 1007 /* Non-hyp uses IFSR/DFSR */ 1008 if (hsr_or_fsr & BIT(FSR_LPAE_SHIFT)) { 1009 ret.is_long_desc_format = true; 1010 /* FSR[5:0] */ 1011 ret.status = hsr_or_fsr & 0x3F; 1012 } else { 1013 ret.is_long_desc_format = false; 1014 /* FSR[10] | FSR[3:0]. */ 1015 ret.status = (hsr_or_fsr & BIT(FSR_STATUS_BIT4_SHIFT)) >> FSR_STATUS_BIT4_SHIFT; 1016 ret.status <<= 4; 1017 ret.status = hsr_or_fsr & 0xF; 1018 } 1019#endif 1020 1021 return ret; 1022} 1023 1024/** Called to determine if an abort was a debug exception. 1025 * 1026 * The ARM debug exceptions look like Prefetch Aborts or Data Aborts, and you 1027 * have to examine some extra register state to determine whether or not the 1028 * abort you currently have on your hands is actually a debug exception. 1029 * 1030 * This routine takes care of the checks. 1031 * @param fs An abstraction of the DFSR/IFSR values, meant to make it irrelevant 1032 * whether we're using the long/short descriptors. Bit positions and 1033 * values change. This also makes the debug code forward compatible 1034 * aarch64. 1035 */ 1036bool_t isDebugFault(word_t hsr_or_fsr) 1037{ 1038 fault_status_t fs; 1039 1040 fs = getFaultStatus(hsr_or_fsr); 1041 if (fs.is_long_desc_format) { 1042 if (fs.status == FSR_LONGDESC_STATUS_DEBUG_EVENT) { 1043 return true; 1044 } 1045 } else { 1046 if (fs.status == FSR_SHORTDESC_STATUS_DEBUG_EVENT) { 1047 return true; 1048 } 1049 } 1050 1051 if (getMethodOfEntry() == DEBUG_ENTRY_ASYNC_WATCHPOINT) { 1052 userError("Debug: Watchpoint delivered as async abort."); 1053 return true; 1054 } 1055 return false; 1056} 1057 1058/** Called to process a debug exception. 1059 * 1060 * On x86, you're told which breakpoint register triggered the exception. 
 * On ARM, you're told the virtual address that triggered the exception and what
 * type of access (data access vs instruction execution) triggered the exception
 * and you have to figure out which register triggered it.
 *
 * For watchpoints, it's not very complicated: just check to see which
 * register matches the virtual address.
 *
 * For breakpoints, it's a bit more complex: since both breakpoints and single-
 * stepping are configured using the same registers, we need to first detect
 * whether single-stepping is enabled. If not, then we check for a breakpoint.
 * @param fault_vaddr The instruction vaddr which triggered the exception, as
 *                    extracted by the kernel.
 * @return A DebugException fault carrying the trigger vaddr, the API-level
 *         breakpoint ID and the reason for the exception.
 */
seL4_Fault_t handleUserLevelDebugException(word_t fault_vaddr)
{
#ifdef TRACK_KERNEL_ENTRIES
    ksKernelEntry.path = Entry_DebugFault;
    ksKernelEntry.word = fault_vaddr;
#endif

    word_t method_of_entry = getMethodOfEntry();
    /* active_bp is assigned on every path: either in the switch below
     * (single-step mismatch / BKPT cases) or by the call to
     * getAndResetActiveBreakpoint() after the switch.
     */
    int i, active_bp;
    seL4_Fault_t ret;
    word_t bp_reason, bp_vaddr;

    switch (method_of_entry) {
    case DEBUG_ENTRY_BREAKPOINT:
        bp_reason = seL4_InstructionBreakpoint;
        bp_vaddr = fault_vaddr;

        /* Could have been triggered by:
         *  1. An actual breakpoint.
         *  2. A breakpoint configured in mismatch mode to emulate
         *     single-stepping.
         *
         * If the register is configured for mismatch, then it's a single-step
         * exception. If the register is configured for match, then it's a
         * normal breakpoint exception.
         */
        for (i = 0; i < seL4_NumExclusiveBreakpoints; i++) {
            dbg_bcr_t bcr;

            bcr.words[0] = readBcrCp(i);
            if (!dbg_bcr_get_enabled(bcr) || Arch_breakpointIsMismatch(bcr) != true) {
                continue;
            }
            /* Return the first BP enabled and configured for mismatch. */
            bp_reason = seL4_SingleStep;
            active_bp = i;
            break;
        }
        break;

    case DEBUG_ENTRY_SYNC_WATCHPOINT:
        bp_reason = seL4_DataBreakpoint;
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        /* Sync watchpoint sets the BP vaddr in HDFAR. */
        bp_vaddr = getHDFAR();
#else
        bp_vaddr = getFAR();
#endif
        break;

    case DEBUG_ENTRY_ASYNC_WATCHPOINT:
        bp_reason = seL4_DataBreakpoint;
        /* Async WP sets the WP vaddr in DBGWFAR for both hyp and non-hyp. */
        bp_vaddr = getWFAR();
        break;

    default: /* EXPLICIT_BKPT: BKPT instruction */
        assert(method_of_entry == DEBUG_ENTRY_EXPLICIT_BKPT);
        bp_reason = seL4_SoftwareBreakRequest;
        bp_vaddr = fault_vaddr;
        active_bp = 0;
    }

    /* For plain instruction/data breakpoints, scan the debug registers to
     * find which one matched the faulting vaddr.
     */
    if (method_of_entry != DEBUG_ENTRY_EXPLICIT_BKPT
        && bp_reason != seL4_SingleStep) {
        active_bp = getAndResetActiveBreakpoint(bp_vaddr,
                                                bp_reason);
        assert(active_bp >= 0);
    }

    /* There is no hardware register associated with BKPT instruction
     * triggers.
     */
    if (bp_reason != seL4_SoftwareBreakRequest) {
        /* Convert the hardware BP num back into an API-ID */
        active_bp = getBpNumFromType(active_bp, bp_reason);
    }
    ret = seL4_Fault_DebugException_new(bp_vaddr, active_bp, bp_reason);
    return ret;
}

#endif /* CONFIG_HARDWARE_DEBUG_API */

#ifdef ARM_BASE_CP14_SAVE_AND_RESTORE

/** Mirrors Arch_initFpuContext.
 *
 * Zeroes out the BVR thread context and preloads reserved bit values from the
 * control regs into the thread context so we can operate solely on the values
 * cached in RAM in API calls, rather than retrieving the values from the
 * coprocessor.
1165 */ 1166void Arch_initBreakpointContext(user_context_t *uc) 1167{ 1168 uc->breakpointState = armKSNullBreakpointState; 1169} 1170 1171void loadAllDisabledBreakpointState(void) 1172{ 1173 int i; 1174 1175 /* We basically just want to read-modify-write each reg to ensure its 1176 * "ENABLE" bit is clear. We did preload the register context with the 1177 * reserved values from the control registers, so we can read our 1178 * initial values from either the coprocessor or the thread's register 1179 * context. 1180 * 1181 * Both are perfectly fine, and the only discriminant factor is performance. 1182 * I suspect that reading from RAM is faster than reading from the 1183 * coprocessor, but I can't be sure. 1184 */ 1185 for (i = 0; i < seL4_NumExclusiveBreakpoints; i++) { 1186 writeBcrCp(i, readBcrCp(i) & ~DBGBCR_ENABLE); 1187 } 1188 for (i = 0; i < seL4_NumExclusiveWatchpoints; i++) { 1189 writeWcrCp(i, readWcrCp(i) & ~DBGWCR_ENABLE); 1190 } 1191} 1192 1193/* We only need to save the breakpoint state in the hypervisor 1194 * build, and only for threads that have an associated VCPU. 1195 * 1196 * When the normal kernel is running with the debug API, all 1197 * changes to the debug regs are done through the debug API. 1198 * In the hypervisor build, the guest VM has full access to the 1199 * debug regs in PL1, so we need to save its values on vmexit. 1200 * 1201 * When saving the debug regs we will always save all of them. 1202 * When restoring, we will restore only those that have been used 1203 * for native threads; and we will restore all of them 1204 * unconditionally for VCPUs (because we don't know which of 1205 * them have been changed by the guest). 1206 * 1207 * To ensure that all the debug regs are restored unconditionally, 1208 * we just set the "used_breakpoints_bf" bitfield to all 1s in 1209 * associateVcpu. 
1210 */ 1211void saveAllBreakpointState(tcb_t *t) 1212{ 1213 int i; 1214 1215 assert(t != NULL); 1216 1217 for (i = 0; i < seL4_NumExclusiveBreakpoints; i++) { 1218 writeBvrContext(t, i, readBvrCp(i)); 1219 writeBcrContext(t, i, readBcrCp(i)); 1220 } 1221 1222 for (i = 0; i < seL4_NumExclusiveWatchpoints; i++) { 1223 writeWvrContext(t, i, readWvrCp(i)); 1224 writeWcrContext(t, i, readWcrCp(i)); 1225 } 1226} 1227 1228#ifdef ARM_HYP_CP14_SAVE_AND_RESTORE_VCPU_THREADS 1229void Arch_debugAssociateVCPUTCB(tcb_t *t) 1230{ 1231 /* Don't attempt to shift beyond end of word. */ 1232 assert(seL4_NumHWBreakpoints < sizeof(word_t) * 8); 1233 1234 /* Set all the bits to 1, so loadBreakpointState() will 1235 * restore all the debug regs unconditionally. 1236 */ 1237 t->tcbArch.tcbContext.breakpointState.used_breakpoints_bf = MASK(seL4_NumHWBreakpoints); 1238} 1239 1240void Arch_debugDissociateVCPUTCB(tcb_t *t) 1241{ 1242 t->tcbArch.tcbContext.breakpointState.used_breakpoints_bf = 0; 1243} 1244#endif 1245 1246static void loadBreakpointState(tcb_t *t) 1247{ 1248 int i; 1249 1250 assert(t != NULL); 1251 1252 for (i = 0; i < seL4_NumExclusiveBreakpoints; i++) { 1253 if (t->tcbArch.tcbContext.breakpointState.used_breakpoints_bf & BIT(i)) { 1254 writeBvrCp(i, readBvrContext(t, i)); 1255 writeBcrCp(i, readBcrContext(t, i)); 1256 } else { 1257 /* If the thread isn't using the BP, then just load 1258 * a default "disabled" state. 1259 */ 1260 writeBcrCp(i, readBcrCp(i) & ~DBGBCR_ENABLE); 1261 } 1262 } 1263 1264 for (i = 0; i < seL4_NumExclusiveWatchpoints; i++) { 1265 if (t->tcbArch.tcbContext.breakpointState.used_breakpoints_bf & 1266 BIT(i + seL4_NumExclusiveBreakpoints)) { 1267 writeWvrCp(i, readWvrContext(t, i)); 1268 writeWcrCp(i, readWcrContext(t, i)); 1269 } else { 1270 writeWcrCp(i, readWcrCp(i) & ~DBGWCR_ENABLE); 1271 } 1272 } 1273} 1274 1275/** Pops debug register context for a thread into the CPU. 1276 * 1277 * Mirrors the idea of restore_user_context. 
1278 */ 1279void restore_user_debug_context(tcb_t *target_thread) 1280{ 1281 assert(target_thread != NULL); 1282 1283 if (target_thread->tcbArch.tcbContext.breakpointState.used_breakpoints_bf == 0) { 1284 loadAllDisabledBreakpointState(); 1285 } else { 1286 loadBreakpointState(target_thread); 1287 } 1288 1289 /* ARMv6 manual, sec D3.3.7: 1290 * "The update of a BVR or a BCR is only guaranteed to be visible to 1291 * subsequent instructions after the execution of a PrefetchFlush operation, 1292 * the taking of an exception, or the return from an exception." 1293 * 1294 * So we don't need to execute ISB here because we're about to RFE. 1295 */ 1296} 1297 1298#endif /* ARM_BASE_CP14_SAVE_AND_RESTORE */ 1299