• Home
  • History
  • Annotate
  • Raw
  • Download
  • only in /macosx-10.5.8/xnu-1228.15.4/osfmk/ppc/

Lines Matching refs:of

6  * This file contains Original Code and/or Modifications of Original Code
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
84 ; part of EA to make
106 * Returns 0 if add worked or the vaddr of the first overlap if not
203 mr r29,r4 ; Save top half of vaddr for later
204 mr r30,r5 ; Save bottom half of vaddr for later
219 or r0,r30,r0 ; Fill high word of 64-bit with 1s so we will properly carry
224 adde r8,r29,r22 ; Add the rest of the length on
225 rlwinm r9,r9,0,0,31 ; Clean top half of sum
228 cmplw cr1,r9,r5 ; Is the bottom part of our end less?
287 rlwimi r8,r8,28,0,3 ; Get the last nybble of the hash
288 rlwimi r10,r29,18,0,13 ; Shift EA[18:31] down to VSID (31-bit math works because of max hash table size)
291 xor r10,r10,r8 ; Calculate the low 32 bits of the VSID
316 subfic r7,r7,46 ; Get number of leading zeros
391 rlwinm. r0,r22,0,mpRIPb,mpRIPb ; Are we in the process of removing this one?
392 mr r3,r20 ; Save the top of the colliding address
393 rlwinm r4,r21,0,0,19 ; Save the bottom of the colliding address
397 cmplw r20,r8 ; High part of vaddr the same?
414 hamRemv: ori r4,r4,mapRtRemove ; We are in the process of removing the collision
428 li r3,failMapping ; Show that we failed some kind of mapping thing
440 * We return the virtual address of the removed mapping as a
574 mr r29,r4 ; Top half of vaddr
575 mr r30,r5 ; Bottom half of vaddr
592 mr r4,r29 ; High order of address
593 mr r5,r30 ; Low order of address
600 mr r15,r4 ; Save top of next vaddr
601 mr r16,r5 ; Save bottom of next vaddr
628 mr r4,r29 ; High order of address
629 mr r5,r30 ; Low order of address
636 mr r15,r4 ; Save top of next vaddr
637 mr r16,r5 ; Save bottom of next vaddr
672 rlwimi r23,r30,0,0,31 ; Insert low under high part of address
689 lwz r5,0(r26) ; Get the top of PTE
718 sync ; Make sure of it all
780 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
834 hrmRetn32: rlwinm r8,r31,0,0,19 ; Find start of page
836 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
855 stw r15,0(r6) ; Pass back the top of the next vaddr
856 stw r16,4(r6) ; Pass back the bottom of the next vaddr
862 rlwinm r3,r3,0,0,31 ; Clear top of register if 64-bit
883 li r3,failMapping ; Show that we failed some kind of mapping thing
888 ; Invalidate block mappings by invalidating a chunk of autogen PTEs in PTEGs hashed
895 ; mapped 1/2 of physical RAM in an individual block. Way unlikely.
905 lhz r25,mpBSize(r31) ; Get the number of pages in block
913 rlwinm r24,r23,maxAdrSpb,32-maxAdrSpb-maxAdrSpb,31-maxAdrSpb ; Get high order of hash
916 sub r4,r25,r9 ; Get number of pages left
918 addi r10,r9,mapRemChunk ; Point to the start of the next chunk
929 add r27,r27,r0 ; Adjust vaddr to start of current chunk
942 ori r26,r26,lo16(0xFFC0) ; Stick in the rest of the length
953 cmplw cr5,r30,r22 ; Check if we reached the end of the range
1013 or r5,r5,r6 ; Get number of TLBIEs needed
1044 lwz r4,mpVAddr(r31) ; High order of address
1045 lwz r5,mpVAddr+4(r31) ; Low order of address
1052 mr r15,r4 ; Save top of next vaddr
1053 mr r16,r5 ; Save bottom of next vaddr
1071 mr r4,r29 ; High order of address
1072 mr r5,r30 ; Low order of address
1076 mr r15,r4 ; Save top of next vaddr
1077 mr r16,r5 ; Save bottom of next vaddr
1095 ; Here we handle the 64-bit version of hw_rem_map
1109 ld r5,0(r26) ; Get the top of PTE
1116 rldimi r23,r30,0,36 ; Insert the page portion of the VPN
1139 ptesync ; Make sure of it all
1220 hrmRetn64: rldicr r8,r31,0,51 ; Find start of page
1222 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
1242 lhz r25,mpBSize(r31) ; Get the number of pages in block
1252 sub r4,r25,r9 ; Get number of pages left
1254 addi r10,r9,mapRemChunk ; Point to the start of the next chunk
1263 add r27,r27,r9 ; Adjust vaddr to start of current chunk
1271 rlwimi r24,r24,14,4,17 ; Insert a copy of space hash
1275 subfic r5,r5,46 ; Get number of leading zeros
1277 mr r30,r27 ; Get start of chunk to invalidate
1292 cmplw cr5,r30,r22 ; Have we reached the end of the range?
1353 or r5,r5,r6 ; Get number of TLBIEs needed
1366 rldimi r2,r27,0,36 ; Insert the page portion of the VPN
1387 lwz r4,mpVAddr(r31) ; High order of address
1388 lwz r5,mpVAddr+4(r31) ; Low order of address
1395 mr r15,r4 ; Save top of next vaddr
1396 mr r16,r5 ; Save bottom of next vaddr
1414 mr r4,r29 ; High order of address
1415 mr r5,r30 ; Low order of address
1419 mr r15,r4 ; Save top of next vaddr
1420 mr r16,r5 ; Save bottom of next vaddr
1448 ; Method of operation:
1463 ; r29: high-order 32 bits of guest virtual address
1464 ; r30: low-order 32 bits of guest virtual address
1476 rlwinm r30,r30,0,0xFFFFF000 ; Clean up low-order bits of 32-bit guest vaddr
1484 rldimi r30,r29,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
1492 la r31,VMX_HPIDX_OFFSET(r26) ; r31 <- base of hash page physical index
1596 ; r4 -> High-order 32 bits of PTE
1597 ; r5 -> Low-order 32 bits of PTE
1629 beq- hrmGPEMissMiss ; End of chain, this is not good
1656 beq-- hrmGPEMissMiss ; End of chain, this is not good
1719 * We return the virtual address of the removed mapping as a
1785 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
1787 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
1789 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
1801 lwz r28,pmapPAddr+4(r28) ; Get the physical address of the pmap
1804 hppSF2: ld r28,pmapPAddr(r28) ; Get the physical address of the pmap
1837 * We return the virtual address of the removed mapping as a
1910 mr r29,r4 ; Top half of vaddr
1911 mr r30,r5 ; Bottom half of vaddr
1915 mr r15,r4 ; Save top of next vaddr
1917 mr r16,r5 ; Save bottom of next vaddr
1933 li r3,failMapping ; Show that we failed some kind of mapping thing
1949 * We return the virtual address of the removed mapping as a
2031 cmplw r10,r8 ; Is this one of ours?
2048 cmplw r10,r8 ; Is this one of ours?
2057 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2089 li r3,failMapping ; Show that we failed some kind of mapping thing
2108 * r4 : high-order 32 bits of guest virtual address
2109 * r5 : low-order 32 bits of guest virtual address
2170 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2172 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2175 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2177 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2206 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2208 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2211 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2213 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2239 * We return the virtual address of the found mapping in
2272 cmplw r10,r8 ; Is this one of ours?
2289 cmplw r10,r8 ; Is this one of ours?
2306 lwz r3,mbvrswap+4(r3) ; Get last half of virtual to real swap
2347 li r3,failMapping ; Show that we failed some kind of mapping thing
2352 ; Returns 0 if not found or the virtual address of the mapping if
2363 mr r25,r6 ; Remember address of next va
2377 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
2390 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
2392 hfmSF1: mr r29,r4 ; Save top half of vaddr
2413 li r26,0xFFF ; Get a mask to relocate to start of mapping page
2423 ; word of the xor.
2425 lwz r4,mbvrswap+4(r4) ; Get last half of virtual to real swap
2432 rlwinm r3,r31,0,0,31 ; Move mapping to return register and clear top of register if 64-bit
2444 hfmReturnC: stw r29,0(r25) ; Save the top of the next va
2445 stw r30,4(r25) ; Save the bottom of the next va
2466 li r3,failMapping ; Show that we failed some kind of mapping thing
2486 lis r5,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2487 ori r5,r5,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2491 lwz r0,mrStart(r5) ; Get start of table entry
2492 lwz r4,mrEnd(r5) ; Get end of table entry
2628 lis r27,hi16(hwpOpBase) ; Get high order of op base
2630 ori r27,r27,lo16(hwpOpBase) ; Get low order of op base
2635 add r28,r28,r27 ; Get the address of the postop routine
2636 add r27,r5,r27 ; Get the address of the op routine
2691 stw r5,4(r3) ; Store second half of PTE
2778 std r5,8(r3) ; Save bottom of PTE
2831 ; at the start of hwpOpBase
2848 ; This is the continuation of function 4 - Set attributes in mapping
2850 ; We changed the attributes of a mapped page. Make sure there are no cache paradoxes.
2857 bgt++ hwpSAMinvd ; Go do the rest of it...
2865 bgt++ hwpSAMinvi ; Go do the rest of it...
2888 lwz r8,mpVAddr+4(r31) ; Get the protection part of mapping
2898 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2905 hwpSAtrPhy: li r5,ppLink ; Get offset for flag part of physent
2911 ; Note: CR0_EQ is set because of stwcx.
2919 lwz r8,mpVAddr+4(r31) ; Get the attribute part of mapping
2937 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2944 ; NOTE: we moved the remainder of the code out of here because it
2946 ; at the end of the no-op function.
2955 hwpCRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2961 ; Note: CR0_EQ is set because of stwcx.
2970 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2974 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2982 hwpCCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2988 ; Note: CR0_EQ is set because of stwcx.
2997 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3001 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3009 hwpSRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
3015 ; Note: CR0_EQ is set because of stwcx.
3023 hwpSRefMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3026 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3033 hwpSCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
3039 ; Note: CR0_EQ is set because of stwcx.
3046 hwpSCngMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3049 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3112 li r5,ppLink+4 ; Get offset for flag part of physent
3119 ; Note: CR0_EQ is set because of stwcx.
3128 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3132 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3156 mr r25,r7 ; Remember address of next va
3172 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3185 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
3187 hpSF1: mr r29,r4 ; Save top half of vaddr
3224 stw r5,4(r3) ; Store second half of PTE
3249 std r5,8(r3) ; Store second half of PTE
3273 hpReturnC: stw r29,0(r25) ; Save the top of the next va
3274 stw r30,4(r25) ; Save the bottom of the next va
3315 li r3,failMapping ; Show that we failed some kind of mapping thing
3350 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3363 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
3365 htrSF1: mr r29,r4 ; Save top half of vaddr
3394 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3401 andc r12,r12,r0 ; Clear mapping copy of RC
3402 andc r5,r5,r0 ; Clear PTE copy of RC
3425 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3432 andc r12,r12,r0 ; Clear mapping copy of RC
3433 andc r5,r5,r0 ; Clear PTE copy of RC
3489 li r3,failMapping ; Show that we failed some kind of mapping thing
3500 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3502 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3505 lwz r5,mrStart(r9) ; Get start of table entry
3506 lwz r0,mrEnd(r9) ; Get end of table entry
3508 cmplwi cr7,r3,0 ; Are we at the end of the table?
3534 lis r3,ha16(EXT(pmap_mem_regions)) ; Get high order of physent table (note use of ha16 to get value appropriate for an addi of low part)
3536 addi r4,r4,lo16(EXT(pmap_mem_regions)) ; Get low part of address of entry
3539 lwz r7,mrStart(r3) ; Get the start of range
3540 lwz r3,mrPhysTab(r3) ; Get the start of the entries for this bank
3728 * We cannot handle any kind of protection exceptions here, so we pass
3751 lwz r29,savedar(r13) ; Get the first half of the DAR
3757 lwz r29,savesrr0(r13) ; Get the first half of the instruction address
3762 li r20,64 ; Set a limit of 64 nests for sanity check
3773 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
3774 mr r23,r30 ; Save the low part of faulting address
3778 hpfInKern: mr r22,r29 ; Save the high part of faulting address
3825 mr r4,r22 ; Get top of faulting vaddr
3826 mr r5,r23 ; Get bottom of faulting vaddr
3865 addc r23,r23,r9 ; Relocate bottom half of vaddr
3868 add r10,r10,r11 ; Add in the higher part of the index
3870 adde r22,r22,r8 ; Relocate the top half of the vaddr
3875 lwz r28,pmapPAddr+4(r12) ; Get the physical address of the new pmap
3881 ld r28,pmapPAddr(r12) ; Get the physical address of the new pmap
3928 ; Did not find any kind of mapping
3947 ; We keep a mapping of a "linkage" mapping in the per_proc.
3949 ; as part of context switch. It relocates the appropriate user address
3968 ; so that bits 0:35 are in 14:49 (leaves a hole for one copy of the space ID).
3983 ; r22: relocated high-order 32 bits of vaddr
3984 ; r23: relocated low-order 32 bits of vaddr
3988 ; r29: high-order 32 bits of faulting vaddr
3989 ; r30: low-order 32 bits of faulting vaddr
3996 rlwinm r15,r23,18,14,17 ; Shift 32:35 (0:3) of vaddr just above space ID
3997 rlwinm r20,r21,28,22,31 ; Shift upper 10 bits of space into high order
3998 rlwinm r14,r22,18,14,31 ; Shift 0:17 of vaddr over
4000 rlwimi r21,r21,14,4,17 ; Make a second copy of space above first
4002 rlwimi r15,r22,18,0,13 ; Shift 18:31 of vaddr just above shifted 32:35
4004 rlwimi r21,r21,28,0,3 ; Get low order of 3rd copy of space at top of register
4005 xor r14,r14,r20 ; Calculate the top half of VSID
4006 xor r15,r15,r21 ; Calculate the bottom half of the VSID
4007 rlwinm r14,r14,12,15,19 ; Slide the top of the VSID over to correct position (trim for 65 bit addressing)
4009 rlwimi r14,r15,12,20,31 ; Slide top of bottom of VSID over into the top
4011 or r12,r12,r15 ; Add key into the bottom of VSID
4033 ; also update the pmap sub-tag as well. The sub-tag is a table of 4 bit
4035 ; 0:3 for 32-bit) of the ESID.
4042 ; The cache entry contains an image of the ESID/VSID pair we would load for
4064 lwz r10,pmapSCSubTag(r25) ; Get the first part of the sub-tag lookup table
4065 lwz r11,pmapSCSubTag+4(r25) ; Get the second part of the sub-tag lookup table
4084 cmplwi r2,32 ; See if we are in the first or second half of sub-tag
4092 stw r29,sgcESID(r9) ; Save the top of the ESID
4095 stw r30,sgcESID+4(r9) ; Save the bottom of the ESID
4098 stw r14,sgcVSID(r9) ; Save the top of the VSID
4100 stw r12,sgcVSID+4(r9) ; Save the bottom of the VSID and the key
4101 bge hpfSCSTbottom ; Go save the bottom part of sub-tag
4103 stw r10,pmapSCSubTag(r25) ; Save the top of the sub-tag
4107 stw r11,pmapSCSubTag+4(r25) ; Save the bottom of the sub-tag
4130 rlwimi r6,r14,20,8,11 ; Get the last nybble of the SR contents
4151 sldi r10,r14,32 ; Move high part of VSID over
4186 mr r7,r9 ; Get a copy of the ESID with bits 36:63 clear
4214 ; mapping. This is not a good thing.... We really only need one of the
4256 ; The 24 bits of the 32-bit architecture VSID is in the following:
4275 mr r2,r25 ; Save the flag part of the mapping
4276 rlwimi r18,r14,27,1,4 ; Move bits 28:31 of the "shifted" VSID into the PTE image
4281 ori r16,r16,lo16(0xFFC0) ; Slap in the bottom of the mask
4285 rlwimi r18,r15,27,5,24 ; Move bits 32:31 of the "shifted" VSID into the PTE image
4297 ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4312 li r21,8 ; Get the number of slots
4340 lwz r7,4(r19) ; Get the real part of the stealee
4354 rlwinm r10,r6,21,0,3 ; Shift last 4 bits of space to segment part
4365 xor r2,r2,r11 ; Get the segment number (plus a whole bunch of extra bits)
4372 rlwimi r12,r11,12,10,19 ; Move in the rest of the vaddr
4388 sync ; Make sure of it all
4395 lwz r4,4(r19) ; Get the RC of the just invalidated PTE
4412 cmplw cr1,r10,r8 ; Is this one of ours?
4427 stw r24,4(r19) ; Stuff in the real part of the PTE
4430 stw r18,0(r19) ; Stuff the virtual part of the PTE and make it valid
4456 sldi r11,r22,32 ; Slide top of adjusted EA over
4457 sldi r14,r14,32 ; Slide top of VSID over
4461 mr r2,r10 ; Save the flag part of the mapping
4462 or r11,r11,r23 ; Stick in bottom of adjusted EA for full 64-bit value
4464 or r15,r15,r14 ; Stick in bottom of AVPN for full 64-bit value
4466 subfic r5,r5,46 ; Get number of leading zeros
4468 ori r15,r15,1 ; Turn on valid bit in AVPN to make top of PTE
4469 srd r16,r16,r5 ; Shift over to get length of table
4476 add r24,r24,r11 ; Get actual physical address of this page
4486 ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4501 li r21,8 ; Get the number of slots
4517 mr r6,r17 ; Restore original state of PCA
4530 ld r7,8(r19) ; Get the real part of the stealee
4545 xor r11,r11,r19 ; Hash backwards to get low bits of VPN
4557 rldimi r12,r11,0,41 ; Stick the low part of the page number into the AVPN
4567 mr r7,r8 ; Get a copy of the space ID
4571 rldimi r7,r7,28,22 ; Add in a 3rd copy of the hash up top
4574 ptesync ; Make sure of it all
4584 ld r12,8(r19) ; Get the RC of the just invalidated PTE
4602 cmplw cr1,r10,r8 ; Is this one of ours?
4628 hpfInser64: std r24,8(r19) ; Stuff in the real part of the PTE
4630 std r15,0(r19) ; Stuff the virtual part of the PTE and make it valid
4645 hpfFinish: sub r4,r19,r27 ; Get offset of PTE
4655 ; else has taken care of it.
4670 ; is in the process of handling the fault.
4692 ; search iteration is unrolled so that we don't fetch beyond the end of
4699 ; linear list of 64-bit physical addresses of the pages that comprise
4705 ; a share of the pmap search lock for the host pmap with the host pmap's
4718 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4779 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4873 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4898 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4946 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4964 * We find runs of 2 consecutive 1 bits by cntlzw(n & (n<<1)).
4976 and r6,r4,r6 ; lite start of double bit runs in 1st word
4979 and r7,r5,r7 ; lite start of double bit runs in 2nd word
4992 beq-- mapalc2c ; There are no runs of 2 bits in 2nd word either
4999 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
5014 rlwinm r7,r5,1,31,31 ; move bit 0 of 2nd word to bit 31
5017 rlwinm r4,r4,0,0,30 ; yes, turn off bit 31 of 1st word
5018 rlwinm r5,r5,0,1,31 ; turn off bit 0 of 2nd word
5019 li r3,31 ; get index of this field
5053 hhiCPCA32: rlwinm r12,r12,28,4,29 ; Get number of slots * 4
5055 neg r12,r12 ; Get negative end of PCA
5065 mr r0,r9 ; Get a copy of the MSR
5078 hhiCPCA64: rlwinm r12,r12,27,5,29 ; Get number of slots * 4
5080 neg r12,r12 ; Get negative end of PCA
5109 sth r2,ppInvSeg(r11) ; Force a reload of the SRs
5167 cntlzw r10,r12 ; Get the number of bits
5174 li r0,0 ; Set an SLB slot index of 0
5238 rlwinm r7,r7,18,14,17 ; Slide va[32:35] east to just west of space ID
5240 srwi r8,r6,14 ; Slide va[0:17] east to just west of the rest
5241 rlwimi r7,r6,18,0,13 ; Slide va[18:31] east to just west of slid va[32:35]
5244 rlwinm r2,r0,28,0,31 ; Rotate rotate low nybble to top of low half
5248 rlwimi r2,r0,0,4,31 ; We should now have 4 copies of the space
5403 rldimi r9,r3,32,0 ; Insert the top part of the ESID
5496 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
5500 ; The following line is an exercise of a generally unreadable but recompile-friendly programming practice
5510 lwz r28,0(r8) ; Get top half of pmap address
5584 rlwimi r8,r6,20,8,11 ; Get the last nybble of the SR contents
5599 andc r16,r16,r17 ; Get list of SRs that were valid before but not now
5606 andc r16,r16,r22 ; Get rid of the guy we just did
5621 ; First we blow away all of the SLB entries.
5653 subfic r17,r17,64 ; Get the number of 1 bits we need
5682 ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_DR)|MASK(MSR_IR)) ; Get rid of EE, IR, and DR
5715 ; r3 : address of pmap, 32-bit kernel virtual address
5773 la r31,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
5789 bne graMiss ; Not one of ours, skip it
5806 ; r4 -> High-order 32 bits of PTE
5807 ; r5 -> Low-order 32 bits of PTE
5837 graRemLoop: beq- graRemoveMiss ; End of chain, this is not good
5862 graRem64Lp: beq-- graRemoveMiss ; End of chain, this is not good
5884 bl mapPhysUnlock ; Unlock the physent (and its chain of mappings)
5892 rlwinm. r0,r31,0,GV_PAGE_MASK ; End of hash table page?
5895 cmplwi r28,GV_HPAGES ; End of hash table?
5924 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
5935 ; r3 : address of guest pmap, 32-bit kernel virtual address
5995 li r28,0 ; r28 <- index of first active map word to search
6005 grlMap1st: la r20,VMX_ACTMAP_OFFSET(r25) ; Get base of active map word array
6013 la r21,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
6042 bne grlLoop ; Not one of ours, skip it
6051 ; r4 -> High-order 32 bits of PTE
6052 ; r5 -> Low-order 32 bits of PTE
6106 ; r3 : address of host pmap, 32-bit kernel virtual address
6107 ; r4 : address of guest pmap, 32-bit kernel virtual address
6149 rlwinm r29,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of host vaddr
6150 rlwinm r30,r8,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6159 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6170 grs64Salt: rldimi r29,r5,32,0 ; Insert high-order 32 bits of 64-bit host vaddr
6171 rldimi r30,r7,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6175 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6271 ; r4 -> High-order 32 bits of PTE
6272 ; r5 -> Low-order 32 bits of PTE
6297 // of mappings (guest virtual->host virtual->physical) is valid, so the dormant mapping can be
6309 bt++ pf64Bitb,grsPFnd64 ; 64-bit version of physent chain search
6369 grsRemLoop: beq- grsPEMissMiss ; End of chain, this is not good
6395 grsRem64Lp: beq-- grsPEMissMiss ; End of chain, this is not good
6460 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6471 ; r3 : address of host pmap, 32-bit kernel virtual address
6472 ; r4 : address of guest pmap, 32-bit kernel virtual address
6517 rlwinm r30,r5,0,1,0 ; Get high-order 32 bits of guest vaddr
6518 rlwimi r30,r6,0,0,31 ; Get low-order 32 bits of guest vaddr
6528 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6542 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6679 or r31,r22,r12 ; r31 <- address of mapping to steal
6685 ; r4 -> High-order 32 bits of PTE
6686 ; r5 -> Low-order 32 bits of PTE
6721 gadRemLoop: beq- gadPEMissMiss ; End of chain, this is not good
6746 gadRem64Lp: beq-- gadPEMissMiss ; End of chain, this is not good
6838 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6849 ; r3 : address of host pmap, 32-bit kernel virtual address
6850 ; r4 : address of guest pmap, 32-bit kernel virtual address
6884 rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6893 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6903 gsu64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6907 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7000 ; r4 -> High-order 32 bits of PTE
7001 ; r5 -> Low-order 32 bits of PTE
7053 ; r3 : address of host pmap, 32-bit kernel virtual address
7054 ; r4 : address of guest pmap, 32-bit kernel virtual address
7093 rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 20 bits of guest vaddr
7103 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7114 gtd64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7118 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7213 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
7220 andc r12,r12,r0 ; Clear mapping copy of RC
7221 andc r5,r5,r0 ; Clear PTE copy of RC
7243 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
7250 andc r12,r12,r0 ; Clear mapping copy of RC
7251 andc r5,r5,r0 ; Clear PTE copy of RC
7305 ; unpredictable which of the two or more possible host virtual addresses
7309 ; r3 : address of guest pmap, 32-bit kernel virtual address
7347 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7357 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7368 gth64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7372 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7464 bt++ pf64Bitb,gthPFnd64 ; 64-bit version of physent chain search
7549 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
7558 ; of it.
7561 ; r3 : address of guest pmap, 32-bit kernel virtual address
7598 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7609 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7620 gfm64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7624 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7711 gfmSrchHit: lwz r5,0(r31) ; Fetch 32 bytes of mapping from physical
7737 stw r5,0(r26) ; Store 32 bytes of mapping into virtual
7776 ; r3 : address of guest pmap, 32-bit kernel virtual address
7812 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7821 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7832 gcp64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7836 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7928 ; r4 -> High-order 32 bits of PTE
7929 ; r5 -> Low-order 32 bits of PTE
7990 ; cr0_eq is false if there was an entry and it was locked
7996 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
7998 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
8001 lwz r5,mrStart(r9) ; Get start of table entry
8002 lwz r0,mrEnd(r9) ; Get end of table entry
8004 cmplwi cr2,r3,0 ; Are we at the end of the table?
8023 bne-- mapFindKl ; It is locked, go get rid of reservation and leave...
8042 ; We use a combination of three things: a mask of valid entries, a sub-tag, and the
8043 ; ESID (aka the "tag"). The mask indicates which of the cache slots actually contain
8045 ; of the ESID, bits 32:36 of the effective for 64-bit and 0:3 for 32-bit. The cache
8048 ; The purpose of the sub-tag is to limit the number of searches necessary when looking
8053 ; matches. More than likely, we will eliminate almost all of the searches.
8081 lwz r9,pmapSCSubTag(r3) ; Get the high part of the sub-tag
8082 rlwimi r5,r5,28,4,7 ; Copy sub-tag just to right of itself (XX------)
8084 rlwimi r5,r5,24,8,15 ; Copy doubled sub-tag to right of itself (XXXX----)
8098 and r10,r2,r10 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
8099 and r9,r0,r9 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
8118 and. r6,r9,r6 ; Get mask of valid and hit
8184 lwz r5,4(r3) ; Get the real part of the PTE
8188 lwz r0,mpVAddr+4(r31) ; Get the flags part of the field
8189 lis r8,hi16(EXT(pmap_mem_regions)) ; Get the top of the region table
8190 ori r8,r8,lo16(EXT(pmap_mem_regions)) ; Get the bottom of the region table
8194 lwz r11,mrStart(r11) ; Get the start of bank
8196 addi r2,r2,4 ; Offset to last half of field
8213 ; 64-bit version of mapMergeRC
8248 ; and the low 12 bits of mpVAddr valid in R5. R3 will contain 0.
8254 ; top of the PTE and R5 is the bottom. R6 contains the PCA.
8285 lwz r4,0(r3) ; Get the top of the PTE
8311 sync ; Make sure of it all
8318 lwz r0,mpVAddr+4(r31) ; Get the flags part of the field
8319 lis r8,hi16(EXT(pmap_mem_regions)) ; Get the top of the region table
8320 ori r8,r8,lo16(EXT(pmap_mem_regions)) ; Get the bottom of the region table
8324 lwz r11,mrStart(r11) ; Get the start of bank
8326 addi r2,r2,4 ; Offset to last half of field
8372 ld r4,0(r3) ; Get the top of the PTE
8380 rldimi r2,r5,0,36 ; Cram in the page portion of the EA
8453 ; The PTEG allocation controls are a bit map of the state of the PTEG.
8456 ; guys do not keep track of reference and change and are actually "wired".
8458 ; is a sliding position mask used to "randomize" PTE slot stealing. All 4 of these
8459 ; fields fit in a single word and are loaded and stored under control of the
8499 rlwimi r11,r11,8,16,23 ; Get set to march a 1 back into top of 8 bit rotate
8518 ; A shared/exclusive lock allows multiple shares of a lock to be taken
8623 ; Take a share of the lock
8680 rlwinm r4,r4,0,1,0 ; Copy fill to top of 64-bit register
8700 li r2,4096/32 ; Get number of cache lines
8729 li r2,4096/128 ; Get number of cache lines