/*
 * Userland implementation of gettimeofday() for 32 bits processes in a
 * ppc64 kernel for use in the vDSO
 *
 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
 * IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

/* Offset for the low 32-bit part of a field of long type.
 * On a 64-bit (big-endian) kernel a long is 8 bytes, so the low word of
 * the update count / tv_sec lives 4 bytes into the field; on a 32-bit
 * kernel the long is the word itself.
 */
#ifdef CONFIG_PPC64
#define LOPART	4
#define TSPEC_TV_SEC	TSPC64_TV_SEC+LOPART
#else
#define LOPART	0
#define TSPEC_TV_SEC	TSPC32_TV_SEC
#endif

	.text
/*
 * Exact prototype of gettimeofday
 *
 * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
 *
 * In:  r3 = tv, r4 = tz -- either pointer may be NULL, in which case
 *      that output is simply skipped.
 * Out: r3 = 0 with cr0.so cleared (the vDSO "no error" convention).
 */
V_FUNCTION_BEGIN(__kernel_gettimeofday)
  .cfi_startproc
	mflr	r12
  .cfi_register lr,r12

	mr	r10,r3			/* r10 saves tv */
	mr	r11,r4			/* r11 saves tz */
	bl	__get_datapage@local	/* get data page */
	mr	r9, r3			/* datapage ptr in r9 */
	cmplwi	r10,0			/* check if tv is NULL */
	beq	3f
	lis	r7,1000000@ha		/* load up USEC_PER_SEC */
	addi	r7,r7,1000000@l		/* so we get microseconds in r4 */
	bl	__do_get_tspec@local	/* get sec/usec from tb & kernel */
	stw	r3,TVAL32_TV_SEC(r10)
	stw	r4,TVAL32_TV_USEC(r10)

3:	cmplwi	r11,0			/* check if tz is NULL */
	beq	1f
	lwz	r4,CFG_TZ_MINUTEWEST(r9)/* fill tz */
	lwz	r5,CFG_TZ_DSTTIME(r9)
	stw	r4,TZONE_TZ_MINWEST(r11)
	stw	r5,TZONE_TZ_DSTTIME(r11)

1:	mtlr	r12
	crclr	cr0*4+so		/* clear SO bit = success to caller */
	li	r3,0
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_gettimeofday)

/*
 * Exact prototype of clock_gettime()
 *
 * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
 *
 * In:  r3 = clock_id; only CLOCK_REALTIME and CLOCK_MONOTONIC are
 *      handled here, anything else takes the real syscall at 99:.
 *      r4 = tp (not NULL-checked on this fast path).
 * Out: r3 = 0 with cr0.so cleared on the fast path; the fallback
 *      returns the raw result of the sc instruction.
 */
V_FUNCTION_BEGIN(__kernel_clock_gettime)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmpli	cr0,r3,CLOCK_REALTIME
	cmpli	cr1,r3,CLOCK_MONOTONIC	/* cr1 stays live across the bl below */
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
	bne	cr0,99f			/* unsupported id -> syscall fallback */

	mflr	r12			/* r12 saves lr */
  .cfi_register lr,r12
	mr	r11,r4			/* r11 saves tp */
	bl	__get_datapage@local	/* get data page */
	mr	r9,r3			/* datapage ptr in r9 */
	lis	r7,NSEC_PER_SEC@h	/* want nanoseconds */
	ori	r7,r7,NSEC_PER_SEC@l
50:	bl	__do_get_tspec@local	/* get sec/nsec from tb & kernel */
	bne	cr1,80f			/* not monotonic -> all done.
					 * cr1 still holds the CLOCK_MONOTONIC
					 * compare from above: __do_get_tspec
					 * only clobbers cr0. */

	/*
	 * CLOCK_MONOTONIC
	 */

	/* now we must fixup using wall to monotonic. We need to snapshot
	 * that value and do the counter trick again. Fortunately, we still
	 * have the counter value in r8 that was returned by __do_get_xsec.
	 * At this point, r3,r4 contain our sec/nsec values, r5 and r6
	 * can be used, r7 contains NSEC_PER_SEC.
	 */

	lwz	r5,WTOM_CLOCK_SEC(r9)
	lwz	r6,WTOM_CLOCK_NSEC(r9)

	/* We now have our offset in r5,r6. We create a fake dependency
	 * on that value and re-check the counter.  (r0 is always 0, but
	 * computing it from r5/r6 forces the two loads above to complete
	 * before the counter is re-read -- cheap load-load ordering
	 * without a sync instruction.)
	 */
	or	r0,r6,r5
	xor	r0,r0,r0
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmpl	cr0,r8,r0		/* check if updated */
	bne-	50b			/* kernel updated mid-read: retry */

	/* Calculate and store result. Note that this mimics the C code,
	 * which may cause funny results if nsec goes negative... is that
	 * possible at all ?
	 */
	add	r3,r3,r5		/* sec  += wtom sec */
	add	r4,r4,r6		/* nsec += wtom nsec */
	cmpw	cr0,r4,r7		/* nsec >= NSEC_PER_SEC ? */
	cmpwi	cr1,r4,0		/* nsec <  0 ? */
	blt	1f
	subf	r4,r7,r4		/* carry into seconds */
	addi	r3,r3,1
1:	bge	cr1,80f			/* 0 <= nsec < NSEC_PER_SEC: done */
	addi	r3,r3,-1		/* borrow from seconds */
	add	r4,r4,r7

80:	stw	r3,TSPC32_TV_SEC(r11)
	stw	r4,TSPC32_TV_NSEC(r11)

	mtlr	r12
	crclr	cr0*4+so		/* clear SO bit = success to caller */
	li	r3,0
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_gettime
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_gettime)


/*
 * Exact prototype of clock_getres()
 *
 * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
 *
 * Both supported clocks report the fixed CLOCK_REALTIME_RES resolution;
 * res may be NULL (the beqlr below returns success without storing).
 * Unsupported clock ids take the real syscall at 99:.
 */
V_FUNCTION_BEGIN(__kernel_clock_getres)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmpwi	cr0,r3,CLOCK_REALTIME
	cmpwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
	bne	cr0,99f			/* unsupported id -> syscall fallback */

	li	r3,0			/* return value: success */
	cmpli	cr0,r4,0		/* res == NULL ? */
	crclr	cr0*4+so		/* clear SO bit = success to caller */
	beqlr				/* NULL res: just return 0 */
	lis	r5,CLOCK_REALTIME_RES@h
	ori	r5,r5,CLOCK_REALTIME_RES@l
	stw	r3,TSPC32_TV_SEC(r4)	/* *res = { 0, CLOCK_REALTIME_RES } */
	stw	r5,TSPC32_TV_NSEC(r4)
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_getres
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)


/*
 * This is the core of clock_gettime() and gettimeofday(),
 * it returns the current time in r3 (seconds) and r4.
 * On entry, r7 gives the resolution of r4, either USEC_PER_SEC
 * or NSEC_PER_SEC, giving r4 in microseconds or nanoseconds.
 * It expects the datapage ptr in r9 and doesn't clobber it.
 * It clobbers r0, r5 and r6.
 * On return, r8 contains the counter value that can be reused.
 * This clobbers cr0 but not any other cr field.
 *
 * The whole body is one retry loop of a seqcount-style protocol:
 * read the kernel's update count (odd while an update is in flight),
 * sample the timebase and the vDSO data, then re-read the count and
 * start over if it changed under us.
 */
__do_get_tspec:
  .cfi_startproc
	/* Check for update count & load values. We use the low
	 * order 32 bits of the update count
	 */
1:	lwz	r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	andi.	r0,r8,1			/* pending update ? loop */
	bne-	1b
	xor	r0,r8,r8		/* create dependency */
	add	r9,r9,r0		/* r9 unchanged, but now data-dependent
					 * on r8, so the loads below can't be
					 * reordered before the count read */

	/* Load orig stamp (offset to TB) */
	lwz	r5,CFG_TB_ORIG_STAMP(r9)
	lwz	r6,(CFG_TB_ORIG_STAMP+4)(r9)

	/* Get a stable TB value: re-read if the upper half ticked
	 * over between the two mftbu reads
	 */
2:	mftbu	r3
	mftbl	r4
	mftbu	r0
	cmplw	cr0,r3,r0
	bne-	2b

	/* Subtract tb orig stamp and shift left 12 bits.
	 */
	subfc	r4,r6,r4		/* 64-bit subtract: low word, carry out */
	subfe	r0,r5,r3		/* ... high word, carry in */
	slwi	r0,r0,12
	rlwimi.	r0,r4,12,20,31		/* r0 = high word of (delta << 12);
					 * the dot form sets cr0 for beq+ below */
	slwi	r4,r4,12		/* r4 = low word of (delta << 12) */

	/*
	 * Load scale factor & do multiplication.
	 * We only use the high 32 bits of the tb_to_xs value.
	 * Even with a 1GHz timebase clock, the high 32 bits of
	 * tb_to_xs will be at least 4 million, so the error from
	 * ignoring the low 32 bits will be no more than 0.25ppm.
	 * The error will just make the clock run very very slightly
	 * slow until the next time the kernel updates the VDSO data,
	 * at which point the clock will catch up to the kernel's value,
	 * so there is no long-term error accumulation.
	 */
	lwz	r5,CFG_TB_TO_XS(r9)	/* load values */
	mulhwu	r4,r4,r5
	li	r3,0

	beq+	4f			/* skip high part computation if 0 */
	mulhwu	r3,r0,r5
	mullw	r5,r0,r5
	addc	r4,r4,r5
	addze	r3,r3
4:
	/* At this point, we have seconds since the xtime stamp
	 * as a 32.32 fixed-point number in r3 and r4.
	 * Load & add the xtime stamp.
	 */
	lwz	r5,STAMP_XTIME+TSPEC_TV_SEC(r9)
	lwz	r6,STAMP_SEC_FRAC(r9)
	addc	r4,r4,r6
	adde	r3,r3,r5

	/* We create a fake dependency on the result in r3/r4
	 * and re-check the counter
	 */
	or	r6,r4,r3
	xor	r0,r6,r6		/* always 0; orders the re-read below */
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmplw	cr0,r8,r0		/* check if updated */
	bne-	1b			/* raced with an update: redo it all */

	mulhwu	r4,r4,r7		/* convert to micro or nanoseconds:
					 * r4 = (frac * r7) >> 32 */

	blr
  .cfi_endproc