/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <machine/smp.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

static void initclocks __P((void *dummy));
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

/* Some of these don't belong here, but it's easiest to concentrate them. */
long cp_time[CPUSTATES];

long tk_cancc;
long tk_nin;
long tk_nout;
long tk_rawcc;

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.
 *
 * The main timer, running hz times per second, is used to trigger interval
 * timers, timeouts and rescheduling as needed.
 *
 * The second timer handles kernel and user profiling, and does resource
 * use estimation.  If the second timer is programmable, it is randomized
 * to avoid aliasing between the two clocks.  For example, the randomization
 * prevents an adversary from always giving up the cpu just before its
 * quantum expires.  Otherwise, it would never accumulate cpu ticks.  The
 * mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 *
 * Time-of-day is maintained using a "timecounter", which may or may
 * not be related to the hardware generating the above mentioned
 * interrupts.
 */

int	stathz;
int	profhz;
static int profprocs;
int	ticks;
static int psdiv, pscnt;		/* prof => stat divider */
int	psratio;			/* ratio: prof / stat */

/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(dummy)
	void *dummy;
{
	register int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
}
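
/*
 * Worked example (illustrative values only, not mandated by this code):
 * if cpu_initclocks() sets stathz = 128 and profhz = 1024, then
 * psratio = 1024 / 128 = 8.  startprofclock() below switches the
 * statistics clock to profhz and sets psdiv = pscnt = 8, so statclock()
 * charges statistics only on every 8th tick and the effective statistics
 * rate remains stathz.  If profhz is left at 0, it defaults to stathz
 * (or hz) above and psratio is 1.
 */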

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(frame)
	register struct clockframe *frame;
{
	register struct proc *p;

	p = curproc;
	if (p != idleproc) {
		register struct pstats *pstats;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
			p->p_flag |= P_ALRMPEND;
			aston();
		}
		if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
			p->p_flag |= P_PROFPEND;
			aston();
		}
	}

#if defined(SMP) && defined(BETTER_CLOCK)
	forward_hardclock(pscnt);
#endif

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	tc_windup();
	ticks++;

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
		setsoftclock();
	} else if (softticks + 1 == ticks)
		++softticks;
}

/*
 * Compute the number of ticks in the specified amount of time.
 */
int
tvtohz(tv)
	struct timeval *tv;
{
	register unsigned long ticks;
	register long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case, but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		       sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
			/ tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
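
/*
 * Worked example (assuming hz = 100, so tick = 10000 usec): for a
 * timeval of 1.5 seconds, sec = 1 fits in LONG_MAX / 1000000, so
 * ticks = (1000000 + 500000 + 9999) / 10000 + 1 = 150 + 1 = 151.
 * The 150 covers the interval rounded up to whole ticks; the extra 1
 * allows for the current, partially elapsed tick.  The "248 days"
 * bound above is INT_MAX ticks at 100 Hz: 2147483647 / 100 seconds,
 * or roughly 248.5 days.
 */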

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{
	int s;

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{
	int s;

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.  Most of the statistics are only
 * used by user-level statistics programs.  The main exceptions are
 * p->p_uticks, p->p_sticks, p->p_iticks, and p->p_estcpu.
 */
void
statclock(frame)
	register struct clockframe *frame;
{
#ifdef GPROF
	register struct gmonparam *g;
	int i;
#endif
	register struct proc *p;
	struct pstats *pstats;
	long rss;
	struct rusage *ru;
	struct vmspace *vm;

	mtx_enter(&sched_lock, MTX_SPIN);

	if (CLKF_USERMODE(frame)) {
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled, record the tick.
		 */
		p = curproc;
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame), 1);
#if defined(SMP) && defined(BETTER_CLOCK)
		if (stathz != 0)
			forward_statclock(pscnt);
#endif
		if (--pscnt > 0) {
			mtx_exit(&sched_lock, MTX_SPIN);
			return;
		}
		/*
		 * Charge the time as appropriate.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#if defined(SMP) && defined(BETTER_CLOCK)
		if (stathz != 0)
			forward_statclock(pscnt);
#endif
		if (--pscnt > 0) {
			mtx_exit(&sched_lock, MTX_SPIN);
			return;
		}
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		p = curproc;
		if ((p->p_ithd != NULL) || CLKF_INTR(frame)) {
			p->p_iticks++;
			cp_time[CP_INTR]++;
		} else {
			p->p_sticks++;
			if (p != idleproc)
				cp_time[CP_SYS]++;
			else
				cp_time[CP_IDLE]++;
		}
	}
	pscnt = psdiv;

	schedclock(p);

	/* Update resource usage integrals and maximums. */
	if ((pstats = p->p_stats) != NULL &&
	    (ru = &pstats->p_ru) != NULL &&
	    (vm = p->p_vmspace) != NULL) {
		ru->ru_ixrss += pgtok(vm->vm_tsize);
		ru->ru_idrss += pgtok(vm->vm_dsize);
		ru->ru_isrss += pgtok(vm->vm_ssize);
		rss = pgtok(vmspace_resident_count(vm));
		if (ru->ru_maxrss < rss)
			ru->ru_maxrss = rss;
	}

	mtx_exit(&sched_lock, MTX_SPIN);
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.tickadj = tickadj;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo", "");
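
/*
 * A minimal userland sketch of consuming the structure exported above
 * via sysctlbyname(3); this program is illustrative only and is not
 * part of this file (struct clockinfo comes from <sys/time.h>):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/time.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct clockinfo ci;
 *		size_t len = sizeof(ci);
 *
 *		if (sysctlbyname("kern.clockrate", &ci, &len, NULL, 0) == -1)
 *			return (1);
 *		printf("hz %d tick %d tickadj %d stathz %d profhz %d\n",
 *		    ci.hz, ci.tick, ci.tickadj, ci.stathz, ci.profhz);
 *		return (0);
 *	}
 */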