Line data Source code
1 : /* $OpenBSD: kern_time.c,v 1.103 2018/05/28 18:05:42 guenther Exp $ */
2 : /* $NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $ */
3 :
4 : /*
5 : * Copyright (c) 1982, 1986, 1989, 1993
6 : * The Regents of the University of California. All rights reserved.
7 : *
8 : * Redistribution and use in source and binary forms, with or without
9 : * modification, are permitted provided that the following conditions
10 : * are met:
11 : * 1. Redistributions of source code must retain the above copyright
12 : * notice, this list of conditions and the following disclaimer.
13 : * 2. Redistributions in binary form must reproduce the above copyright
14 : * notice, this list of conditions and the following disclaimer in the
15 : * documentation and/or other materials provided with the distribution.
16 : * 3. Neither the name of the University nor the names of its contributors
17 : * may be used to endorse or promote products derived from this software
18 : * without specific prior written permission.
19 : *
20 : * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 : * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 : * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 : * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 : * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 : * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 : * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 : * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 : * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 : * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 : * SUCH DAMAGE.
31 : *
32 : * @(#)kern_time.c 8.4 (Berkeley) 5/26/95
33 : */
34 :
35 : #include <sys/param.h>
36 : #include <sys/resourcevar.h>
37 : #include <sys/kernel.h>
38 : #include <sys/systm.h>
39 : #include <sys/proc.h>
40 : #include <sys/ktrace.h>
41 : #include <sys/vnode.h>
42 : #include <sys/signalvar.h>
43 : #include <sys/pledge.h>
44 : #include <sys/task.h>
45 : #include <sys/timeout.h>
46 : #include <sys/timetc.h>
47 :
48 : #include <sys/mount.h>
49 : #include <sys/syscallargs.h>
50 :
51 :
/*
 * Pending gradual clock correction: cleared by settime(), read and
 * replaced by sys_adjtime().  May be negative.
 */
int64_t adjtimedelta;		/* unapplied time correction (microseconds) */
53 :
54 : /*
55 : * Time of day and interval timer support.
56 : *
57 : * These routines provide the kernel entry points to get and set
58 : * the time-of-day and per-process interval timers. Subroutines
59 : * here provide support for adding and subtracting timeval structures
60 : * and decrementing interval timers, optionally reloading the interval
61 : * timers when they expire.
62 : */
63 :
/* This function is used by clock_settime and settimeofday */
/*
 * Step the real-time clock to *ts, cancelling any pending adjtime(2)
 * correction first.  Returns 0 on success or EPERM if the request is
 * denied by the forward-wrap or securelevel checks below.
 */
int
settime(const struct timespec *ts)
{
	struct timespec now;

	/*
	 * Adjtime in progress is meaningless or harmful after
	 * setting the clock. Cancel adjtime and then set new time.
	 */
	adjtimedelta = 0;

	/*
	 * Don't allow the time to be set forward so far it will wrap
	 * and become negative, thus allowing an attacker to bypass
	 * the next check below. The cutoff is 1 year before rollover
	 * occurs, so even if the attacker uses adjtime(2) to move
	 * the time past the cutoff, it will take a very long time
	 * to get to the wrap point.
	 *
	 * XXX: we check against UINT_MAX until we can figure out
	 * how to deal with the hardware RTCs.
	 */
	if (ts->tv_sec > UINT_MAX - 365*24*60*60) {
		printf("denied attempt to set clock forward to %lld\n",
		    (long long)ts->tv_sec);
		return (EPERM);
	}
	/*
	 * If the system is secure, we do not allow the time to be
	 * set to an earlier value (it may be slowed using adjtime,
	 * but not set back). This feature prevent interlopers from
	 * setting arbitrary time stamps on files.
	 */
	nanotime(&now);
	if (securelevel > 1 && timespeccmp(ts, &now, <)) {
		printf("denied attempt to set clock back %lld seconds\n",
		    (long long)now.tv_sec - ts->tv_sec);
		return (EPERM);
	}

	/* Install the new time and push it out to the hardware RTC. */
	tc_setrealtimeclock(ts);
	resettodr();

	return (0);
}
110 :
/*
 * Backend for clock_gettime(2): fill *tp for the given clock_id on
 * behalf of thread p.  Returns 0 on success, EINVAL for an unknown
 * clock, or ESRCH when a pthread_getcpuclockid() clock names a thread
 * that no longer exists or belongs to another process.
 */
int
clock_gettime(struct proc *p, clockid_t clock_id, struct timespec *tp)
{
	struct bintime bt;
	struct proc *q;

	switch (clock_id) {
	case CLOCK_REALTIME:
		nanotime(tp);
		break;
	case CLOCK_UPTIME:
		/* Monotonic time minus time spent suspended (naptime). */
		binuptime(&bt);
		bintime_sub(&bt, &naptime);
		bintime2timespec(&bt, tp);
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
		nanouptime(tp);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		/*
		 * Accumulated process run time, plus the slice the
		 * current thread has been on-CPU since spc_runtime
		 * was last stamped.
		 */
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_p->ps_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		/* Same computation, restricted to this thread's runtime. */
		nanouptime(tp);
		timespecsub(tp, &curcpu()->ci_schedstate.spc_runtime, tp);
		timespecadd(tp, &p->p_tu.tu_runtime, tp);
		timespecadd(tp, &p->p_rtime, tp);
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			/* Target thread must exist and share our process. */
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				return (ESRCH);
			*tp = q->p_tu.tu_runtime;
		} else
			return (EINVAL);
	}
	return (0);
}
154 :
/*
 * clock_gettime(2) syscall: query the requested clock and copy the
 * result out to userland, recording it via ktrace when enabled.
 */
int
sys_clock_gettime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_gettime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	int error;

	/* Zero first so no stack garbage can leak through copyout. */
	memset(&ats, 0, sizeof(ats));
	if ((error = clock_gettime(p, SCARG(uap, clock_id), &ats)) != 0)
		return (error);

	error = copyout(&ats, SCARG(uap, tp), sizeof(ats));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT)) {
		KERNEL_LOCK();
		ktrabstimespec(p, &ats);
		KERNEL_UNLOCK();
	}
#endif
	return (error);
}
179 :
/*
 * clock_settime(2) syscall: root-only.  Only CLOCK_REALTIME may be
 * set; every other clock is read-only and yields EINVAL.
 */
int
sys_clock_settime(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_settime_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
	} */ *uap = v;
	struct timespec ats;
	clockid_t clock_id;
	int error;

	if ((error = suser(p)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);

	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
		/* settime() enforces the forward-wrap/securelevel policy. */
		if ((error = settime(&ats)) != 0)
			return (error);
		break;
	default:	/* Other clocks are read-only */
		return (EINVAL);
	}

	return (0);
}
209 :
/*
 * clock_getres(2) syscall: report the resolution of a clock.  Every
 * supported clock is reported at the tick rate, 1000000000 / hz ns.
 * tp may be NULL, in which case only the clock_id is validated.
 */
int
sys_clock_getres(struct proc *p, void *v, register_t *retval)
{
	struct sys_clock_getres_args /* {
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
	} */ *uap = v;
	clockid_t clock_id;
	struct timespec ts;
	struct proc *q;
	int error = 0;

	memset(&ts, 0, sizeof(ts));
	clock_id = SCARG(uap, clock_id);
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
	case CLOCK_UPTIME:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / hz;
		break;
	default:
		/* check for clock from pthread_getcpuclockid() */
		if (__CLOCK_TYPE(clock_id) == CLOCK_THREAD_CPUTIME_ID) {
			/* Validate the thread id just as clock_gettime does. */
			q = tfind(__CLOCK_PTID(clock_id) - THREAD_PID_OFFSET);
			if (q == NULL || q->p_p != p->p_p)
				return (ESRCH);
			ts.tv_sec = 0;
			ts.tv_nsec = 1000000000 / hz;
		} else
			return (EINVAL);
	}

	if (SCARG(uap, tp)) {
		error = copyout(&ts, SCARG(uap, tp), sizeof (ts));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrreltimespec(p, &ts);
			KERNEL_UNLOCK();
		}
#endif
	}

	return error;
}
259 :
/*
 * nanosleep(2) syscall: sleep for the requested interval and, when
 * rmtp is given, report any unslept remainder.  A caught signal ends
 * the sleep with EINTR; normal timeout expiry is success.
 */
int
sys_nanosleep(struct proc *p, void *v, register_t *retval)
{
	/* Dummy wait channel shared by all sleepers; nothing wakes it. */
	static int nanowait;
	struct sys_nanosleep_args/* {
		syscallarg(const struct timespec *) rqtp;
		syscallarg(struct timespec *) rmtp;
	} */ *uap = v;
	struct timespec rqt, rmt;
	struct timespec sts, ets;
	struct timespec *rmtp;
	int error, error1;

	rmtp = SCARG(uap, rmtp);
	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT)) {
		KERNEL_LOCK();
		ktrreltimespec(p, &rqt);
		KERNEL_UNLOCK();
	}
#endif

	if (rqt.tv_sec > 100000000 || timespecfix(&rqt))
		return (EINVAL);

	/* Stamp the start time so a remainder can be computed later. */
	if (rmtp)
		getnanouptime(&sts);

	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep",
	    MAX(1, tstohz(&rqt)));
	/* POSIX requires EINTR, not a transparent syscall restart. */
	if (error == ERESTART)
		error = EINTR;
	/* Timeout expiry is the normal, successful completion. */
	if (error == EWOULDBLOCK)
		error = 0;

	if (rmtp) {
		getnanouptime(&ets);

		memset(&rmt, 0, sizeof(rmt));
		/* remainder = requested - (end - start) */
		timespecsub(&ets, &sts, &sts);
		timespecsub(&rqt, &sts, &rmt);

		/* We may have slept longer than asked for. */
		if (rmt.tv_sec < 0)
			timespecclear(&rmt);

		error1 = copyout(&rmt, rmtp, sizeof(rmt));
		if (error1 != 0)
			error = error1;
#ifdef KTRACE
		if (error1 == 0 && KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrreltimespec(p, &rmt);
			KERNEL_UNLOCK();
		}
#endif
	}

	return error;
}
322 :
/*
 * gettimeofday(2) syscall: copy out the current wall-clock time
 * and/or the (legacy, system-global) timezone.  Either pointer may
 * be NULL.
 */
int
sys_gettimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_gettimeofday_args /* {
		syscallarg(struct timeval *) tp;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	struct timeval *tp;
	struct timezone *tzp;
	int error = 0;

	tp = SCARG(uap, tp);
	tzp = SCARG(uap, tzp);

	if (tp) {
		memset(&atv, 0, sizeof(atv));
		microtime(&atv);
		if ((error = copyout(&atv, tp, sizeof (atv))))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT)) {
			KERNEL_LOCK();
			ktrabstimeval(p, &atv);
			KERNEL_UNLOCK();
		}
#endif
	}
	if (tzp)
		error = copyout(&tz, tzp, sizeof (tz));
	return (error);
}
355 :
/*
 * settimeofday(2) syscall: root-only.  Both arguments are copied in
 * and validated before any state changes, so a faulting pointer
 * cannot leave the clock or timezone half-updated.
 */
int
sys_settimeofday(struct proc *p, void *v, register_t *retval)
{
	struct sys_settimeofday_args /* {
		syscallarg(const struct timeval *) tv;
		syscallarg(const struct timezone *) tzp;
	} */ *uap = v;
	struct timezone atz;
	struct timeval atv;
	const struct timeval *tv;
	const struct timezone *tzp;
	int error;

	tv = SCARG(uap, tv);
	tzp = SCARG(uap, tzp);

	if ((error = suser(p)))
		return (error);
	/* Verify all parameters before changing time. */
	if (tv && (error = copyin(tv, &atv, sizeof(atv))))
		return (error);
	if (tzp && (error = copyin(tzp, &atz, sizeof(atz))))
		return (error);
	if (tv) {
		struct timespec ts;

		/* settime() works in timespec units and enforces policy. */
		TIMEVAL_TO_TIMESPEC(&atv, &ts);
		if ((error = settime(&ts)) != 0)
			return (error);
	}
	if (tzp)
		tz = atz;
	return (0);
}
390 :
/*
 * adjfreq(2) syscall: read and/or set the timecounter frequency
 * adjustment.  Reading the old value is unprivileged; setting a new
 * one requires root.  Either pointer may be NULL.
 */
int
sys_adjfreq(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjfreq_args /* {
		syscallarg(const int64_t *) freq;
		syscallarg(int64_t *) oldfreq;
	} */ *uap = v;
	int error;
	int64_t f;
	const int64_t *freq = SCARG(uap, freq);
	int64_t *oldfreq = SCARG(uap, oldfreq);
	if (oldfreq) {
		if ((error = tc_adjfreq(&f, NULL)))
			return (error);
		if ((error = copyout(&f, oldfreq, sizeof(f))))
			return (error);
	}
	if (freq) {
		if ((error = suser(p)))
			return (error);
		if ((error = copyin(freq, &f, sizeof(f))))
			return (error);
		if ((error = tc_adjfreq(NULL, &f)))
			return (error);
	}
	return (0);
}
418 :
/*
 * adjtime(2) syscall: read and/or replace the pending gradual clock
 * correction (adjtimedelta, kept in microseconds).  Subject to
 * pledge restrictions; installing a new delta requires root.
 */
int
sys_adjtime(struct proc *p, void *v, register_t *retval)
{
	struct sys_adjtime_args /* {
		syscallarg(const struct timeval *) delta;
		syscallarg(struct timeval *) olddelta;
	} */ *uap = v;
	const struct timeval *delta = SCARG(uap, delta);
	struct timeval *olddelta = SCARG(uap, olddelta);
	struct timeval atv;
	int error;

	error = pledge_adjtime(p, delta);
	if (error)
		return error;

	if (olddelta) {
		memset(&atv, 0, sizeof(atv));
		atv.tv_sec = adjtimedelta / 1000000;
		atv.tv_usec = adjtimedelta % 1000000;
		/* Normalize so tv_usec ends up in [0, 1000000). */
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}

		if ((error = copyout(&atv, olddelta, sizeof(struct timeval))))
			return (error);
	}

	if (delta) {
		if ((error = suser(p)))
			return (error);

		if ((error = copyin(delta, &atv, sizeof(struct timeval))))
			return (error);

		/* XXX Check for overflow? */
		adjtimedelta = (int64_t)atv.tv_sec * 1000000 + atv.tv_usec;
	}

	return (0);
}
461 :
462 :
/*
 * Serializes access to the per-process interval timers (ps_timer[]);
 * taken by sys_getitimer(), sys_setitimer() and itimerdecr().
 */
struct mutex itimer_mtx = MUTEX_INITIALIZER(IPL_CLOCK);
464 :
/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer's it_value, in contrast, is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given below),
 * to be delayed in real time past when it is supposed to occur.  It
 * does not suffice, therefore, to reload the real timer .it_value from the
 * real time timers .it_interval.  Rather, we compute the next time in
 * absolute time the timer should go off.
 */
int
sys_getitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_getitimer_args /* {
		syscallarg(int) which;
		syscallarg(struct itimerval *) itv;
	} */ *uap = v;
	struct itimerval aitv;
	int which;

	which = SCARG(uap, which);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	memset(&aitv, 0, sizeof(aitv));
	/* Snapshot the timer field by field under the itimer mutex. */
	mtx_enter(&itimer_mtx);
	aitv.it_interval.tv_sec = p->p_p->ps_timer[which].it_interval.tv_sec;
	aitv.it_interval.tv_usec = p->p_p->ps_timer[which].it_interval.tv_usec;
	aitv.it_value.tv_sec = p->p_p->ps_timer[which].it_value.tv_sec;
	aitv.it_value.tv_usec = p->p_p->ps_timer[which].it_value.tv_usec;
	mtx_leave(&itimer_mtx);

	if (which == ITIMER_REAL) {
		struct timeval now;

		getmicrouptime(&now);
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		if (timerisset(&aitv.it_value)) {
			if (timercmp(&aitv.it_value, &now, <))
				timerclear(&aitv.it_value);
			else
				timersub(&aitv.it_value, &now,
				    &aitv.it_value);
		}
	}

	return (copyout(&aitv, SCARG(uap, itv), sizeof (struct itimerval)));
}
527 :
/*
 * setitimer(2) syscall: install a new interval timer, optionally
 * returning the previous value first (via sys_getitimer()).  For
 * ITIMER_REAL the it_value is converted to an absolute uptime and a
 * timeout is (re)armed; the virtual timers are just stored under the
 * itimer mutex and consumed by hardclock().
 */
int
sys_setitimer(struct proc *p, void *v, register_t *retval)
{
	struct sys_setitimer_args /* {
		syscallarg(int) which;
		syscallarg(const struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct sys_getitimer_args getargs;
	struct itimerval aitv;
	const struct itimerval *itvp;
	struct itimerval *oitv;
	struct process *pr = p->p_p;
	int error;
	int timo;
	int which;

	which = SCARG(uap, which);
	oitv = SCARG(uap, oitv);

	if (which < ITIMER_REAL || which > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp && (error = copyin((void *)itvp, (void *)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	/* Old value, if requested, is produced by sys_getitimer(). */
	if (oitv != NULL) {
		SCARG(&getargs, which) = which;
		SCARG(&getargs, itv) = oitv;
		if ((error = sys_getitimer(p, &getargs, retval)))
			return (error);
	}
	/* NULL itv means the caller only wanted the old value. */
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	if (which == ITIMER_REAL) {
		struct timeval ctv;

		/* Disarm any pending timeout before rearming. */
		timeout_del(&pr->ps_realit_to);
		getmicrouptime(&ctv);
		if (timerisset(&aitv.it_value)) {
			timo = tvtohz(&aitv.it_value);
			timeout_add(&pr->ps_realit_to, timo);
			/* ITIMER_REAL keeps it_value as an absolute time. */
			timeradd(&aitv.it_value, &ctv, &aitv.it_value);
		}
		pr->ps_timer[ITIMER_REAL] = aitv;
	} else {
		itimerround(&aitv.it_interval);
		mtx_enter(&itimer_mtx);
		pr->ps_timer[which] = aitv;
		mtx_leave(&itimer_mtx);
	}

	return (0);
}
584 :
/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(void *arg)
{
	struct process *pr = arg;
	struct itimerval *tp = &pr->ps_timer[ITIMER_REAL];

	prsignal(pr, SIGALRM);
	/* One-shot timer: clear it and stop. */
	if (!timerisset(&tp->it_interval)) {
		timerclear(&tp->it_value);
		return;
	}
	/*
	 * Step it_value forward by whole intervals until it lies in
	 * the future; missed expirations collapse into one signal.
	 */
	for (;;) {
		struct timeval ctv, ntv;
		int timo;

		timeradd(&tp->it_value, &tp->it_interval, &tp->it_value);
		getmicrouptime(&ctv);
		if (timercmp(&tp->it_value, &ctv, >)) {
			ntv = tp->it_value;
			timersub(&ntv, &ctv, &ntv);
			timo = tvtohz(&ntv) - 1;
			if (timo <= 0)
				timo = 1;
			/* Don't rearm for a process that is exiting. */
			if ((pr->ps_flags & PS_EXITING) == 0)
				timeout_add(&pr->ps_realit_to, timo);
			return;
		}
	}
}
622 :
/*
 * Validate a timespec, clamping absurdly large second counts.
 * Returns 0 when *ts is usable, EINVAL otherwise.
 */
int
timespecfix(struct timespec *ts)
{
	/* Negative times and out-of-range nanoseconds are invalid. */
	if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000 || ts->tv_sec < 0)
		return (EINVAL);
	/* Cap the seconds field at 1e8 (~3 years). */
	if (ts->tv_sec > 100000000)
		ts->tv_sec = 100000000;
	return (0);
}
636 :
637 : /*
638 : * Check that a proposed value to load into the .it_value or
639 : * .it_interval part of an interval timer is acceptable.
640 : */
641 : int
642 0 : itimerfix(struct timeval *tv)
643 : {
644 :
645 0 : if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
646 0 : tv->tv_usec < 0 || tv->tv_usec >= 1000000)
647 0 : return (EINVAL);
648 :
649 0 : if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
650 0 : tv->tv_usec = tick;
651 :
652 0 : return (0);
653 0 : }
654 :
655 : /*
656 : * Nonzero timer interval smaller than the resolution of the
657 : * system clock are rounded up.
658 : */
659 : void
660 0 : itimerround(struct timeval *tv)
661 : {
662 0 : if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
663 0 : tv->tv_usec = tick;
664 0 : }
665 :
/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 *
 * Returns 1 while the timer is still running, 0 once it has expired.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{
	mtx_enter(&itimer_mtx);
	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		/* Borrow one second so the subtraction below works. */
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value)) {
		mtx_leave(&itimer_mtx);
		return (1);
	}
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		/* Reload, minus the carried-over overshoot (usec). */
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;		/* sec is already 0 */
	mtx_leave(&itimer_mtx);
	return (0);
}
709 :
710 : /*
711 : * ratecheck(): simple time-based rate-limit checking. see ratecheck(9)
712 : * for usage and rationale.
713 : */
714 : int
715 0 : ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
716 : {
717 0 : struct timeval tv, delta;
718 : int rv = 0;
719 :
720 0 : getmicrouptime(&tv);
721 :
722 0 : timersub(&tv, lasttime, &delta);
723 :
724 : /*
725 : * check for 0,0 is so that the message will be seen at least once,
726 : * even if interval is huge.
727 : */
728 0 : if (timercmp(&delta, mininterval, >=) ||
729 0 : (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
730 0 : *lasttime = tv;
731 : rv = 1;
732 0 : }
733 :
734 0 : return (rv);
735 0 : }
736 :
/*
 * ppsratecheck(): packets (or events) per second limitation.
 * Returns nonzero while the caller is under maxpps for the current
 * one-second window.  maxpps == 0 always denies; maxpps < 0 always
 * permits.  *curpps counts events in the window regardless.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval now, elapsed;
	int ok;

	microuptime(&now);
	timersub(&now, lasttime, &elapsed);

	/*
	 * The zeroed-*lasttime case permits the very first event, so
	 * the message is seen at least once.  Once more than a second
	 * has passed since the last window update, start a fresh
	 * window and reset the counter.
	 *
	 * We increment *curpps even in the *curpps < maxpps case, as
	 * some callers use it for statistics as well.
	 */
	if (maxpps == 0)
		ok = 0;
	else if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    elapsed.tv_sec >= 1) {
		*lasttime = now;
		*curpps = 0;
		ok = 1;
	} else if (maxpps < 0)
		ok = 1;
	else
		ok = (*curpps < maxpps);

#if 1 /*DIAGNOSTIC?*/
	/* be careful about wrap-around */
	if (*curpps + 1 > *curpps)
		*curpps = *curpps + 1;
#else
	/*
	 * assume that there's not too many calls to this function.
	 * not sure if the assumption holds, as it depends on *caller's*
	 * behavior, not the behavior of this function.
	 * IMHO it is wrong to make assumption on the caller's behavior,
	 * so the above #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (ok);
}
789 :
790 :
#define RESETTODR_PERIOD	1800	/* seconds between RTC writebacks */

void periodic_resettodr(void *);
void perform_resettodr(void *);

/* Timeout fires periodically; the actual RTC write runs as a task. */
struct timeout resettodr_to = TIMEOUT_INITIALIZER(periodic_resettodr, NULL);
struct task resettodr_task = TASK_INITIALIZER(perform_resettodr, NULL);
798 :
/*
 * Timeout handler: defer the RTC write to the systq task queue
 * (timeouts run in a context where resettodr() is not safe to call
 * directly).
 */
void
periodic_resettodr(void *arg __unused)
{
	task_add(systq, &resettodr_task);
}
804 :
/*
 * Task handler: write the current time back to the RTC and
 * reschedule the periodic timeout.
 */
void
perform_resettodr(void *arg __unused)
{
	resettodr();
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}
811 :
/* Arm the periodic RTC writeback (every RESETTODR_PERIOD seconds). */
void
start_periodic_resettodr(void)
{
	timeout_add_sec(&resettodr_to, RESETTODR_PERIOD);
}
817 :
/* Cancel the periodic RTC writeback: both the timeout and any queued task. */
void
stop_periodic_resettodr(void)
{
	timeout_del(&resettodr_to);
	task_del(systq, &resettodr_task);
}
|