/* $OpenBSD: kern_resource.c,v 1.58 2018/02/19 08:59:52 mpi Exp $ */
/* $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $ */

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_resource.c     8.5 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/resourcevar.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/sched.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

void    tuagg_sub(struct tusage *, struct proc *);

/*
 * Patchable maximum data and stack limits.
 */
rlim_t maxdmap = MAXDSIZ;
rlim_t maxsmap = MAXSSIZ;

/*
 * Resource controls and accounting.
 */

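/*
 * getpriority(2): return the lowest nice value (i.e. the highest
 * priority) among the processes selected by "which" and "who".
 */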
int
sys_getpriority(struct proc *curp, void *v, register_t *retval)
{
        struct sys_getpriority_args /* {
                syscallarg(int) which;
                syscallarg(id_t) who;
        } */ *uap = v;
        struct process *pr;
        int low = NZERO + PRIO_MAX + 1;

        switch (SCARG(uap, which)) {

        case PRIO_PROCESS:
                if (SCARG(uap, who) == 0)
                        pr = curp->p_p;
                else
                        pr = prfind(SCARG(uap, who));
                if (pr == NULL)
                        break;
                if (pr->ps_nice < low)
                        low = pr->ps_nice;
                break;

        case PRIO_PGRP: {
                struct pgrp *pg;

                if (SCARG(uap, who) == 0)
                        pg = curp->p_p->ps_pgrp;
                else if ((pg = pgfind(SCARG(uap, who))) == NULL)
                        break;
                LIST_FOREACH(pr, &pg->pg_members, ps_pglist)
                        if (pr->ps_nice < low)
                                low = pr->ps_nice;
                break;
        }

        case PRIO_USER:
                if (SCARG(uap, who) == 0)
                        SCARG(uap, who) = curp->p_ucred->cr_uid;
                LIST_FOREACH(pr, &allprocess, ps_list)
                        if (pr->ps_ucred->cr_uid == SCARG(uap, who) &&
                            pr->ps_nice < low)
                                low = pr->ps_nice;
                break;

        default:
                return (EINVAL);
        }
        if (low == NZERO + PRIO_MAX + 1)
                return (ESRCH);
        *retval = low - NZERO;
        return (0);
}

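/*
 * setpriority(2): apply a new nice value, via donice(), to every
 * process selected by "which" and "who"; ESRCH if none were found.
 */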
int
sys_setpriority(struct proc *curp, void *v, register_t *retval)
{
        struct sys_setpriority_args /* {
                syscallarg(int) which;
                syscallarg(id_t) who;
                syscallarg(int) prio;
        } */ *uap = v;
        struct process *pr;
        int found = 0, error = 0;

        switch (SCARG(uap, which)) {

        case PRIO_PROCESS:
                if (SCARG(uap, who) == 0)
                        pr = curp->p_p;
                else
                        pr = prfind(SCARG(uap, who));
                if (pr == NULL)
                        break;
                error = donice(curp, pr, SCARG(uap, prio));
                found++;
                break;

        case PRIO_PGRP: {
                struct pgrp *pg;

                if (SCARG(uap, who) == 0)
                        pg = curp->p_p->ps_pgrp;
                else if ((pg = pgfind(SCARG(uap, who))) == NULL)
                        break;
                LIST_FOREACH(pr, &pg->pg_members, ps_pglist) {
                        error = donice(curp, pr, SCARG(uap, prio));
                        found++;
                }
                break;
        }

        case PRIO_USER:
                if (SCARG(uap, who) == 0)
                        SCARG(uap, who) = curp->p_ucred->cr_uid;
                LIST_FOREACH(pr, &allprocess, ps_list)
                        if (pr->ps_ucred->cr_uid == SCARG(uap, who)) {
                                error = donice(curp, pr, SCARG(uap, prio));
                                found++;
                        }
                break;

        default:
                return (EINVAL);
        }
        if (found == 0)
                return (ESRCH);
        return (error);
}

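/*
 * Check permissions, clamp the requested nice value to the valid
 * range, apply it to the target process, and recompute the scheduling
 * priority of each of its threads.
 */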
int
donice(struct proc *curp, struct process *chgpr, int n)
{
        struct ucred *ucred = curp->p_ucred;
        struct proc *p;
        int s;

        if (ucred->cr_uid != 0 && ucred->cr_ruid != 0 &&
            ucred->cr_uid != chgpr->ps_ucred->cr_uid &&
            ucred->cr_ruid != chgpr->ps_ucred->cr_uid)
                return (EPERM);
        if (n > PRIO_MAX)
                n = PRIO_MAX;
        if (n < PRIO_MIN)
                n = PRIO_MIN;
        n += NZERO;
        if (n < chgpr->ps_nice && suser(curp))
                return (EACCES);
        chgpr->ps_nice = n;
        SCHED_LOCK(s);
        TAILQ_FOREACH(p, &chgpr->ps_threads, p_thr_link)
                (void)resetpriority(p);
        SCHED_UNLOCK(s);
        return (0);
}

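/*
 * setrlimit(2): copy the new limit in from userland and let
 * dosetrlimit() validate and apply it.
 */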
int
sys_setrlimit(struct proc *p, void *v, register_t *retval)
{
        struct sys_setrlimit_args /* {
                syscallarg(int) which;
                syscallarg(const struct rlimit *) rlp;
        } */ *uap = v;
        struct rlimit alim;
        int error;

        error = copyin((caddr_t)SCARG(uap, rlp), (caddr_t)&alim,
            sizeof (struct rlimit));
        if (error)
                return (error);
#ifdef KTRACE
        if (KTRPOINT(p, KTR_STRUCT))
                ktrrlimit(p, &alim);
#endif
        return (dosetrlimit(p, SCARG(uap, which), &alim));
}

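/*
 * Install a new resource limit.  Raising the hard limit requires
 * root, a shared plimit structure is copied before being modified,
 * the limit is clamped to the system maximum for that resource, and
 * stack limit changes adjust the accessible portion of the stack.
 */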
int
dosetrlimit(struct proc *p, u_int which, struct rlimit *limp)
{
        struct rlimit *alimp;
        rlim_t maxlim;
        int error;

        if (which >= RLIM_NLIMITS || limp->rlim_cur > limp->rlim_max)
                return (EINVAL);

        alimp = &p->p_rlimit[which];
        if (limp->rlim_max > alimp->rlim_max)
                if ((error = suser(p)) != 0)
                        return (error);
        if (p->p_p->ps_limit->p_refcnt > 1) {
                struct plimit *l = p->p_p->ps_limit;

                /* limcopy() can sleep, so copy before decrementing refcnt */
                p->p_p->ps_limit = limcopy(l);
                limfree(l);
                alimp = &p->p_rlimit[which];
        }

        switch (which) {
        case RLIMIT_DATA:
                maxlim = maxdmap;
                break;
        case RLIMIT_STACK:
                maxlim = maxsmap;
                break;
        case RLIMIT_NOFILE:
                maxlim = maxfiles;
                break;
        case RLIMIT_NPROC:
                maxlim = maxprocess;
                break;
        default:
                maxlim = RLIM_INFINITY;
                break;
        }

        if (limp->rlim_max > maxlim)
                limp->rlim_max = maxlim;
        if (limp->rlim_cur > limp->rlim_max)
                limp->rlim_cur = limp->rlim_max;

        if (which == RLIMIT_STACK) {
                /*
                 * Stack is allocated to the max at exec time with only
                 * "rlim_cur" bytes accessible.  If stack limit is going
                 * up make more accessible, if going down make inaccessible.
                 */
                if (limp->rlim_cur != alimp->rlim_cur) {
                        vaddr_t addr;
                        vsize_t size;
                        vm_prot_t prot;
                        struct vmspace *vm = p->p_vmspace;

                        if (limp->rlim_cur > alimp->rlim_cur) {
                                prot = PROT_READ | PROT_WRITE;
                                size = limp->rlim_cur - alimp->rlim_cur;
#ifdef MACHINE_STACK_GROWS_UP
                                addr = (vaddr_t)vm->vm_maxsaddr +
                                    alimp->rlim_cur;
#else
                                addr = (vaddr_t)vm->vm_minsaddr -
                                    limp->rlim_cur;
#endif
                        } else {
                                prot = PROT_NONE;
                                size = alimp->rlim_cur - limp->rlim_cur;
#ifdef MACHINE_STACK_GROWS_UP
                                addr = (vaddr_t)vm->vm_maxsaddr +
                                    limp->rlim_cur;
#else
                                addr = (vaddr_t)vm->vm_minsaddr -
                                    alimp->rlim_cur;
#endif
                        }
                        addr = trunc_page(addr);
                        size = round_page(size);
                        (void) uvm_map_protect(&vm->vm_map,
                            addr, addr+size, prot, FALSE);
                }
        }

        *alimp = *limp;
        return (0);
}

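/*
 * getrlimit(2): copy the requested resource limit out to userland.
 */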
int
sys_getrlimit(struct proc *p, void *v, register_t *retval)
{
        struct sys_getrlimit_args /* {
                syscallarg(int) which;
                syscallarg(struct rlimit *) rlp;
        } */ *uap = v;
        struct rlimit *alimp;
        int error;

        if (SCARG(uap, which) < 0 || SCARG(uap, which) >= RLIM_NLIMITS)
                return (EINVAL);
        alimp = &p->p_rlimit[SCARG(uap, which)];
        error = copyout(alimp, SCARG(uap, rlp), sizeof(struct rlimit));
#ifdef KTRACE
        if (error == 0 && KTRPOINT(p, KTR_STRUCT))
                ktrrlimit(p, alimp);
#endif
        return (error);
}

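/*
 * Add one thread's run time and tick counts into a struct tusage.
 */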
void
tuagg_sub(struct tusage *tup, struct proc *p)
{
        timespecadd(&tup->tu_runtime, &p->p_rtime, &tup->tu_runtime);
        tup->tu_uticks += p->p_uticks;
        tup->tu_sticks += p->p_sticks;
        tup->tu_iticks += p->p_iticks;
}

/*
 * Aggregate a single thread's immediate time counts into the running
 * totals for the thread and process
 */
void
tuagg_unlocked(struct process *pr, struct proc *p)
{
        tuagg_sub(&pr->ps_tu, p);
        tuagg_sub(&p->p_tu, p);
        timespecclear(&p->p_rtime);
        p->p_uticks = 0;
        p->p_sticks = 0;
        p->p_iticks = 0;
}

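/*
 * Locked wrapper around tuagg_unlocked() for callers that do not
 * already hold the scheduler lock.
 */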
void
tuagg(struct process *pr, struct proc *p)
{
        int s;

        SCHED_LOCK(s);
        tuagg_unlocked(pr, p);
        SCHED_UNLOCK(s);
}

/*
 * Transform the running time and tick information in a struct tusage
 * into user, system, and interrupt time usage.
 */
void
calctsru(struct tusage *tup, struct timespec *up, struct timespec *sp,
    struct timespec *ip)
{
        u_quad_t st, ut, it;
        int freq;

        st = tup->tu_sticks;
        ut = tup->tu_uticks;
        it = tup->tu_iticks;

        if (st + ut + it == 0) {
                timespecclear(up);
                timespecclear(sp);
                if (ip != NULL)
                        timespecclear(ip);
                return;
        }

        freq = stathz ? stathz : hz;

        st = st * 1000000000 / freq;
        sp->tv_sec = st / 1000000000;
        sp->tv_nsec = st % 1000000000;
        ut = ut * 1000000000 / freq;
        up->tv_sec = ut / 1000000000;
        up->tv_nsec = ut % 1000000000;
        if (ip != NULL) {
                it = it * 1000000000 / freq;
                ip->tv_sec = it / 1000000000;
                ip->tv_nsec = it % 1000000000;
        }
}

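/*
 * Same as calctsru(), but return the results as struct timevals.
 */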
void
calcru(struct tusage *tup, struct timeval *up, struct timeval *sp,
    struct timeval *ip)
{
        struct timespec u, s, i;

        calctsru(tup, &u, &s, ip != NULL ? &i : NULL);
        TIMESPEC_TO_TIMEVAL(up, &u);
        TIMESPEC_TO_TIMEVAL(sp, &s);
        if (ip != NULL)
                TIMESPEC_TO_TIMEVAL(ip, &i);
}

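/*
 * getrusage(2): collect resource usage via dogetrusage() and copy it
 * out to userland.
 */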
int
sys_getrusage(struct proc *p, void *v, register_t *retval)
{
        struct sys_getrusage_args /* {
                syscallarg(int) who;
                syscallarg(struct rusage *) rusage;
        } */ *uap = v;
        struct rusage ru;
        int error;

        error = dogetrusage(p, SCARG(uap, who), &ru);
        if (error == 0) {
                error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
#ifdef KTRACE
                if (error == 0 && KTRPOINT(p, KTR_STRUCT))
                        ktrrusage(p, &ru);
#endif
        }
        return (error);
}

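/*
 * Gather resource usage for the calling thread, for the whole process
 * (dead and living threads combined), or for its terminated children.
 */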
int
dogetrusage(struct proc *p, int who, struct rusage *rup)
{
        struct process *pr = p->p_p;
        struct proc *q;

        switch (who) {

        case RUSAGE_SELF:
                /* start with the sum of dead threads, if any */
                if (pr->ps_ru != NULL)
                        *rup = *pr->ps_ru;
                else
                        memset(rup, 0, sizeof(*rup));

                /* add on all living threads */
                TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
                        ruadd(rup, &q->p_ru);
                        tuagg(pr, q);
                }

                calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
                break;

        case RUSAGE_THREAD:
                *rup = p->p_ru;
                calcru(&p->p_tu, &rup->ru_utime, &rup->ru_stime, NULL);
                break;

        case RUSAGE_CHILDREN:
                *rup = pr->ps_cru;
                break;

        default:
                return (EINVAL);
        }
        return (0);
}

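/*
 * Accumulate one struct rusage into another: sum the times and the
 * counters, and keep the larger of the two maximum RSS values.
 */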
void
ruadd(struct rusage *ru, struct rusage *ru2)
{
        long *ip, *ip2;
        int i;

        timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
        timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
        if (ru->ru_maxrss < ru2->ru_maxrss)
                ru->ru_maxrss = ru2->ru_maxrss;
        ip = &ru->ru_first; ip2 = &ru2->ru_first;
        for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
                *ip++ += *ip2++;
}

struct pool plimit_pool;

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
struct plimit *
limcopy(struct plimit *lim)
{
        struct plimit *newlim;
        static int initialized;

        if (!initialized) {
                pool_init(&plimit_pool, sizeof(struct plimit), 0, IPL_NONE,
                    PR_WAITOK, "plimitpl", NULL);
                initialized = 1;
        }

        newlim = pool_get(&plimit_pool, PR_WAITOK);
        memcpy(newlim->pl_rlimit, lim->pl_rlimit,
            sizeof(struct rlimit) * RLIM_NLIMITS);
        newlim->p_refcnt = 1;
        return (newlim);
}

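/*
 * Drop a reference to a plimit structure, freeing it once the last
 * reference is gone.
 */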
void
limfree(struct plimit *lim)
{
        if (--lim->p_refcnt > 0)
                return;
        pool_put(&plimit_pool, lim);
}