/*	$OpenBSD: vfs_lockf.c,v 1.25 2018/02/26 13:43:51 mpi Exp $	*/
/*	$NetBSD: vfs_lockf.c,v 1.7 1996/02/04 02:18:21 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/unistd.h>

struct pool lockfpool;

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

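/*
 * Owner-match flags for lf_findoverlap(): consider only locks owned
 * by the requesting lock's owner (SELF), only locks belonging to
 * other owners (OTHERS), or both.
 */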
#define SELF	0x1
#define OTHERS	0x2

#ifdef LOCKF_DEBUG

#define DEBUG_SETLOCK	0x01
#define DEBUG_CLEARLOCK	0x02
#define DEBUG_GETLOCK	0x04
#define DEBUG_FINDOVR	0x08
#define DEBUG_SPLIT	0x10
#define DEBUG_WAKELOCK	0x20

int	lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK;

#define DPRINTF(args, level)	if (lockf_debug & (level)) printf args
#else
#define DPRINTF(args, level)
#endif

void
lf_init(void)
{
	pool_init(&lockfpool, sizeof(struct lockf), 0, IPL_NONE, PR_WAITOK,
	    "lockfpl", NULL);
}

struct lockf *lf_alloc(uid_t, int);
void lf_free(struct lockf *);

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're totally cut off.
 */
int maxlocksperuid = 1024;

/*
 * 3 options for allowfail.
 * 0 - always allocate.  1 - cutoff at limit.  2 - cutoff at double limit.
 */
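/*
 * For example, with the default limit a non-root user already holding
 * 1500 lockf entries can still allocate for splits (allowfail == 0)
 * and for unlocks (allowfail == 2, cut off at 2048), but new F_SETLK
 * requests (allowfail == 1) fail and surface as ENOLCK.
 */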
struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;

	uip = uid_find(uid);
	if (uid && allowfail && uip->ui_lockcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
		uid_release(uip);
		return (NULL);
	}
	uip->ui_lockcnt++;
	uid_release(uip);
	lock = pool_get(&lockfpool, PR_WAITOK);
	lock->lf_uid = uid;
	return (lock);
}

void
lf_free(struct lockf *lock)
{
	struct uidinfo *uip;

	uip = uid_find(lock->lf_uid);
	uip->ui_lockcnt--;
	uid_release(uip);
	pool_put(&lockfpool, lock);
}

/*
 * Do an advisory lock operation.
 */
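/*
 * A typical (hypothetical) caller is a filesystem's VOP_ADVLOCK
 * routine, which passes its per-inode lock list head and current
 * size, e.g.:
 *
 *	return (lf_advlock(&ip->i_lockf, ip->i_size, ap->a_id,
 *	    ap->a_op, ap->a_fl, ap->a_flags));
 *
 * The field names above are illustrative only.
 */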
int
lf_advlock(struct lockf **head, off_t size, caddr_t id, int op,
    struct flock *fl, int flags)
{
	struct proc *p = curproc;
	struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;
	case SEEK_END:
		start = size + fl->l_start;
		break;
	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0) {
		end = -1;
	} else {
		end = start + fl->l_len - 1;
		if (end < start)
			return (EINVAL);
	}
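	/*
	 * At this point [start, end] is an inclusive byte range, with
	 * end == -1 meaning "through end of file".  For example,
	 * l_whence == SEEK_SET, l_start == 100, l_len == 10 yields
	 * [100, 109], while l_len == 0 yields [100, -1].
	 */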

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == NULL) {
		if (op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

	lock = lf_alloc(p->p_ucred->cr_uid, op == F_SETLK ? 1 : 2);
	if (!lock)
		return (ENOLCK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = NULL;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = flags;
	lock->lf_pid = (flags & F_POSIX) ? p->p_p->ps_pid : -1;

	switch (op) {
	case F_SETLK:
		return (lf_setlock(lock));
	case F_UNLCK:
		error = lf_clearlock(lock);
		lf_free(lock);
		return (error);
	case F_GETLK:
		error = lf_getlock(lock, fl);
		lf_free(lock);
		return (error);
	default:
		lf_free(lock);
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Set a byte-range lock.
 */
int
lf_setlock(struct lockf *lock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SETLOCK)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return (EAGAIN);
		}
		/*
		 * We are blocked.  Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us.  MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
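		/*
		 * Example of such a cycle: process A holds a lock on
		 * [0,9] and sleeps waiting for B's lock on [10,19];
		 * if B then requests [0,9], walking the chain from
		 * the blocking lock's owner leads back to B itself,
		 * so the request fails with EDEADLK.
		 */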
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct proc *wproc;
			struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			    (wproc->p_wmesg == lockstr) &&
			    (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					lf_free(lock);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) && lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void)lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
#ifdef LOCKF_DEBUG
		if (lockf_debug & DEBUG_SETLOCK) {
			lf_print("lf_setlock", lock);
			lf_print("lf_setlock: blocking on", block);
		}
#endif /* LOCKF_DEBUG */
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
		error = tsleep(lock, priority, lockstr, 0);
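		/*
		 * lf_wakelock() clears lf_next when it wakes us, so a
		 * non-NULL lf_next here means the sleep was interrupted
		 * by a signal and we are still on the blocker's list;
		 * unhook ourselves before checking for errors.
		 */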
		if (lock->lf_next != NULL) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NULL;
		}
		if (error) {
			lf_free(lock);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;
		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;
		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;
		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp =
				    TAILQ_FIRST(&overlap->lf_blkhd))) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;
		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SETLOCK) {
		lf_print("lf_setlock: got the lock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
int
lf_clearlock(struct lockf *lock)
{
	struct lockf **head = lock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NULL)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_CLEARLOCK)
		lf_print("lf_clearlock", lock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, lock, SELF, &prev, &overlap))) {
		lf_wakelock(overlap);

		switch (ovcase) {
		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			lf_free(overlap);
			break;
		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == lock->lf_start) {
				overlap->lf_start = lock->lf_end + 1;
				break;
			}
			lf_split(overlap, lock);
			overlap->lf_next = lock->lf_next;
			break;
		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			lf_free(overlap);
			continue;
		case 4: /* overlap starts before lock */
			overlap->lf_end = lock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;
		case 5: /* overlap ends after lock */
			overlap->lf_start = lock->lf_end + 1;
			break;
		}
		break;
	}
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_CLEARLOCK)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		fl->l_pid = block->lf_pid;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf;

	prev = lock->lf_head;
	lf = *prev;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NULL);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 * may be more than one.
 */
int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

#ifdef LOCKF_DEBUG
	if (lf && lockf_debug & DEBUG_FINDOVR)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */

	*overlap = lf;
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NULL) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & DEBUG_FINDOVR)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
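		/*
		 * With lock = [s, e] and this lf = [S, E], where an
		 * end of -1 compares as "infinitely large", the cases
		 * below test, in order:
		 *	1: S == s and E == e
		 *	2: S <= s and E >= e
		 *	3: s <= S and e >= E
		 *	4: S < s and E falls inside [s, e]
		 *	5: S falls inside [s, e] and E > e
		 */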

		/* Case 0 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			DPRINTF(("no overlap\n"), DEBUG_FINDOVR);
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		/* Case 1 */
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR);
			return (1);
		}
		/* Case 2 */
		if ((lf->lf_start <= start) &&
		    (lf->lf_end == -1 || (end != -1 && lf->lf_end >= end))) {
			DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR);
			return (2);
		}
		/* Case 3 */
		if (start <= lf->lf_start &&
		    (end == -1 || (lf->lf_end != -1 && end >= lf->lf_end))) {
			DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR);
			return (3);
		}
		/* Case 4 */
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			DPRINTF(("overlap starts before lock\n"),
			    DEBUG_FINDOVR);
			return (4);
		}
		/* Case 5 */
		if ((lf->lf_start > start) && (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR);
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
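/*
 * For example, splitting lock1 = [0, 99] around the contained region
 * lock2 = [40, 59] shrinks lock1 to [0, 39] and allocates a new lock
 * [60, 99] for the remainder.  If lock2 shares a start or end with
 * lock1, only two pieces result and no allocation is needed.
 */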
void
lf_split(struct lockf *lock1, struct lockf *lock2)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & DEBUG_SPLIT) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	splitlock = lf_alloc(lock1->lf_uid, 0);
	memcpy(splitlock, lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_block.tqe_next = NULL;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;

	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
void
lf_wakelock(struct lockf *lock)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) {
		TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NULL;
		wakeup_one(wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(char *tag, struct lockf *lock)
{
	struct lockf *block;

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("thread %d", ((struct proc *)(lock->lf_id))->p_tid);
	else
		printf("id %p", lock->lf_id);
	printf(" %s, start %llx, end %llx",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", lock->lf_start, lock->lf_end);
	block = TAILQ_FIRST(&lock->lf_blkhd);
	if (block)
		printf(" block");
	TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block)
		printf(" %p,", block);
	printf("\n");
}

void
lf_printlist(char *tag, struct lockf *lock)
{
	struct lockf *lf;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("thread %d", ((struct proc *)(lf->lf_id))->p_tid);
		else
			printf("id %p", lf->lf_id);
		printf(" %s, start %llx, end %llx",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", lf->lf_start, lf->lf_end);
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */