Line data Source code
1 : /* $OpenBSD: ffs_alloc.c,v 1.108 2016/05/23 20:47:49 tb Exp $ */
2 : /* $NetBSD: ffs_alloc.c,v 1.11 1996/05/11 18:27:09 mycroft Exp $ */
3 :
4 : /*
5 : * Copyright (c) 2002 Networks Associates Technology, Inc.
6 : * All rights reserved.
7 : *
8 : * This software was developed for the FreeBSD Project by Marshall
9 : * Kirk McKusick and Network Associates Laboratories, the Security
10 : * Research Division of Network Associates, Inc. under DARPA/SPAWAR
11 : * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
12 : * research program.
13 : *
14 : * Copyright (c) 1982, 1986, 1989, 1993
15 : * The Regents of the University of California. All rights reserved.
16 : *
17 : * Redistribution and use in source and binary forms, with or without
18 : * modification, are permitted provided that the following conditions
19 : * are met:
20 : * 1. Redistributions of source code must retain the above copyright
21 : * notice, this list of conditions and the following disclaimer.
22 : * 2. Redistributions in binary form must reproduce the above copyright
23 : * notice, this list of conditions and the following disclaimer in the
24 : * documentation and/or other materials provided with the distribution.
25 : * 3. Neither the name of the University nor the names of its contributors
26 : * may be used to endorse or promote products derived from this software
27 : * without specific prior written permission.
28 : *
29 : * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 : * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 : * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 : * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 : * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 : * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 : * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 : * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 : * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 : * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 : * SUCH DAMAGE.
40 : *
41 : * @(#)ffs_alloc.c 8.11 (Berkeley) 10/27/94
42 : */
43 :
44 : #include <sys/param.h>
45 : #include <sys/systm.h>
46 : #include <sys/buf.h>
47 : #include <sys/vnode.h>
48 : #include <sys/mount.h>
49 : #include <sys/syslog.h>
50 : #include <sys/stdint.h>
51 : #include <sys/time.h>
52 :
53 : #include <ufs/ufs/quota.h>
54 : #include <ufs/ufs/inode.h>
55 : #include <ufs/ufs/ufsmount.h>
56 : #include <ufs/ufs/ufs_extern.h>
57 :
58 : #include <ufs/ffs/fs.h>
59 : #include <ufs/ffs/ffs_extern.h>
60 :
61 : #define ffs_fserr(fs, uid, cp) do { \
62 : log(LOG_ERR, "uid %u on %s: %s\n", (uid), \
63 : (fs)->fs_fsmnt, (cp)); \
64 : } while (0)
65 :
66 : daddr_t ffs_alloccg(struct inode *, int, daddr_t, int);
67 : struct buf * ffs_cgread(struct fs *, struct inode *, int);
68 : daddr_t ffs_alloccgblk(struct inode *, struct buf *, daddr_t);
69 : ufsino_t ffs_dirpref(struct inode *);
70 : daddr_t ffs_fragextend(struct inode *, int, daddr_t, int, int);
71 : daddr_t ffs_hashalloc(struct inode *, int, daddr_t, int,
72 : daddr_t (*)(struct inode *, int, daddr_t, int));
73 : daddr_t ffs_nodealloccg(struct inode *, int, daddr_t, int);
74 : daddr_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);
75 :
76 : static const struct timeval fserr_interval = { 2, 0 };
77 :
78 :
79 : /*
80 : * Allocate a block in the file system.
81 : *
82 : * The size of the requested block is given, which must be some
83 : * multiple of fs_fsize and <= fs_bsize.
84 : * A preference may be optionally specified. If a preference is given
85 : * the following hierarchy is used to allocate a block:
86 : * 1) allocate the requested block.
87 : * 2) allocate a rotationally optimal block in the same cylinder.
88 : * 3) allocate a block in the same cylinder group.
89 : * 4) quadratically rehash into other cylinder groups, until an
90 : * available block is located.
91 : * If no block preference is given the following hierarchy is used
92 : * to allocate a block:
93 : * 1) allocate a block in the cylinder group that contains the
94 : * inode for the file.
95 : * 2) quadratically rehash into other cylinder groups, until an
96 : * available block is located.
97 : */
98 : int
99 0 : ffs_alloc(struct inode *ip, daddr_t lbn, daddr_t bpref, int size,
100 : struct ucred *cred, daddr_t *bnp)
101 : {
102 : static struct timeval fsfull_last;
103 : struct fs *fs;
104 : daddr_t bno;
105 : int cg;
106 : int error;
107 :
108 0 : *bnp = 0;
109 0 : fs = ip->i_fs;
110 : #ifdef DIAGNOSTIC
111 0 : if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
112 0 : printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
113 0 : ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
114 0 : panic("ffs_alloc: bad size");
115 : }
116 0 : if (cred == NOCRED)
117 0 : panic("ffs_alloc: missing credential");
118 : #endif /* DIAGNOSTIC */
119 0 : if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
120 : goto nospace;
121 0 : if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
122 : goto nospace;
123 :
124 0 : if ((error = ufs_quota_alloc_blocks(ip, btodb(size), cred)) != 0)
125 0 : return (error);
126 :
127 : /*
128 : * Start allocation in the preferred block's cylinder group or
129 : * the file's inode's cylinder group if no preferred block was
130 : * specified.
131 : */
132 0 : if (bpref >= fs->fs_size)
133 0 : bpref = 0;
134 0 : if (bpref == 0)
135 0 : cg = ino_to_cg(fs, ip->i_number);
136 : else
137 0 : cg = dtog(fs, bpref);
138 :
139 : /* Try allocating a block. */
140 0 : bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
141 0 : if (bno > 0) {
142 : /* allocation successful, update inode data */
143 0 : DIP_ADD(ip, blocks, btodb(size));
144 0 : ip->i_flag |= IN_CHANGE | IN_UPDATE;
145 0 : *bnp = bno;
146 0 : return (0);
147 : }
148 :
149 : /* Restore user's disk quota because allocation failed. */
150 0 : (void) ufs_quota_free_blocks(ip, btodb(size), cred);
151 :
152 : nospace:
153 0 : if (ratecheck(&fsfull_last, &fserr_interval)) {
154 0 : ffs_fserr(fs, cred->cr_uid, "file system full");
155 0 : uprintf("\n%s: write failed, file system is full\n",
156 : fs->fs_fsmnt);
157 0 : }
158 0 : return (ENOSPC);
159 0 : }
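A note on the size argument: as the header comment says, the requested size must be a whole number of fragments no larger than one block, which is what the DIAGNOSTIC fragoff() check enforces. A minimal standalone sketch of the same constraint (valid_alloc_size is a hypothetical helper, not part of this file; it assumes fs_fsize divides fs_bsize, as FFS guarantees):

static int
valid_alloc_size(const struct fs *fs, int size)
{
	/* a positive multiple of the fragment size, at most one full block */
	return (size > 0 && size <= fs->fs_bsize &&
	    size % fs->fs_fsize == 0);
}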
160 :
161 : /*
162 : * Reallocate a fragment to a bigger size
163 : *
164 : * The number and size of the old block are given, and a preference
165 : * and new size are also specified. The allocator attempts to extend
166 : * the original block. Failing that, the regular block allocator is
167 : * invoked to get an appropriate block.
168 : */
169 : int
170 0 : ffs_realloccg(struct inode *ip, daddr_t lbprev, daddr_t bpref, int osize,
171 : int nsize, struct ucred *cred, struct buf **bpp, daddr_t *blknop)
172 : {
173 : static struct timeval fsfull_last;
174 : struct fs *fs;
175 0 : struct buf *bp = NULL;
176 : daddr_t quota_updated = 0;
177 : int cg, request, error;
178 : daddr_t bprev, bno;
179 :
180 0 : if (bpp != NULL)
181 0 : *bpp = NULL;
182 0 : fs = ip->i_fs;
183 : #ifdef DIAGNOSTIC
184 0 : if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
185 0 : (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
186 0 : printf(
187 : "dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
188 0 : ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
189 0 : panic("ffs_realloccg: bad size");
190 : }
191 0 : if (cred == NOCRED)
192 0 : panic("ffs_realloccg: missing credential");
193 : #endif /* DIAGNOSTIC */
194 0 : if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
195 : goto nospace;
196 :
197 0 : bprev = DIP(ip, db[lbprev]);
198 :
199 0 : if (bprev == 0) {
200 0 : printf("dev = 0x%x, bsize = %d, bprev = %lld, fs = %s\n",
201 0 : ip->i_dev, fs->fs_bsize, (long long)bprev, fs->fs_fsmnt);
202 0 : panic("ffs_realloccg: bad bprev");
203 : }
204 :
205 : /*
206 : * Allocate the extra space in the buffer.
207 : */
208 0 : if (bpp != NULL) {
209 0 : if ((error = bread(ITOV(ip), lbprev, fs->fs_bsize, &bp)) != 0)
210 : goto error;
211 0 : buf_adjcnt(bp, osize);
212 0 : }
213 :
214 0 : if ((error = ufs_quota_alloc_blocks(ip, btodb(nsize - osize), cred))
215 0 : != 0)
216 : goto error;
217 :
218 : quota_updated = btodb(nsize - osize);
219 :
220 : /*
221 : * Check for extension in the existing location.
222 : */
223 0 : cg = dtog(fs, bprev);
224 0 : if ((bno = ffs_fragextend(ip, cg, bprev, osize, nsize)) != 0) {
225 0 : DIP_ADD(ip, blocks, btodb(nsize - osize));
226 0 : ip->i_flag |= IN_CHANGE | IN_UPDATE;
227 0 : if (bpp != NULL) {
228 0 : if (bp->b_blkno != fsbtodb(fs, bno))
229 0 : panic("ffs_realloccg: bad blockno");
230 : #ifdef DIAGNOSTIC
231 0 : if (nsize > bp->b_bufsize)
232 0 : panic("ffs_realloccg: small buf");
233 : #endif
234 0 : buf_adjcnt(bp, nsize);
235 0 : bp->b_flags |= B_DONE;
236 0 : memset(bp->b_data + osize, 0, nsize - osize);
237 0 : *bpp = bp;
238 0 : }
239 0 : if (blknop != NULL) {
240 0 : *blknop = bno;
241 0 : }
242 0 : return (0);
243 : }
244 : /*
245 : * Allocate a new disk location.
246 : */
247 0 : if (bpref >= fs->fs_size)
248 0 : bpref = 0;
249 0 : switch (fs->fs_optim) {
250 : case FS_OPTSPACE:
251 : /*
252 : * Allocate an exact sized fragment. Although this makes
253 : * best use of space, we will waste time relocating it if
254 : * the file continues to grow. If the fragmentation is
255 : * less than half of the minimum free reserve, we choose
256 : * to begin optimizing for time.
257 : */
258 : request = nsize;
259 0 : if (fs->fs_minfree < 5 ||
260 0 : fs->fs_cstotal.cs_nffree >
261 0 : fs->fs_dsize * fs->fs_minfree / (2 * 100))
262 : break;
263 0 : fs->fs_optim = FS_OPTTIME;
264 0 : break;
265 : case FS_OPTTIME:
266 : /*
267 : * At this point we have discovered a file that is trying to
268 : * grow a small fragment to a larger fragment. To save time,
269 : * we allocate a full sized block, then free the unused portion.
270 : * If the file continues to grow, the `ffs_fragextend' call
271 : * above will be able to grow it in place without further
272 : * copying. If aberrant programs cause disk fragmentation to
273 : * grow within 2% of the free reserve, we choose to begin
274 : * optimizing for space.
275 : */
276 0 : request = fs->fs_bsize;
277 0 : if (fs->fs_cstotal.cs_nffree <
278 0 : fs->fs_dsize * (fs->fs_minfree - 2) / 100)
279 : break;
280 0 : fs->fs_optim = FS_OPTSPACE;
281 0 : break;
282 : default:
283 0 : printf("dev = 0x%x, optim = %d, fs = %s\n",
284 0 : ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
285 0 : panic("ffs_realloccg: bad optim");
286 : /* NOTREACHED */
287 : }
288 0 : bno = ffs_hashalloc(ip, cg, bpref, request, ffs_alloccg);
289 0 : if (bno <= 0)
290 : goto nospace;
291 :
292 0 : (void) uvm_vnp_uncache(ITOV(ip));
293 0 : if (!DOINGSOFTDEP(ITOV(ip)))
294 0 : ffs_blkfree(ip, bprev, (long)osize);
295 0 : if (nsize < request)
296 0 : ffs_blkfree(ip, bno + numfrags(fs, nsize),
297 0 : (long)(request - nsize));
298 0 : DIP_ADD(ip, blocks, btodb(nsize - osize));
299 0 : ip->i_flag |= IN_CHANGE | IN_UPDATE;
300 0 : if (bpp != NULL) {
301 0 : bp->b_blkno = fsbtodb(fs, bno);
302 : #ifdef DIAGNOSTIC
303 0 : if (nsize > bp->b_bufsize)
304 0 : panic("ffs_realloccg: small buf 2");
305 : #endif
306 0 : buf_adjcnt(bp, nsize);
307 0 : bp->b_flags |= B_DONE;
308 0 : memset(bp->b_data + osize, 0, nsize - osize);
309 0 : *bpp = bp;
310 0 : }
311 0 : if (blknop != NULL) {
312 0 : *blknop = bno;
313 0 : }
314 0 : return (0);
315 :
316 : nospace:
317 0 : if (ratecheck(&fsfull_last, &fserr_interval)) {
318 0 : ffs_fserr(fs, cred->cr_uid, "file system full");
319 0 : uprintf("\n%s: write failed, file system is full\n",
320 : fs->fs_fsmnt);
321 0 : }
322 0 : error = ENOSPC;
323 :
324 : error:
325 0 : if (bp != NULL) {
326 0 : brelse(bp);
327 0 : bp = NULL;
328 0 : }
329 :
330 : /*
331 : * Restore user's disk quota because allocation failed.
332 : */
333 0 : if (quota_updated != 0)
334 0 : (void)ufs_quota_free_blocks(ip, quota_updated, cred);
335 :
336 0 : return error;
337 0 : }
338 :
339 : /*
340 : * Allocate an inode in the file system.
341 : *
342 : * If allocating a directory, use ffs_dirpref to select the inode.
343 : * If allocating in a directory, the following hierarchy is followed:
344 : * 1) allocate the preferred inode.
345 : * 2) allocate an inode in the same cylinder group.
346 : * 3) quadratically rehash into other cylinder groups, until an
347 : * available inode is located.
348 : * If no inode preference is given the following hierarchy is used
349 : * to allocate an inode:
350 : * 1) allocate an inode in cylinder group 0.
351 : * 2) quadratically rehash into other cylinder groups, until an
352 : * available inode is located.
353 : */
354 : int
355 0 : ffs_inode_alloc(struct inode *pip, mode_t mode, struct ucred *cred,
356 : struct vnode **vpp)
357 : {
358 : static struct timeval fsnoinodes_last;
359 0 : struct vnode *pvp = ITOV(pip);
360 : struct fs *fs;
361 : struct inode *ip;
362 : ufsino_t ino, ipref;
363 : int cg, error;
364 :
365 0 : *vpp = NULL;
366 0 : fs = pip->i_fs;
367 0 : if (fs->fs_cstotal.cs_nifree == 0)
368 : goto noinodes;
369 :
370 0 : if ((mode & IFMT) == IFDIR)
371 0 : ipref = ffs_dirpref(pip);
372 : else
373 0 : ipref = pip->i_number;
374 0 : if (ipref >= fs->fs_ncg * fs->fs_ipg)
375 0 : ipref = 0;
376 0 : cg = ino_to_cg(fs, ipref);
377 :
378 : /*
379 : * Track number of dirs created one after another
380 : * in the same cg without intervening files.
381 : */
382 0 : if ((mode & IFMT) == IFDIR) {
383 0 : if (fs->fs_contigdirs[cg] < 255)
384 0 : fs->fs_contigdirs[cg]++;
385 : } else {
386 0 : if (fs->fs_contigdirs[cg] > 0)
387 0 : fs->fs_contigdirs[cg]--;
388 : }
389 0 : ino = (ufsino_t)ffs_hashalloc(pip, cg, ipref, mode, ffs_nodealloccg);
390 0 : if (ino == 0)
391 : goto noinodes;
392 0 : error = VFS_VGET(pvp->v_mount, ino, vpp);
393 0 : if (error) {
394 0 : ffs_inode_free(pip, ino, mode);
395 0 : return (error);
396 : }
397 :
398 0 : ip = VTOI(*vpp);
399 :
400 0 : if (DIP(ip, mode)) {
401 0 : printf("mode = 0%o, inum = %u, fs = %s\n",
402 0 : DIP(ip, mode), ip->i_number, fs->fs_fsmnt);
403 0 : panic("ffs_valloc: dup alloc");
404 : }
405 :
406 0 : if (DIP(ip, blocks)) {
407 0 : printf("free inode %s/%d had %lld blocks\n",
408 0 : fs->fs_fsmnt, ino, (long long)DIP(ip, blocks));
409 0 : DIP_ASSIGN(ip, blocks, 0);
410 : }
411 :
412 0 : DIP_ASSIGN(ip, flags, 0);
413 :
414 : /*
415 : * Set up a new generation number for this inode.
416 : * XXX - just increment for now, this is wrong! (millert)
417 : * Need a way to preserve randomization.
418 : */
419 0 : if (DIP(ip, gen) != 0)
420 0 : DIP_ADD(ip, gen, 1);
421 0 : if (DIP(ip, gen) == 0)
422 0 : DIP_ASSIGN(ip, gen, arc4random() & INT_MAX);
423 :
424 0 : if (DIP(ip, gen) == 0 || DIP(ip, gen) == -1)
425 0 : DIP_ASSIGN(ip, gen, 1); /* Shouldn't happen */
426 :
427 0 : return (0);
428 :
429 : noinodes:
430 0 : if (ratecheck(&fsnoinodes_last, &fserr_interval)) {
431 0 : ffs_fserr(fs, cred->cr_uid, "out of inodes");
432 0 : uprintf("\n%s: create/symlink failed, no inodes free\n",
433 : fs->fs_fsmnt);
434 0 : }
435 0 : return (ENOSPC);
436 0 : }
437 :
438 : /*
439 : * Find a cylinder group to place a directory.
440 : *
441 : * The policy implemented by this algorithm is to allocate a
442 : * directory inode in the same cylinder group as its parent
443 : * directory, but also to reserve space for its files' inodes
444 : * and data. Restrict the number of directories which may be
445 : * allocated one after another in the same cylinder group
446 : * without intervening allocation of files.
447 : *
448 : * If we allocate a first level directory then force allocation
449 : * in another cylinder group.
450 : */
451 : ufsino_t
452 0 : ffs_dirpref(struct inode *pip)
453 : {
454 : struct fs *fs;
455 : int cg, prefcg, dirsize, cgsize;
456 : int avgifree, avgbfree, avgndir, curdirsize;
457 : int minifree, minbfree, maxndir;
458 : int mincg, minndir;
459 : int maxcontigdirs;
460 :
461 0 : fs = pip->i_fs;
462 :
463 0 : avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
464 0 : avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
465 0 : avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;
466 :
467 : /*
468 : * Force allocation in another cg if creating a first level dir.
469 : */
470 0 : if (ITOV(pip)->v_flag & VROOT) {
471 0 : prefcg = arc4random_uniform(fs->fs_ncg);
472 : mincg = prefcg;
473 0 : minndir = fs->fs_ipg;
474 0 : for (cg = prefcg; cg < fs->fs_ncg; cg++)
475 0 : if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
476 0 : fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
477 0 : fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
478 : mincg = cg;
479 : minndir = fs->fs_cs(fs, cg).cs_ndir;
480 0 : }
481 0 : for (cg = 0; cg < prefcg; cg++)
482 0 : if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
483 0 : fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
484 0 : fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
485 : mincg = cg;
486 : minndir = fs->fs_cs(fs, cg).cs_ndir;
487 0 : }
488 : cg = mincg;
489 0 : goto end;
490 : } else
491 0 : prefcg = ino_to_cg(fs, pip->i_number);
492 :
493 : /*
494 : * Count various limits which are used for
495 : * optimal allocation of a directory inode.
496 : */
497 0 : maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
498 0 : minifree = avgifree - (avgifree / 4);
499 0 : if (minifree < 1)
500 : minifree = 1;
501 0 : minbfree = avgbfree - (avgbfree / 4);
502 0 : if (minbfree < 1)
503 : minbfree = 1;
504 :
505 0 : cgsize = fs->fs_fsize * fs->fs_fpg;
506 0 : dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
507 0 : curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
508 0 : if (dirsize < curdirsize)
509 0 : dirsize = curdirsize;
510 0 : if (dirsize <= 0)
511 0 : maxcontigdirs = 0; /* dirsize overflowed */
512 : else
513 0 : maxcontigdirs = min(avgbfree * fs->fs_bsize / dirsize, 255);
514 0 : if (fs->fs_avgfpdir > 0)
515 0 : maxcontigdirs = min(maxcontigdirs,
516 0 : fs->fs_ipg / fs->fs_avgfpdir);
517 0 : if (maxcontigdirs == 0)
518 0 : maxcontigdirs = 1;
519 :
520 : /*
521 : * Limit number of dirs in one cg and reserve space for
522 : * regular files, but only if we have no deficit in
523 : * inodes or space.
524 : *
525 : * We are trying to find a suitable cylinder group nearby
526 : * our preferred cylinder group to place a new directory.
527 : * We scan from our preferred cylinder group forward looking
528 : * for a cylinder group that meets our criterion. If we get
529 : * to the final cylinder group and do not find anything,
530 : * we start scanning backwards from our preferred cylinder
531 : * group. The ideal would be to alternate looking forward
532 : * and backward, but that is just too complex to code for
533 : * the gain it would get. The most likely place where the
534 : * backward scan would take effect is when we start near
535 : * the end of the filesystem and do not find anything from
536 : * where we are to the end. In that case, scanning backward
537 : * will likely find us a suitable cylinder group much closer
538 : * to our desired location than if we were to start scanning
539 : * forward from the beginning of the filesystem.
540 : */
541 0 : for (cg = prefcg; cg < fs->fs_ncg; cg++)
542 0 : if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
543 0 : fs->fs_cs(fs, cg).cs_nifree >= minifree &&
544 0 : fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
545 0 : if (fs->fs_contigdirs[cg] < maxcontigdirs)
546 : goto end;
547 : }
548 0 : for (cg = prefcg - 1; cg >= 0; cg--)
549 0 : if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
550 0 : fs->fs_cs(fs, cg).cs_nifree >= minifree &&
551 0 : fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
552 0 : if (fs->fs_contigdirs[cg] < maxcontigdirs)
553 : goto end;
554 : }
555 : /*
556 : * This is a backstop when we have a deficit in space.
557 : */
558 0 : for (cg = prefcg; cg < fs->fs_ncg; cg++)
559 0 : if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
560 : goto end;
561 0 : for (cg = prefcg - 1; cg >= 0; cg--)
562 0 : if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
563 : goto end;
564 : end:
565 0 : return ((ufsino_t)(fs->fs_ipg * cg));
566 : }
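The maxcontigdirs value computed above is what ffs_inode_alloc() compares against fs_contigdirs[cg]: it caps how many directories may be placed back to back in one cylinder group before the scan moves on. A minimal sketch of that calculation, pulled out into a hypothetical standalone helper whose parameters mirror the locals above:

static int
sketch_maxcontigdirs(int avgbfree, int bsize, int dirsize, int ipg, int avgfpdir)
{
	int maxcontigdirs;

	if (dirsize <= 0)
		maxcontigdirs = 0;		/* dirsize overflowed */
	else {
		maxcontigdirs = avgbfree * bsize / dirsize;
		if (maxcontigdirs > 255)
			maxcontigdirs = 255;
	}
	if (avgfpdir > 0 && maxcontigdirs > ipg / avgfpdir)
		maxcontigdirs = ipg / avgfpdir;
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;
	return (maxcontigdirs);
}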
567 :
568 : /*
569 : * Select the desired position for the next block in a file. The file is
570 : * logically divided into sections. The first section is composed of the
571 : * direct blocks. Each additional section contains fs_maxbpg blocks.
572 : *
573 : * If no blocks have been allocated in the first section, the policy is to
574 : * request a block in the same cylinder group as the inode that describes
575 : * the file. The first indirect is allocated immediately following the last
576 : * direct block and the data blocks for the first indirect immediately
577 : * follow it.
578 : *
579 : * If no blocks have been allocated in any other section, the indirect
580 : * block(s) are allocated in the same cylinder group as its inode in an
581 : * area reserved immediately following the inode blocks. The policy for
582 : * the data blocks is to place them in a cylinder group with a greater than
583 : * average number of free blocks. An appropriate cylinder group is found
584 : * by using a rotor that sweeps the cylinder groups. When a new group of
585 : * blocks is needed, the sweep begins in the cylinder group following the
586 : * cylinder group from which the previous allocation was made. The sweep
587 : * continues until a cylinder group with greater than the average number
588 : * of free blocks is found. If the allocation is for the first block in an
589 : * indirect block, the information on the previous allocation is unavailable;
590 : * here a best guess is made based upon the logical block number being
591 : * allocated.
592 : */
593 : int32_t
594 0 : ffs1_blkpref(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
595 : {
596 : struct fs *fs;
597 : int cg, inocg, avgbfree, startcg;
598 : uint32_t pref;
599 :
600 0 : KASSERT(indx <= 0 || bap != NULL);
601 0 : fs = ip->i_fs;
602 : /*
603 : * Allocation of indirect blocks is indicated by passing negative
604 : * values in indx: -1 for single indirect, -2 for double indirect,
605 : * -3 for triple indirect. As noted below, we attempt to allocate
606 : * the first indirect inline with the file data. For all later
607 : * indirect blocks, the data is often allocated in other cylinder
608 : * groups. However to speed random file access and to speed up
609 : * fsck, the filesystem reserves the first fs_metaspace blocks
610 : * (typically half of fs_minfree) of the data area of each cylinder
611 : * group to hold these later indirect blocks.
612 : */
613 0 : inocg = ino_to_cg(fs, ip->i_number);
614 0 : if (indx < 0) {
615 : /*
616 : * Our preference for indirect blocks is the zone at the
617 : * beginning of the inode's cylinder group data area that
618 : * we try to reserve for indirect blocks.
619 : */
620 0 : pref = cgmeta(fs, inocg);
621 : /*
622 : * If we are allocating the first indirect block, try to
623 : * place it immediately following the last direct block.
624 : */
625 0 : if (indx == -1 && lbn < NDADDR + NINDIR(fs) &&
626 0 : ip->i_din1->di_db[NDADDR - 1] != 0)
627 0 : pref = ip->i_din1->di_db[NDADDR - 1] + fs->fs_frag;
628 0 : return (pref);
629 : }
630 : /*
631 : * If we are allocating the first data block in the first indirect
632 : * block and the indirect has been allocated in the data block area,
633 : * try to place it immediately following the indirect block.
634 : */
635 0 : if (lbn == NDADDR) {
636 0 : pref = ip->i_din1->di_ib[0];
637 0 : if (pref != 0 && pref >= cgdata(fs, inocg) &&
638 0 : pref < cgbase(fs, inocg + 1))
639 0 : return (pref + fs->fs_frag);
640 : }
641 : /*
642 : * If we are at the beginning of a file, or we have already allocated
643 : * the maximum number of blocks per cylinder group, or we do not
644 : * have a block allocated immediately preceding us, then we need
645 : * to decide where to start allocating new blocks.
646 : */
647 0 : if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
648 : /*
649 : * If we are allocating a directory data block, we want
650 : * to place it in the metadata area.
651 : */
652 0 : if ((DIP(ip, mode) & IFMT) == IFDIR)
653 0 : return (cgmeta(fs, inocg));
654 : /*
655 : * Until we fill all the direct and all the first indirect's
656 : * blocks, we try to allocate in the data area of the inode's
657 : * cylinder group.
658 : */
659 0 : if (lbn < NDADDR + NINDIR(fs))
660 0 : return (cgdata(fs, inocg));
661 : /*
662 : * Find a cylinder with greater than average number of
663 : * unused data blocks.
664 : */
665 0 : if (indx == 0 || bap[indx - 1] == 0)
666 0 : startcg = inocg + lbn / fs->fs_maxbpg;
667 : else
668 0 : startcg = dtog(fs, bap[indx - 1]) + 1;
669 0 : startcg %= fs->fs_ncg;
670 0 : avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
671 0 : for (cg = startcg; cg < fs->fs_ncg; cg++)
672 0 : if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
673 0 : fs->fs_cgrotor = cg;
674 0 : return (cgdata(fs, cg));
675 : }
676 0 : for (cg = 0; cg <= startcg; cg++)
677 0 : if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
678 0 : fs->fs_cgrotor = cg;
679 0 : return (cgdata(fs, cg));
680 : }
681 0 : return (0);
682 : }
683 : /*
684 : * Otherwise, we just always try to lay things out contiguously.
685 : */
686 0 : return (bap[indx - 1] + fs->fs_frag);
687 0 : }
688 :
689 : /*
690 : * Same as above, for UFS2.
691 : */
692 : #ifdef FFS2
693 : int64_t
694 0 : ffs2_blkpref(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
695 : {
696 : struct fs *fs;
697 : int cg, inocg, avgbfree, startcg;
698 : uint64_t pref;
699 :
700 0 : KASSERT(indx <= 0 || bap != NULL);
701 0 : fs = ip->i_fs;
702 : /*
703 : * Allocation of indirect blocks is indicated by passing negative
704 : * values in indx: -1 for single indirect, -2 for double indirect,
705 : * -3 for triple indirect. As noted below, we attempt to allocate
706 : * the first indirect inline with the file data. For all later
707 : * indirect blocks, the data is often allocated in other cylinder
708 : * groups. However to speed random file access and to speed up
709 : * fsck, the filesystem reserves the first fs_metaspace blocks
710 : * (typically half of fs_minfree) of the data area of each cylinder
711 : * group to hold these later indirect blocks.
712 : */
713 0 : inocg = ino_to_cg(fs, ip->i_number);
714 0 : if (indx < 0) {
715 : /*
716 : * Our preference for indirect blocks is the zone at the
717 : * beginning of the inode's cylinder group data area that
718 : * we try to reserve for indirect blocks.
719 : */
720 0 : pref = cgmeta(fs, inocg);
721 : /*
722 : * If we are allocating the first indirect block, try to
723 : * place it immediately following the last direct block.
724 : */
725 0 : if (indx == -1 && lbn < NDADDR + NINDIR(fs) &&
726 0 : ip->i_din2->di_db[NDADDR - 1] != 0)
727 0 : pref = ip->i_din2->di_db[NDADDR - 1] + fs->fs_frag;
728 0 : return (pref);
729 : }
730 : /*
731 : * If we are allocating the first data block in the first indirect
732 : * block and the indirect has been allocated in the data block area,
733 : * try to place it immediately following the indirect block.
734 : */
735 0 : if (lbn == NDADDR) {
736 0 : pref = ip->i_din2->di_ib[0];
737 0 : if (pref != 0 && pref >= cgdata(fs, inocg) &&
738 0 : pref < cgbase(fs, inocg + 1))
739 0 : return (pref + fs->fs_frag);
740 : }
741 : /*
742 : * If we are at the beginning of a file, or we have already allocated
743 : * the maximum number of blocks per cylinder group, or we do not
744 : * have a block allocated immediately preceding us, then we need
745 : * to decide where to start allocating new blocks.
746 : */
747 :
748 0 : if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
749 : /*
750 : * If we are allocating a directory data block, we want
751 : * to place it in the metadata area.
752 : */
753 0 : if ((DIP(ip, mode) & IFMT) == IFDIR)
754 0 : return (cgmeta(fs, inocg));
755 : /*
756 : * Until we fill all the direct and all the first indirect's
757 : * blocks, we try to allocate in the data area of the inode's
758 : * cylinder group.
759 : */
760 0 : if (lbn < NDADDR + NINDIR(fs))
761 0 : return (cgdata(fs, inocg));
762 : /*
763 : * Find a cylinder with greater than average number of
764 : * unused data blocks.
765 : */
766 0 : if (indx == 0 || bap[indx - 1] == 0)
767 0 : startcg = inocg + lbn / fs->fs_maxbpg;
768 : else
769 0 : startcg = dtog(fs, bap[indx - 1] + 1);
770 :
771 0 : startcg %= fs->fs_ncg;
772 0 : avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
773 :
774 0 : for (cg = startcg; cg < fs->fs_ncg; cg++)
775 0 : if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
776 0 : return (cgbase(fs, cg) + fs->fs_frag);
777 :
778 0 : for (cg = 0; cg < startcg; cg++)
779 0 : if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
780 0 : return (cgbase(fs, cg) + fs->fs_frag);
781 :
782 0 : return (0);
783 : }
784 :
785 : /*
786 : * Otherwise, we just always try to lay things out contiguously.
787 : */
788 0 : return (bap[indx - 1] + fs->fs_frag);
789 0 : }
790 : #endif /* FFS2 */
791 :
792 : /*
793 : * Implement the cylinder overflow algorithm.
794 : *
795 : * The policy implemented by this algorithm is:
796 : * 1) allocate the block in its requested cylinder group.
797 : * 2) quadratically rehash on the cylinder group number.
798 : * 3) brute force search for a free block.
799 : */
800 : daddr_t
801 0 : ffs_hashalloc(struct inode *ip, int cg, daddr_t pref, int size,
802 : daddr_t (*allocator)(struct inode *, int, daddr_t, int))
803 : {
804 : struct fs *fs;
805 : daddr_t result;
806 : int i, icg = cg;
807 :
808 0 : fs = ip->i_fs;
809 : /*
810 : * 1: preferred cylinder group
811 : */
812 0 : result = (*allocator)(ip, cg, pref, size);
813 0 : if (result)
814 0 : return (result);
815 : /*
816 : * 2: quadratic rehash
817 : */
818 0 : for (i = 1; i < fs->fs_ncg; i *= 2) {
819 0 : cg += i;
820 0 : if (cg >= fs->fs_ncg)
821 0 : cg -= fs->fs_ncg;
822 0 : result = (*allocator)(ip, cg, 0, size);
823 0 : if (result)
824 0 : return (result);
825 : }
826 : /*
827 : * 3: brute force search
828 : * Note that we start at i == 2, since 0 was checked initially,
829 : * and 1 is always checked in the quadratic rehash.
830 : */
831 0 : cg = (icg + 2) % fs->fs_ncg;
832 0 : for (i = 2; i < fs->fs_ncg; i++) {
833 0 : result = (*allocator)(ip, cg, 0, size);
834 0 : if (result)
835 0 : return (result);
836 0 : cg++;
837 0 : if (cg == fs->fs_ncg)
838 : cg = 0;
839 : }
840 0 : return (0);
841 0 : }
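The three passes above visit the preferred cylinder group, then groups at offsets 1, 3, 7, 15, ... from it (2^k - 1, wrapping modulo fs_ncg), then every group starting two past the original. Below is a small sketch that records the resulting probe order; hashalloc_probe_order() is hypothetical and only illustrates which cg numbers the allocator callback would be handed, assuming 0 <= icg < ncg:

static int
hashalloc_probe_order(int icg, int ncg, int *order, int maxn)
{
	int i, cg, n = 0;

	if (n < maxn)					/* 1: preferred cylinder group */
		order[n++] = icg;
	for (cg = icg, i = 1; i < ncg; i *= 2) {	/* 2: quadratic rehash */
		cg += i;
		if (cg >= ncg)
			cg -= ncg;
		if (n < maxn)
			order[n++] = cg;
	}
	cg = (icg + 2) % ncg;				/* 3: brute force over the rest */
	for (i = 2; i < ncg; i++) {
		if (n < maxn)
			order[n++] = cg;
		if (++cg == ncg)
			cg = 0;
	}
	return (n);
}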
842 :
843 : struct buf *
844 0 : ffs_cgread(struct fs *fs, struct inode *ip, int cg)
845 : {
846 0 : struct buf *bp;
847 :
848 0 : if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
849 0 : (int)fs->fs_cgsize, &bp)) {
850 0 : brelse(bp);
851 0 : return (NULL);
852 : }
853 :
854 0 : if (!cg_chkmagic((struct cg *)bp->b_data)) {
855 0 : brelse(bp);
856 0 : return (NULL);
857 : }
858 :
859 0 : return bp;
860 0 : }
861 :
862 : /*
863 : * Determine whether a fragment can be extended.
864 : *
865 : * Check to see if the necessary fragments are available, and
866 : * if they are, allocate them.
867 : */
868 : daddr_t
869 0 : ffs_fragextend(struct inode *ip, int cg, daddr_t bprev, int osize, int nsize)
870 : {
871 : struct fs *fs;
872 : struct cg *cgp;
873 : struct buf *bp;
874 : daddr_t bno;
875 : int i, frags, bbase;
876 :
877 0 : fs = ip->i_fs;
878 0 : if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
879 0 : return (0);
880 0 : frags = numfrags(fs, nsize);
881 0 : bbase = fragnum(fs, bprev);
882 0 : if (bbase > fragnum(fs, (bprev + frags - 1))) {
883 : /* cannot extend across a block boundary */
884 0 : return (0);
885 : }
886 :
887 0 : if (!(bp = ffs_cgread(fs, ip, cg)))
888 0 : return (0);
889 :
890 0 : cgp = (struct cg *)bp->b_data;
891 0 : cgp->cg_ffs2_time = cgp->cg_time = time_second;
892 :
893 0 : bno = dtogd(fs, bprev);
894 0 : for (i = numfrags(fs, osize); i < frags; i++)
895 0 : if (isclr(cg_blksfree(cgp), bno + i)) {
896 0 : brelse(bp);
897 0 : return (0);
898 : }
899 : /*
900 : * the current fragment can be extended
901 : * deduct the count on fragment being extended into
902 : * increase the count on the remaining fragment (if any)
903 : * allocate the extended piece
904 : */
905 0 : for (i = frags; i < fs->fs_frag - bbase; i++)
906 0 : if (isclr(cg_blksfree(cgp), bno + i))
907 : break;
908 0 : cgp->cg_frsum[i - numfrags(fs, osize)]--;
909 0 : if (i != frags)
910 0 : cgp->cg_frsum[i - frags]++;
911 0 : for (i = numfrags(fs, osize); i < frags; i++) {
912 0 : clrbit(cg_blksfree(cgp), bno + i);
913 0 : cgp->cg_cs.cs_nffree--;
914 0 : fs->fs_cstotal.cs_nffree--;
915 0 : fs->fs_cs(fs, cg).cs_nffree--;
916 : }
917 0 : fs->fs_fmod = 1;
918 0 : if (DOINGSOFTDEP(ITOV(ip)))
919 0 : softdep_setup_blkmapdep(bp, fs, bprev);
920 :
921 0 : bdwrite(bp);
922 0 : return (bprev);
923 0 : }
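The fragnum() comparison near the top of ffs_fragextend() is the reason an extension can fail even with free space nearby: a fragment may only grow within the fs_frag fragments of its own block. A minimal sketch of that boundary test, with a hypothetical helper name and fs_frag passed in directly:

static int
extend_stays_in_block(int fs_frag, daddr_t bprev, int newfrags)
{
	daddr_t first = bprev % fs_frag;		/* fragnum() of the old start */
	daddr_t last = (bprev + newfrags - 1) % fs_frag;	/* fragnum() of the new end */

	/* crossing a block boundary makes `last' wrap around below `first' */
	return (first <= last);
}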
924 :
925 : /*
926 : * Determine whether a block can be allocated.
927 : *
928 : * Check to see if a block of the appropriate size is available,
929 : * and if it is, allocate it.
930 : */
931 : daddr_t
932 0 : ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
933 : {
934 : struct fs *fs;
935 : struct cg *cgp;
936 : struct buf *bp;
937 : daddr_t bno, blkno;
938 : int i, frags, allocsiz;
939 :
940 0 : fs = ip->i_fs;
941 0 : if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
942 0 : return (0);
943 :
944 0 : if (!(bp = ffs_cgread(fs, ip, cg)))
945 0 : return (0);
946 :
947 0 : cgp = (struct cg *)bp->b_data;
948 0 : if (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize) {
949 0 : brelse(bp);
950 0 : return (0);
951 : }
952 :
953 0 : cgp->cg_ffs2_time = cgp->cg_time = time_second;
954 :
955 0 : if (size == fs->fs_bsize) {
956 : /* allocate and return a complete data block */
957 0 : bno = ffs_alloccgblk(ip, bp, bpref);
958 0 : bdwrite(bp);
959 0 : return (bno);
960 : }
961 : /*
962 : * check to see if any fragments are already available
963 : * allocsiz is the size which will be allocated, hacking
964 : * it down to a smaller size if necessary
965 : */
966 0 : frags = numfrags(fs, size);
967 0 : for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
968 0 : if (cgp->cg_frsum[allocsiz] != 0)
969 : break;
970 0 : if (allocsiz == fs->fs_frag) {
971 : /*
972 : * no fragments were available, so a block will be
973 : * allocated, and hacked up
974 : */
975 0 : if (cgp->cg_cs.cs_nbfree == 0) {
976 0 : brelse(bp);
977 0 : return (0);
978 : }
979 0 : bno = ffs_alloccgblk(ip, bp, bpref);
980 0 : bpref = dtogd(fs, bno);
981 0 : for (i = frags; i < fs->fs_frag; i++)
982 0 : setbit(cg_blksfree(cgp), bpref + i);
983 0 : i = fs->fs_frag - frags;
984 0 : cgp->cg_cs.cs_nffree += i;
985 0 : fs->fs_cstotal.cs_nffree += i;
986 0 : fs->fs_cs(fs, cg).cs_nffree += i;
987 0 : fs->fs_fmod = 1;
988 0 : cgp->cg_frsum[i]++;
989 0 : bdwrite(bp);
990 0 : return (bno);
991 : }
992 0 : bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
993 0 : if (bno < 0) {
994 0 : brelse(bp);
995 0 : return (0);
996 : }
997 :
998 0 : for (i = 0; i < frags; i++)
999 0 : clrbit(cg_blksfree(cgp), bno + i);
1000 0 : cgp->cg_cs.cs_nffree -= frags;
1001 0 : fs->fs_cstotal.cs_nffree -= frags;
1002 0 : fs->fs_cs(fs, cg).cs_nffree -= frags;
1003 0 : fs->fs_fmod = 1;
1004 0 : cgp->cg_frsum[allocsiz]--;
1005 0 : if (frags != allocsiz)
1006 0 : cgp->cg_frsum[allocsiz - frags]++;
1007 :
1008 0 : blkno = cgbase(fs, cg) + bno;
1009 0 : if (DOINGSOFTDEP(ITOV(ip)))
1010 0 : softdep_setup_blkmapdep(bp, fs, blkno);
1011 0 : bdwrite(bp);
1012 0 : return (blkno);
1013 0 : }
1014 :
1015 : /*
1016 : * Allocate a block in a cylinder group.
1017 : * Note that this routine only allocates fs_bsize blocks; these
1018 : * blocks may be fragmented by the routine that allocates them.
1019 : */
1020 : daddr_t
1021 0 : ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref)
1022 : {
1023 : struct fs *fs;
1024 : struct cg *cgp;
1025 : daddr_t bno, blkno;
1026 : u_int8_t *blksfree;
1027 : int cylno, cgbpref;
1028 :
1029 0 : fs = ip->i_fs;
1030 0 : cgp = (struct cg *) bp->b_data;
1031 0 : blksfree = cg_blksfree(cgp);
1032 :
1033 0 : if (bpref == 0) {
1034 0 : bpref = cgp->cg_rotor;
1035 0 : } else if ((cgbpref = dtog(fs, bpref)) != cgp->cg_cgx) {
1036 : /* map bpref to correct zone in this cg */
1037 0 : if (bpref < cgdata(fs, cgbpref))
1038 0 : bpref = cgmeta(fs, cgp->cg_cgx);
1039 : else
1040 0 : bpref = cgdata(fs, cgp->cg_cgx);
1041 : }
1042 : /*
1043 : * If the requested block is available, use it.
1044 : */
1045 0 : bno = dtogd(fs, blknum(fs, bpref));
1046 0 : if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
1047 : goto gotit;
1048 : /*
1049 : * Take the next available block in this cylinder group.
1050 : */
1051 0 : bno = ffs_mapsearch(fs, cgp, bpref, (int) fs->fs_frag);
1052 0 : if (bno < 0)
1053 0 : return (0);
1054 :
1055 : /* Update cg_rotor only if allocated from the data zone */
1056 0 : if (bno >= dtogd(fs, cgdata(fs, cgp->cg_cgx)))
1057 0 : cgp->cg_rotor = bno;
1058 :
1059 : gotit:
1060 0 : blkno = fragstoblks(fs, bno);
1061 0 : ffs_clrblock(fs, blksfree, blkno);
1062 0 : ffs_clusteracct(fs, cgp, blkno, -1);
1063 0 : cgp->cg_cs.cs_nbfree--;
1064 0 : fs->fs_cstotal.cs_nbfree--;
1065 0 : fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
1066 :
1067 0 : if (fs->fs_magic != FS_UFS2_MAGIC) {
1068 0 : cylno = cbtocylno(fs, bno);
1069 0 : cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
1070 0 : cg_blktot(cgp)[cylno]--;
1071 0 : }
1072 :
1073 0 : fs->fs_fmod = 1;
1074 0 : blkno = cgbase(fs, cgp->cg_cgx) + bno;
1075 :
1076 0 : if (DOINGSOFTDEP(ITOV(ip)))
1077 0 : softdep_setup_blkmapdep(bp, fs, blkno);
1078 :
1079 0 : return (blkno);
1080 0 : }
1081 :
1082 : /* inode allocation routine */
1083 : daddr_t
1084 0 : ffs_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode)
1085 : {
1086 : struct fs *fs;
1087 : struct cg *cgp;
1088 : struct buf *bp;
1089 : int start, len, loc, map, i;
1090 : #ifdef FFS2
1091 : struct buf *ibp = NULL;
1092 : struct ufs2_dinode *dp2;
1093 : #endif
1094 :
1095 : /*
1096 : * For efficiency, before looking at the bitmaps for free inodes,
1097 : * check the counters kept in the superblock cylinder group summaries,
1098 : * and in the cylinder group itself.
1099 : */
1100 0 : fs = ip->i_fs;
1101 0 : if (fs->fs_cs(fs, cg).cs_nifree == 0)
1102 0 : return (0);
1103 :
1104 0 : if (!(bp = ffs_cgread(fs, ip, cg)))
1105 0 : return (0);
1106 :
1107 0 : cgp = (struct cg *)bp->b_data;
1108 0 : if (cgp->cg_cs.cs_nifree == 0) {
1109 0 : brelse(bp);
1110 0 : return (0);
1111 : }
1112 :
1113 : /*
1114 : * We are committed to the allocation from now on, so update the time
1115 : * on the cylinder group.
1116 : */
1117 0 : cgp->cg_ffs2_time = cgp->cg_time = time_second;
1118 :
1119 : /*
1120 : * If there was a preferred location for the new inode, try to find it.
1121 : */
1122 0 : if (ipref) {
1123 0 : ipref %= fs->fs_ipg;
1124 0 : if (isclr(cg_inosused(cgp), ipref))
1125 : goto gotit; /* inode is free, grab it. */
1126 : }
1127 :
1128 : /*
1129 : * Otherwise, look for the next available inode, starting at cg_irotor
1130 : * (the position in the bitmap of the last used inode).
1131 : */
1132 0 : start = cgp->cg_irotor / NBBY;
1133 0 : len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
1134 0 : loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
1135 0 : if (loc == 0) {
1136 : /*
1137 : * If we didn't find a free inode in the upper part of the
1138 : * bitmap (from cg_irotor to the end), then look at the bottom
1139 : * part (from 0 to cg_irotor).
1140 : */
1141 0 : len = start + 1;
1142 : start = 0;
1143 0 : loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
1144 0 : if (loc == 0) {
1145 : /*
1146 : * If we failed again, then either the bitmap or the
1147 : * counters kept for the cylinder group are wrong.
1148 : */
1149 0 : printf("cg = %d, irotor = %d, fs = %s\n",
1150 0 : cg, cgp->cg_irotor, fs->fs_fsmnt);
1151 0 : panic("ffs_nodealloccg: map corrupted");
1152 : /* NOTREACHED */
1153 : }
1154 : }
1155 :
1156 : /* skpc() returns the position relative to the end */
1157 0 : i = start + len - loc;
1158 :
1159 : /*
1160 : * Okay, so now in 'i' we have the location in the bitmap of a byte
1161 : * holding a free inode. Find the corresponding bit and set it,
1162 : * updating cg_irotor as well, accordingly.
1163 : */
1164 0 : map = cg_inosused(cgp)[i];
1165 0 : ipref = i * NBBY;
1166 0 : for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
1167 0 : if ((map & i) == 0) {
1168 0 : cgp->cg_irotor = ipref;
1169 0 : goto gotit;
1170 : }
1171 : }
1172 :
1173 0 : printf("fs = %s\n", fs->fs_fsmnt);
1174 0 : panic("ffs_nodealloccg: block not in map");
1175 : /* NOTREACHED */
1176 :
1177 : gotit:
1178 :
1179 : #ifdef FFS2
1180 : /*
1181 : * For FFS2, check if all inodes in this cylinder group have been used
1182 : * at least once. If they haven't, and we are allocating an inode past
1183 : * the last allocated block of inodes, read in a block and initialize
1184 : * all inodes in it.
1185 : */
1186 0 : if (fs->fs_magic == FS_UFS2_MAGIC &&
1187 : /* Inode is beyond last initialized block of inodes? */
1188 0 : ipref + INOPB(fs) > cgp->cg_initediblk &&
1189 : /* Has any inode not been used at least once? */
1190 0 : cgp->cg_initediblk < cgp->cg_ffs2_niblk) {
1191 :
1192 0 : ibp = getblk(ip->i_devvp, fsbtodb(fs,
1193 : ino_to_fsba(fs, cg * fs->fs_ipg + cgp->cg_initediblk)),
1194 0 : (int)fs->fs_bsize, 0, 0);
1195 :
1196 0 : memset(ibp->b_data, 0, fs->fs_bsize);
1197 0 : dp2 = (struct ufs2_dinode *)(ibp->b_data);
1198 :
1199 : /* Give each inode a positive generation number */
1200 0 : for (i = 0; i < INOPB(fs); i++) {
1201 0 : dp2->di_gen = (arc4random() & INT32_MAX) / 2 + 1;
1202 0 : dp2++;
1203 : }
1204 :
1205 : /* Update the counter of initialized inodes */
1206 0 : cgp->cg_initediblk += INOPB(fs);
1207 0 : }
1208 : #endif /* FFS2 */
1209 :
1210 0 : if (DOINGSOFTDEP(ITOV(ip)))
1211 0 : softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref);
1212 :
1213 0 : setbit(cg_inosused(cgp), ipref);
1214 :
1215 : /* Update the counters we keep on free inodes */
1216 0 : cgp->cg_cs.cs_nifree--;
1217 0 : fs->fs_cstotal.cs_nifree--;
1218 0 : fs->fs_cs(fs, cg).cs_nifree--;
1219 0 : fs->fs_fmod = 1; /* file system was modified */
1220 :
1221 : /* Update the counters we keep on allocated directories */
1222 0 : if ((mode & IFMT) == IFDIR) {
1223 0 : cgp->cg_cs.cs_ndir++;
1224 0 : fs->fs_cstotal.cs_ndir++;
1225 0 : fs->fs_cs(fs, cg).cs_ndir++;
1226 0 : }
1227 :
1228 0 : bdwrite(bp);
1229 :
1230 : #ifdef FFS2
1231 0 : if (ibp != NULL)
1232 0 : bawrite(ibp);
1233 : #endif
1234 :
1235 : /* Return the allocated inode number */
1236 0 : return (cg * fs->fs_ipg + ipref);
1237 0 : }
1238 :
1239 : /*
1240 : * Free a block or fragment.
1241 : *
1242 : * The specified block or fragment is placed back in the
1243 : * free map. If a fragment is deallocated, a possible
1244 : * block reassembly is checked.
1245 : */
1246 : void
1247 0 : ffs_blkfree(struct inode *ip, daddr_t bno, long size)
1248 : {
1249 : struct fs *fs;
1250 : struct cg *cgp;
1251 : struct buf *bp;
1252 : daddr_t blkno;
1253 : int i, cg, blk, frags, bbase;
1254 :
1255 0 : fs = ip->i_fs;
1256 0 : if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
1257 0 : fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
1258 0 : printf("dev = 0x%x, bsize = %d, size = %ld, fs = %s\n",
1259 0 : ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
1260 0 : panic("ffs_blkfree: bad size");
1261 : }
1262 0 : cg = dtog(fs, bno);
1263 0 : if ((u_int)bno >= fs->fs_size) {
1264 0 : printf("bad block %lld, ino %u\n", (long long)bno,
1265 0 : ip->i_number);
1266 0 : ffs_fserr(fs, DIP(ip, uid), "bad block");
1267 0 : return;
1268 : }
1269 0 : if (!(bp = ffs_cgread(fs, ip, cg)))
1270 0 : return;
1271 :
1272 0 : cgp = (struct cg *)bp->b_data;
1273 0 : cgp->cg_ffs2_time = cgp->cg_time = time_second;
1274 :
1275 0 : bno = dtogd(fs, bno);
1276 0 : if (size == fs->fs_bsize) {
1277 0 : blkno = fragstoblks(fs, bno);
1278 0 : if (!ffs_isfreeblock(fs, cg_blksfree(cgp), blkno)) {
1279 0 : printf("dev = 0x%x, block = %lld, fs = %s\n",
1280 0 : ip->i_dev, (long long)bno, fs->fs_fsmnt);
1281 0 : panic("ffs_blkfree: freeing free block");
1282 : }
1283 0 : ffs_setblock(fs, cg_blksfree(cgp), blkno);
1284 0 : ffs_clusteracct(fs, cgp, blkno, 1);
1285 0 : cgp->cg_cs.cs_nbfree++;
1286 0 : fs->fs_cstotal.cs_nbfree++;
1287 0 : fs->fs_cs(fs, cg).cs_nbfree++;
1288 :
1289 0 : if (fs->fs_magic != FS_UFS2_MAGIC) {
1290 0 : i = cbtocylno(fs, bno);
1291 0 : cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
1292 0 : cg_blktot(cgp)[i]++;
1293 0 : }
1294 :
1295 : } else {
1296 0 : bbase = bno - fragnum(fs, bno);
1297 : /*
1298 : * decrement the counts associated with the old frags
1299 : */
1300 0 : blk = blkmap(fs, cg_blksfree(cgp), bbase);
1301 0 : ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
1302 : /*
1303 : * deallocate the fragment
1304 : */
1305 0 : frags = numfrags(fs, size);
1306 0 : for (i = 0; i < frags; i++) {
1307 0 : if (isset(cg_blksfree(cgp), bno + i)) {
1308 0 : printf("dev = 0x%x, block = %lld, fs = %s\n",
1309 0 : ip->i_dev, (long long)(bno + i),
1310 0 : fs->fs_fsmnt);
1311 0 : panic("ffs_blkfree: freeing free frag");
1312 : }
1313 0 : setbit(cg_blksfree(cgp), bno + i);
1314 : }
1315 0 : cgp->cg_cs.cs_nffree += i;
1316 0 : fs->fs_cstotal.cs_nffree += i;
1317 0 : fs->fs_cs(fs, cg).cs_nffree += i;
1318 : /*
1319 : * add back in counts associated with the new frags
1320 : */
1321 0 : blk = blkmap(fs, cg_blksfree(cgp), bbase);
1322 0 : ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
1323 : /*
1324 : * if a complete block has been reassembled, account for it
1325 : */
1326 0 : blkno = fragstoblks(fs, bbase);
1327 0 : if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
1328 0 : cgp->cg_cs.cs_nffree -= fs->fs_frag;
1329 0 : fs->fs_cstotal.cs_nffree -= fs->fs_frag;
1330 0 : fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
1331 0 : ffs_clusteracct(fs, cgp, blkno, 1);
1332 0 : cgp->cg_cs.cs_nbfree++;
1333 0 : fs->fs_cstotal.cs_nbfree++;
1334 0 : fs->fs_cs(fs, cg).cs_nbfree++;
1335 :
1336 0 : if (fs->fs_magic != FS_UFS2_MAGIC) {
1337 0 : i = cbtocylno(fs, bbase);
1338 0 : cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
1339 0 : cg_blktot(cgp)[i]++;
1340 0 : }
1341 : }
1342 : }
1343 0 : fs->fs_fmod = 1;
1344 0 : bdwrite(bp);
1345 0 : }
1346 :
1347 : int
1348 0 : ffs_inode_free(struct inode *pip, ufsino_t ino, mode_t mode)
1349 : {
1350 0 : struct vnode *pvp = ITOV(pip);
1351 :
1352 0 : if (DOINGSOFTDEP(pvp)) {
1353 0 : softdep_freefile(pvp, ino, mode);
1354 0 : return (0);
1355 : }
1356 :
1357 0 : return (ffs_freefile(pip, ino, mode));
1358 0 : }
1359 :
1360 : /*
1361 : * Do the actual free operation.
1362 : * The specified inode is placed back in the free map.
1363 : */
1364 : int
1365 0 : ffs_freefile(struct inode *pip, ufsino_t ino, mode_t mode)
1366 : {
1367 : struct fs *fs;
1368 : struct cg *cgp;
1369 : struct buf *bp;
1370 : int cg;
1371 :
1372 0 : fs = pip->i_fs;
1373 0 : if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
1374 0 : panic("ffs_freefile: range: dev = 0x%x, ino = %d, fs = %s",
1375 0 : pip->i_dev, ino, fs->fs_fsmnt);
1376 :
1377 0 : cg = ino_to_cg(fs, ino);
1378 0 : if (!(bp = ffs_cgread(fs, pip, cg)))
1379 0 : return (0);
1380 :
1381 0 : cgp = (struct cg *)bp->b_data;
1382 0 : cgp->cg_ffs2_time = cgp->cg_time = time_second;
1383 :
1384 0 : ino %= fs->fs_ipg;
1385 0 : if (isclr(cg_inosused(cgp), ino)) {
1386 0 : printf("dev = 0x%x, ino = %u, fs = %s\n",
1387 0 : pip->i_dev, ino, fs->fs_fsmnt);
1388 0 : if (fs->fs_ronly == 0)
1389 0 : panic("ffs_freefile: freeing free inode");
1390 : }
1391 0 : clrbit(cg_inosused(cgp), ino);
1392 0 : if (ino < cgp->cg_irotor)
1393 0 : cgp->cg_irotor = ino;
1394 0 : cgp->cg_cs.cs_nifree++;
1395 0 : fs->fs_cstotal.cs_nifree++;
1396 0 : fs->fs_cs(fs, cg).cs_nifree++;
1397 0 : if ((mode & IFMT) == IFDIR) {
1398 0 : cgp->cg_cs.cs_ndir--;
1399 0 : fs->fs_cstotal.cs_ndir--;
1400 0 : fs->fs_cs(fs, cg).cs_ndir--;
1401 0 : }
1402 0 : fs->fs_fmod = 1;
1403 0 : bdwrite(bp);
1404 0 : return (0);
1405 0 : }
1406 :
1407 :
1408 : /*
1409 : * Find a block of the specified size in the specified cylinder group.
1410 : *
1411 : * It is a panic if a request is made to find a block when none are
1412 : * available.
1413 : */
1414 : daddr_t
1415 0 : ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
1416 : {
1417 : daddr_t bno;
1418 : int start, len, loc, i;
1419 : int blk, field, subfield, pos;
1420 :
1421 : /*
1422 : * find the fragment by searching through the free block
1423 : * map for an appropriate bit pattern
1424 : */
1425 0 : if (bpref)
1426 0 : start = dtogd(fs, bpref) / NBBY;
1427 : else
1428 0 : start = cgp->cg_frotor / NBBY;
1429 0 : len = howmany(fs->fs_fpg, NBBY) - start;
1430 0 : loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[start],
1431 0 : (u_char *)fragtbl[fs->fs_frag],
1432 0 : (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
1433 0 : if (loc == 0) {
1434 0 : len = start + 1;
1435 : start = 0;
1436 0 : loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[0],
1437 0 : (u_char *)fragtbl[fs->fs_frag],
1438 0 : (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
1439 0 : if (loc == 0) {
1440 0 : printf("start = %d, len = %d, fs = %s\n",
1441 0 : start, len, fs->fs_fsmnt);
1442 0 : panic("ffs_alloccg: map corrupted");
1443 : /* NOTREACHED */
1444 : }
1445 : }
1446 0 : bno = (start + len - loc) * NBBY;
1447 0 : cgp->cg_frotor = bno;
1448 : /*
1449 : * found the byte in the map
1450 : * sift through the bits to find the selected frag
1451 : */
1452 0 : for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
1453 0 : blk = blkmap(fs, cg_blksfree(cgp), bno);
1454 0 : blk <<= 1;
1455 0 : field = around[allocsiz];
1456 0 : subfield = inside[allocsiz];
1457 0 : for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
1458 0 : if ((blk & field) == subfield)
1459 0 : return (bno + pos);
1460 0 : field <<= 1;
1461 0 : subfield <<= 1;
1462 : }
1463 : }
1464 0 : printf("bno = %lld, fs = %s\n", (long long)bno, fs->fs_fsmnt);
1465 0 : panic("ffs_alloccg: block not in map");
1466 : return (-1);
1467 : }
1468 :
1469 : /*
1470 : * Update the cluster map because of an allocation or free.
1471 : *
1472 : * Cnt == 1 means free; cnt == -1 means allocating.
1473 : */
1474 : void
1475 0 : ffs_clusteracct(struct fs *fs, struct cg *cgp, daddr_t blkno, int cnt)
1476 : {
1477 : int32_t *sump;
1478 : int32_t *lp;
1479 : u_char *freemapp, *mapp;
1480 : int i, start, end, forw, back, map, bit;
1481 :
1482 0 : if (fs->fs_contigsumsize <= 0)
1483 0 : return;
1484 0 : freemapp = cg_clustersfree(cgp);
1485 0 : sump = cg_clustersum(cgp);
1486 : /*
1487 : * Allocate or clear the actual block.
1488 : */
1489 0 : if (cnt > 0)
1490 0 : setbit(freemapp, blkno);
1491 : else
1492 0 : clrbit(freemapp, blkno);
1493 : /*
1494 : * Find the size of the cluster going forward.
1495 : */
1496 0 : start = blkno + 1;
1497 0 : end = start + fs->fs_contigsumsize;
1498 0 : if (end >= cgp->cg_nclusterblks)
1499 0 : end = cgp->cg_nclusterblks;
1500 0 : mapp = &freemapp[start / NBBY];
1501 0 : map = *mapp++;
1502 0 : bit = 1 << (start % NBBY);
1503 0 : for (i = start; i < end; i++) {
1504 0 : if ((map & bit) == 0)
1505 : break;
1506 0 : if ((i & (NBBY - 1)) != (NBBY - 1)) {
1507 0 : bit <<= 1;
1508 0 : } else {
1509 0 : map = *mapp++;
1510 : bit = 1;
1511 : }
1512 : }
1513 0 : forw = i - start;
1514 : /*
1515 : * Find the size of the cluster going backward.
1516 : */
1517 0 : start = blkno - 1;
1518 0 : end = start - fs->fs_contigsumsize;
1519 0 : if (end < 0)
1520 : end = -1;
1521 0 : mapp = &freemapp[start / NBBY];
1522 0 : map = *mapp--;
1523 0 : bit = 1 << (start % NBBY);
1524 0 : for (i = start; i > end; i--) {
1525 0 : if ((map & bit) == 0)
1526 : break;
1527 0 : if ((i & (NBBY - 1)) != 0) {
1528 0 : bit >>= 1;
1529 0 : } else {
1530 0 : map = *mapp--;
1531 : bit = 1 << (NBBY - 1);
1532 : }
1533 : }
1534 0 : back = start - i;
1535 : /*
1536 : * Account for old cluster and the possibly new forward and
1537 : * back clusters.
1538 : */
1539 0 : i = back + forw + 1;
1540 0 : if (i > fs->fs_contigsumsize)
1541 0 : i = fs->fs_contigsumsize;
1542 0 : sump[i] += cnt;
1543 0 : if (back > 0)
1544 0 : sump[back] -= cnt;
1545 0 : if (forw > 0)
1546 0 : sump[forw] -= cnt;
1547 : /*
1548 : * Update cluster summary information.
1549 : */
1550 0 : lp = &sump[fs->fs_contigsumsize];
1551 0 : for (i = fs->fs_contigsumsize; i > 0; i--)
1552 0 : if (*lp-- > 0)
1553 : break;
1554 0 : fs->fs_maxcluster[cgp->cg_cgx] = i;
1555 0 : }
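The summary update at the end is the heart of the accounting: freeing a block (cnt == 1) that has `back' free blocks immediately before it and `forw' immediately after merges those runs into one cluster of back + forw + 1 blocks, so the entry for the merged length gains a count while the two old run lengths each lose one; allocation (cnt == -1) reverses the adjustment. A minimal standalone sketch of just that step, with sketch_clustersum() as a hypothetical name:

static void
sketch_clustersum(int32_t *sump, int contigsumsize, int back, int forw, int cnt)
{
	int i = back + forw + 1;

	if (i > contigsumsize)
		i = contigsumsize;	/* run lengths are capped at fs_contigsumsize */
	sump[i] += cnt;
	if (back > 0)
		sump[back] -= cnt;
	if (forw > 0)
		sump[forw] -= cnt;
}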
|