LCOV - code coverage report
Current view: top level - sys - buf.h (source / functions)
Test:         6.4
Date:         2018-10-19 03:25:38
Legend:       Lines: hit | not hit

Coverage:            Hit    Total    Coverage
  Lines:               0       17       0.0 %
  Functions:            0        4       0.0 %

          Line data    Source code
       1             : /*      $OpenBSD: buf.h,v 1.107 2018/08/13 15:26:17 visa Exp $  */
       2             : /*      $NetBSD: buf.h,v 1.25 1997/04/09 21:12:17 mycroft Exp $ */
       3             : 
       4             : /*
       5             :  * Copyright (c) 1982, 1986, 1989, 1993
       6             :  *      The Regents of the University of California.  All rights reserved.
       7             :  * (c) UNIX System Laboratories, Inc.
       8             :  * All or some portions of this file are derived from material licensed
       9             :  * to the University of California by American Telephone and Telegraph
      10             :  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
      11             :  * the permission of UNIX System Laboratories, Inc.
      12             :  *
      13             :  * Redistribution and use in source and binary forms, with or without
      14             :  * modification, are permitted provided that the following conditions
      15             :  * are met:
      16             :  * 1. Redistributions of source code must retain the above copyright
      17             :  *    notice, this list of conditions and the following disclaimer.
      18             :  * 2. Redistributions in binary form must reproduce the above copyright
      19             :  *    notice, this list of conditions and the following disclaimer in the
      20             :  *    documentation and/or other materials provided with the distribution.
      21             :  * 3. Neither the name of the University nor the names of its contributors
      22             :  *    may be used to endorse or promote products derived from this software
      23             :  *    without specific prior written permission.
      24             :  *
      25             :  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
      26             :  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
      27             :  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
      28             :  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
      29             :  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
      30             :  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
      31             :  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
      32             :  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
      33             :  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
      34             :  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
      35             :  * SUCH DAMAGE.
      36             :  *
      37             :  *      @(#)buf.h       8.7 (Berkeley) 1/21/94
      38             :  */
      39             : 
      40             : #ifndef _SYS_BUF_H_
      41             : #define _SYS_BUF_H_
      42             : #include <sys/queue.h>
      43             : #include <sys/tree.h>
      44             : #include <sys/mutex.h>
      45             : 
      46             : #define NOLIST ((struct buf *)0x87654321)
      47             : 
      48             : struct buf;
      49             : struct vnode;
      50             : 
      51             : LIST_HEAD(bufhead, buf);
      52             : 
      53             : /*
      54             :  * To avoid including <ufs/ffs/softdep.h>
      55             :  */
      56             : 
      57             : LIST_HEAD(workhead, worklist);
      58             : 
      59             : /*
      60             :  * Buffer queues
      61             :  */
      62             : #define BUFQ_NSCAN_N    128
      63             : #define BUFQ_FIFO       0
      64             : #define BUFQ_NSCAN      1
      65             : #define BUFQ_DEFAULT    BUFQ_NSCAN
      66             : #define BUFQ_HOWMANY    2
      67             : 
      68             : /*
      69             :  * Write limits for bufq - defines high and low water marks for how
      70             :  * many kva slots are allowed to be consumed to parallelize writes from
      71             :  * the buffer cache from any individual bufq.
      72             :  */
      73             : #define BUFQ_HI         128
      74             : #define BUFQ_LOW        64
      75             : 
      76             : struct bufq_impl;
      77             : 
      78             : struct bufq {
      79             :         SLIST_ENTRY(bufq)        bufq_entries;
      80             :         struct mutex             bufq_mtx;
      81             :         void                    *bufq_data;
      82             :         u_int                    bufq_outstanding;
      83             :         u_int                    bufq_hi;
      84             :         u_int                    bufq_low;
      85             :         int                      bufq_waiting;
      86             :         int                      bufq_stop;
      87             :         int                      bufq_type;
      88             :         const struct bufq_impl  *bufq_impl;
      89             : };
      90             : 
      91             : int              bufq_init(struct bufq *, int);
      92             : int              bufq_switch(struct bufq *, int);
      93             : void             bufq_destroy(struct bufq *);
      94             : 
      95             : void             bufq_queue(struct bufq *, struct buf *);
      96             : struct buf      *bufq_dequeue(struct bufq *);
      97             : void             bufq_requeue(struct bufq *, struct buf *);
      98             : int              bufq_peek(struct bufq *);
      99             : void             bufq_drain(struct bufq *);
     100             : 
     101             : void             bufq_wait(struct bufq *);
     102             : void             bufq_done(struct bufq *, struct buf *);
     103             : void             bufq_quiesce(void);
     104             : void             bufq_restart(void);
     105             : 
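/*
 * Illustrative sketch (not part of the covered header): a disk driver
 * usually embeds one struct bufq, feeds it from its strategy entry
 * point and drains it when the hardware is ready.  xx_softc,
 * xxstrategy, xxstart and xxintr are placeholder names; only the
 * bufq_* and biodone() interfaces come from declarations in this file.
 */
struct xx_softc {
        struct bufq     sc_bufq;
};

int
xxattach(struct xx_softc *sc)
{
        /* BUFQ_DEFAULT selects the nscan discipline; BUFQ_FIFO also works. */
        return (bufq_init(&sc->sc_bufq, BUFQ_DEFAULT));
}

void
xxstrategy(struct xx_softc *sc, struct buf *bp)
{
        bufq_queue(&sc->sc_bufq, bp);   /* enqueue, then poke the hardware */
}

void
xxstart(struct xx_softc *sc)
{
        struct buf *bp;

        while ((bp = bufq_dequeue(&sc->sc_bufq)) != NULL) {
                /* ... hand bp to the hardware; xxintr() completes it ... */
        }
}

void
xxintr(struct xx_softc *sc, struct buf *bp)
{
        bufq_done(&sc->sc_bufq, bp);    /* release the outstanding slot */
        biodone(bp);                    /* finish the I/O on the buffer */
}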
     106             : /* fifo */
     107             : SIMPLEQ_HEAD(bufq_fifo_head, buf);
     108             : struct bufq_fifo {
     109             :         SIMPLEQ_ENTRY(buf)      bqf_entries;
     110             : };
     111             : 
     112             : /* nscan */
     113             : SIMPLEQ_HEAD(bufq_nscan_head, buf);
     114             : struct bufq_nscan {
     115             :         SIMPLEQ_ENTRY(buf)      bqf_entries;
     116             : };
     117             : 
     118             : /* bufq link in struct buf */
     119             : union bufq_data {
     120             :         struct bufq_fifo        bufq_data_fifo;
     121             :         struct bufq_nscan       bufq_data_nscan;
     122             : };
     123             : 
     124             : /*
     125             :  * These are currently used only by the soft dependency code, hence
     126             :  * are stored once in a global variable. If other subsystems wanted
     127             :  * to use these hooks, a pointer to a set of bio_ops could be added
     128             :  * to each buffer.
     129             :  */
     130             : extern struct bio_ops {
     131             :         void    (*io_start)(struct buf *);
     132             :         void    (*io_complete)(struct buf *);
     133             :         void    (*io_deallocate)(struct buf *);
     134             :         void    (*io_movedeps)(struct buf *, struct buf *);
     135             :         int     (*io_countdeps)(struct buf *, int, int);
     136             : } bioops;
     137             : 
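/*
 * Rough sketch of how the single set of hooks is installed; the xx_*
 * names are placeholders for whatever functions the dependency-tracking
 * code provides.  Hooks left NULL are simply skipped by the buf_start(),
 * buf_complete(), ... wrappers defined further down in this header.
 */
static void xx_io_start(struct buf *);
static void xx_io_complete(struct buf *);

void
xx_init_bioops(void)
{
        bioops.io_start = xx_io_start;
        bioops.io_complete = xx_io_complete;
        /* io_deallocate, io_movedeps and io_countdeps stay NULL here */
}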
     138             : /* The buffer header describes an I/O operation in the kernel. */
     139             : struct buf {
     140             :         RBT_ENTRY(buf) b_rbbufs;        /* vnode "hash" tree */
     141             :         LIST_ENTRY(buf) b_list;         /* All allocated buffers. */
     142             :         LIST_ENTRY(buf) b_vnbufs;       /* Buffer's associated vnode. */
     143             :         TAILQ_ENTRY(buf) b_freelist;    /* Free list position if not active. */
     144             :         int cache;                      /* which cache are we in */
     145             :         struct  proc *b_proc;           /* Associated proc; NULL if kernel. */
     146             :         volatile long   b_flags;        /* B_* flags. */
     147             :         long    b_bufsize;              /* Allocated buffer size. */
     148             :         long    b_bcount;               /* Valid bytes in buffer. */
     149             :         size_t  b_resid;                /* Remaining I/O. */
     150             :         int     b_error;                /* Errno value. */
     151             :         dev_t   b_dev;                  /* Device associated with buffer. */
     152             :         caddr_t b_data;                 /* associated data */
     153             :         void    *b_saveaddr;            /* Original b_data for physio. */
     154             : 
     155             :         TAILQ_ENTRY(buf) b_valist;      /* LRU of va to reuse. */
     156             : 
     157             :         union   bufq_data b_bufq;
     158             :         struct  bufq      *b_bq;        /* What bufq this buf is on */
     159             : 
     160             :         struct uvm_object *b_pobj;      /* Object containing the pages */
     161             :         off_t   b_poffs;                /* Offset within object */
     162             : 
     163             :         daddr_t b_lblkno;               /* Logical block number. */
     164             :         daddr_t b_blkno;                /* Underlying physical block number. */
     165             :                                         /* Function to call upon completion.
     166             :                                          * Will be called at splbio(). */
     167             :         void    (*b_iodone)(struct buf *);
     168             :         struct  vnode *b_vp;            /* Device vnode. */
     169             :         int     b_dirtyoff;             /* Offset in buffer of dirty region. */
     170             :         int     b_dirtyend;             /* Offset of end of dirty region. */
     171             :         int     b_validoff;             /* Offset in buffer of valid region. */
     172             :         int     b_validend;             /* Offset of end of valid region. */
     173             :         struct  workhead b_dep;         /* List of filesystem dependencies. */
     174             : };
     175             : 
     176             : TAILQ_HEAD(bufqueue, buf);
     177             : 
     178             : struct bufcache {
     179             :         int64_t hotbufpages;
     180             :         int64_t warmbufpages;
     181             :         int64_t cachepages;
     182             :         struct bufqueue hotqueue;
     183             :         struct bufqueue coldqueue;
     184             :         struct bufqueue warmqueue;
     185             : };
     186             : 
     187             : /* Device driver compatibility definitions. */
     188             : #define b_active b_bcount               /* Driver queue head: drive active. */
     189             : 
     190             : /*
     191             :  * These flags are kept in b_flags.
     192             :  */
     193             : #define B_WRITE         0x00000000      /* Write buffer (pseudo flag). */
     194             : #define B_AGE           0x00000001      /* Move to age queue when I/O done. */
     195             : #define B_NEEDCOMMIT    0x00000002      /* Needs committing to stable storage */
     196             : #define B_ASYNC         0x00000004      /* Start I/O, do not wait. */
     197             : #define B_BAD           0x00000008      /* Bad block revectoring in progress. */
     198             : #define B_BUSY          0x00000010      /* I/O in progress. */
     199             : #define B_CACHE         0x00000020      /* Bread found us in the cache. */
     200             : #define B_CALL          0x00000040      /* Call b_iodone from biodone. */
     201             : #define B_DELWRI        0x00000080      /* Delay I/O until buffer reused. */
     202             : #define B_DONE          0x00000100      /* I/O completed. */
     203             : #define B_EINTR         0x00000200      /* I/O was interrupted */
     204             : #define B_ERROR         0x00000400      /* I/O error occurred. */
     205             : #define B_INVAL         0x00000800      /* Does not contain valid info. */
     206             : #define B_NOCACHE       0x00001000      /* Do not cache block after use. */
     207             : #define B_PHYS          0x00002000      /* I/O to user memory. */
     208             : #define B_RAW           0x00004000      /* Set by physio for raw transfers. */
     209             : #define B_READ          0x00008000      /* Read buffer. */
     210             : #define B_WANTED        0x00010000      /* Process wants this buffer. */
     211             : #define B_WRITEINPROG   0x00020000      /* Write in progress. */
     212             : #define B_XXX           0x00040000      /* Debugging flag. */
     213             : #define B_DEFERRED      0x00080000      /* Skipped over for cleaning */
     214             : #define B_SCANNED       0x00100000      /* Block already pushed during sync */
     215             : #define B_PDAEMON       0x00200000      /* I/O started by pagedaemon */
     216             : #define B_RELEASED      0x00400000      /* free this buffer after its kvm */
     217             : #define B_WARM          0x00800000      /* buffer is or has been on the warm queue */
     218             : #define B_COLD          0x01000000      /* buffer is on the cold queue */
     219             : #define B_BC            0x02000000      /* buffer is managed by the cache */
     220             : #define B_DMA           0x04000000      /* buffer is DMA reachable */
     221             : 
     222             : #define B_BITS  "\20\001AGE\002NEEDCOMMIT\003ASYNC\004BAD\005BUSY" \
     223             :     "\006CACHE\007CALL\010DELWRI\011DONE\012EINTR\013ERROR" \
     224             :     "\014INVAL\015NOCACHE\016PHYS\017RAW\020READ" \
     225             :     "\021WANTED\022WRITEINPROG\023XXX(FORMAT)\024DEFERRED" \
     226             :     "\025SCANNED\026DAEMON\027RELEASED\030WARM\031COLD\032BC\033DMA"
     227             : 
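/*
 * B_BITS is a bit-name string for the kernel printf(9) "%b" conversion;
 * an illustrative use, with bp any struct buf *:
 *
 *      printf("b_flags: %b\n", (int)bp->b_flags, B_BITS);
 */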
     228             : /*
     229             :  * Zero out the buffer's data area.
     230             :  */
     231             : #define clrbuf(bp) {                                                    \
     232             :         bzero((bp)->b_data, (u_int)(bp)->b_bcount);                       \
     233             :         (bp)->b_resid = 0;                                           \
     234             : }
     235             : 
     236             : 
     237             : /* Flags to low-level allocation routines. */
     238             : #define B_CLRBUF        0x01    /* Request allocated buffer be cleared. */
     239             : #define B_SYNC          0x02    /* Do all allocations synchronously. */
     240             : 
     241             : struct cluster_info {
     242             :         daddr_t ci_lastr;       /* last read (read-ahead) */
     243             :         daddr_t ci_lastw;       /* last write (write cluster) */
     244             :         daddr_t ci_cstart;      /* start block of cluster */
     245             :         daddr_t ci_lasta;       /* last allocation */
     246             :         int     ci_clen;        /* length of current cluster */
     247             :         int     ci_ralen;       /* Read-ahead length */
     248             :         daddr_t ci_maxra;       /* last readahead block */
     249             : };
     250             : 
     251             : #ifdef _KERNEL
     252             : __BEGIN_DECLS
     253             : /* Kva slots (of size MAXPHYS) reserved for syncer and cleaner. */
     254             : #define RESERVE_SLOTS 4
     255             : /* Buffer cache pages reserved for syncer and cleaner. */
     256             : #define RESERVE_PAGES (RESERVE_SLOTS * MAXPHYS / PAGE_SIZE)
     257             : /* Minimum size of the buffer cache, in pages. */
     258             : #define BCACHE_MIN (RESERVE_PAGES * 2)
     259             : #define UNCLEAN_PAGES (bcstats.numbufpages - bcstats.numcleanpages)
     260             : 
     261             : extern struct proc *cleanerproc;
     262             : extern long bufpages;           /* Max number of pages for buffers' data */
     263             : extern struct pool bufpool;
     264             : extern struct bufhead bufhead;
     265             : 
     266             : void    bawrite(struct buf *);
     267             : void    bdwrite(struct buf *);
     268             : void    biodone(struct buf *);
     269             : int     biowait(struct buf *);
     270             : int bread(struct vnode *, daddr_t, int, struct buf **);
     271             : int breadn(struct vnode *, daddr_t, int, daddr_t *, int *, int,
     272             :     struct buf **);
     273             : void    brelse(struct buf *);
     274             : #define bremfree bufcache_take
     275             : void    bufinit(void);
     276             : void    buf_dirty(struct buf *);
     277             : void    buf_undirty(struct buf *);
     278             : void    buf_adjcnt(struct buf *, long);
     279             : int     bwrite(struct buf *);
     280             : struct buf *getblk(struct vnode *, daddr_t, int, int, int);
     281             : struct buf *geteblk(size_t);
     282             : struct buf *incore(struct vnode *, daddr_t);
     283             : 
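/*
 * Typical read path through the routines above (sketch only; vp, blkno
 * and size are whatever the caller already has, xx_read_block is a
 * placeholder name):
 */
int
xx_read_block(struct vnode *vp, daddr_t blkno, int size)
{
        struct buf *bp;
        int error;

        error = bread(vp, blkno, size, &bp);
        if (error) {
                brelse(bp);             /* callers release even on error */
                return (error);
        }
        /* ... examine or copy out bp->b_data ... */
        brelse(bp);             /* or bdwrite(bp)/bwrite(bp) if dirtied */
        return (0);
}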
     284             : /*
     285             :  * bufcache functions
     286             :  */
     287             : void bufcache_take(struct buf *);
     288             : void bufcache_release(struct buf *);
     289             : 
     290             : int buf_flip_high(struct buf *);
     291             : void buf_flip_dma(struct buf *);
     292             : struct buf *bufcache_getcleanbuf(int, int);
     293             : struct buf *bufcache_getanycleanbuf(void);
     294             : struct buf *bufcache_getdirtybuf(void);
     295             : 
     296             : /*
     297             :  * buf_kvm_init initializes the kvm handling for buffers.
     298             :  * buf_acquire sets the B_BUSY flag and ensures that the buffer is
     299             :  * mapped in the kvm.
     300             :  * buf_release clears the B_BUSY flag and allows the buffer to become
     301             :  * unmapped.
     302             :  * buf_unmap is for internal use only. Unmaps the buffer from kvm.
     303             :  */
     304             : void    buf_mem_init(vsize_t);
     305             : void    buf_acquire(struct buf *);
     306             : void    buf_acquire_unmapped(struct buf *);
     307             : void    buf_acquire_nomap(struct buf *);
     308             : void    buf_map(struct buf *);
     309             : void    buf_release(struct buf *);
     310             : int     buf_dealloc_mem(struct buf *);
     311             : void    buf_fix_mapping(struct buf *, vsize_t);
     312             : void    buf_alloc_pages(struct buf *, vsize_t);
     313             : void    buf_free_pages(struct buf *);
     314             : 
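/*
 * Sketch of the busy/mapping protocol described above, for a buffer
 * already removed from its queue; xx_touch_buf is a placeholder, and
 * most real callers live inside the buffer cache itself.
 */
void
xx_touch_buf(struct buf *bp)
{
        buf_acquire(bp);        /* sets B_BUSY and maps the buffer into kva */
        /* ... bp->b_data is now safe to read or modify ... */
        buf_release(bp);        /* clears B_BUSY; the mapping may be reclaimed */
}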
     315             : void    minphys(struct buf *bp);
     316             : int     physio(void (*strategy)(struct buf *), dev_t dev, int flags,
     317             :             void (*minphys)(struct buf *), struct uio *uio);
     318             : void  brelvp(struct buf *);
     319             : void  reassignbuf(struct buf *);
     320             : void  bgetvp(struct vnode *, struct buf *);
     321             : 
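/*
 * Classic use of physio() for a character (raw) device: the read and
 * write entry points just wrap the driver's strategy routine.
 * xxread, xxwrite and xxstrategy are placeholder names.
 */
int
xxread(dev_t dev, struct uio *uio, int ioflag)
{
        return (physio(xxstrategy, dev, B_READ, minphys, uio));
}

int
xxwrite(dev_t dev, struct uio *uio, int ioflag)
{
        return (physio(xxstrategy, dev, B_WRITE, minphys, uio));
}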
     322             : void  buf_replacevnode(struct buf *, struct vnode *);
     323             : void  buf_daemon(void *);
     324             : void  buf_replacevnode(struct buf *, struct vnode *);
     325             : int bread_cluster(struct vnode *, daddr_t, int, struct buf **);
     326             : 
     327             : #ifdef DEBUG
     328             : void buf_print(struct buf *);
     329             : #endif
     330             : 
     331             : static __inline void
     332           0 : buf_start(struct buf *bp)
     333             : {
     334           0 :         if (bioops.io_start)
     335           0 :                 (*bioops.io_start)(bp);
     336           0 : }
     337             : 
     338             : static __inline void
     339           0 : buf_complete(struct buf *bp)
     340             : {
     341           0 :         if (bioops.io_complete)
     342           0 :                 (*bioops.io_complete)(bp);
     343           0 : }
     344             : 
     345             : static __inline void
     346           0 : buf_deallocate(struct buf *bp)
     347             : {
     348           0 :         if (bioops.io_deallocate)
     349           0 :                 (*bioops.io_deallocate)(bp);
     350           0 : }
     351             : 
     352             : static __inline void
     353             : buf_movedeps(struct buf *bp, struct buf *bp2)
     354             : {
     355             :         if (bioops.io_movedeps)
     356             :                 (*bioops.io_movedeps)(bp, bp2);
     357             : }
     358             : 
     359             : static __inline int
     360           0 : buf_countdeps(struct buf *bp, int i, int islocked)
     361             : {
     362           0 :         if (bioops.io_countdeps)
     363           0 :                 return ((*bioops.io_countdeps)(bp, i, islocked));
     364             :         else
     365           0 :                 return (0);
     366           0 : }
     367             : 
     368             : void    cluster_write(struct buf *, struct cluster_info *, u_quad_t);
     369             : 
     370             : __END_DECLS
     371             : #endif /* _KERNEL */
     372             : #endif /* !_SYS_BUF_H_ */

Generated by: LCOV version 1.13