LCOV - code coverage report
Current view: top level - dev/pci - if_ixgb.c (source / functions) Hit Total Coverage
Test: 6.4 Lines: 0 869 0.0 %
Date: 2018-10-19 03:25:38 Functions: 0 40 0.0 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /**************************************************************************
       2             : 
       3             : Copyright (c) 2001-2005, Intel Corporation
       4             : All rights reserved.
       5             : 
       6             : Redistribution and use in source and binary forms, with or without
       7             : modification, are permitted provided that the following conditions are met:
       8             : 
       9             :  1. Redistributions of source code must retain the above copyright notice,
      10             :     this list of conditions and the following disclaimer.
      11             : 
      12             :  2. Redistributions in binary form must reproduce the above copyright
      13             :     notice, this list of conditions and the following disclaimer in the
      14             :     documentation and/or other materials provided with the distribution.
      15             : 
      16             :  3. Neither the name of the Intel Corporation nor the names of its
      17             :     contributors may be used to endorse or promote products derived from
      18             :     this software without specific prior written permission.
      19             : 
      20             : THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
      21             : AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
      22             : IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
      23             : ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
      24             : LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
      25             : CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
      26             : SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
      27             : INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
      28             : CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
      29             : ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
      30             : POSSIBILITY OF SUCH DAMAGE.
      31             : 
      32             : ***************************************************************************/
      33             : 
      34             : /* $OpenBSD: if_ixgb.c,v 1.71 2017/01/22 10:17:38 dlg Exp $ */
      35             : 
      36             : #include <dev/pci/if_ixgb.h>
      37             : 
      38             : #ifdef IXGB_DEBUG
      39             : /*********************************************************************
      40             :  *  Set this to one to display debug statistics
      41             :  *********************************************************************/
      42             : int             ixgb_display_debug_stats = 0;
      43             : #endif
      44             : 
      45             : /*********************************************************************
      46             :  *  Driver version
      47             :  *********************************************************************/
      48             : 
      49             : #define IXGB_DRIVER_VERSION     "6.1.0"
      50             : 
      51             : /*********************************************************************
      52             :  *  PCI Device ID Table
      53             :  *********************************************************************/
      54             : 
/*
 * PCI vendor/product pairs accepted by ixgb_probe(): the Intel
 * 82597EX family (base, SR, LR and CX4 media variants).
 */
const struct pci_matchid ixgb_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_SR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_LR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_CX4 },
};
      61             : 
      62             : /*********************************************************************
      63             :  *  Function prototypes
      64             :  *********************************************************************/
      65             : int  ixgb_probe(struct device *, void *, void *);
      66             : void ixgb_attach(struct device *, struct device *, void *);
      67             : int  ixgb_intr(void *);
      68             : void ixgb_start(struct ifnet *);
      69             : int  ixgb_ioctl(struct ifnet *, u_long, caddr_t);
      70             : void ixgb_watchdog(struct ifnet *);
      71             : void ixgb_init(void *);
      72             : void ixgb_stop(void *);
      73             : void ixgb_media_status(struct ifnet *, struct ifmediareq *);
      74             : int  ixgb_media_change(struct ifnet *);
      75             : void ixgb_identify_hardware(struct ixgb_softc *);
      76             : int  ixgb_allocate_pci_resources(struct ixgb_softc *);
      77             : void ixgb_free_pci_resources(struct ixgb_softc *);
      78             : void ixgb_local_timer(void *);
      79             : int  ixgb_hardware_init(struct ixgb_softc *);
      80             : void ixgb_setup_interface(struct ixgb_softc *);
      81             : int  ixgb_setup_transmit_structures(struct ixgb_softc *);
      82             : void ixgb_initialize_transmit_unit(struct ixgb_softc *);
      83             : int  ixgb_setup_receive_structures(struct ixgb_softc *);
      84             : void ixgb_initialize_receive_unit(struct ixgb_softc *);
      85             : void ixgb_enable_intr(struct ixgb_softc *);
      86             : void ixgb_disable_intr(struct ixgb_softc *);
      87             : void ixgb_free_transmit_structures(struct ixgb_softc *);
      88             : void ixgb_free_receive_structures(struct ixgb_softc *);
      89             : void ixgb_update_stats_counters(struct ixgb_softc *);
      90             : void ixgb_txeof(struct ixgb_softc *);
      91             : int  ixgb_allocate_receive_structures(struct ixgb_softc *);
      92             : int  ixgb_allocate_transmit_structures(struct ixgb_softc *);
      93             : void ixgb_rxeof(struct ixgb_softc *, int);
      94             : void
      95             : ixgb_receive_checksum(struct ixgb_softc *,
      96             :                       struct ixgb_rx_desc * rx_desc,
      97             :                       struct mbuf *);
      98             : void
      99             : ixgb_transmit_checksum_setup(struct ixgb_softc *,
     100             :                              struct mbuf *,
     101             :                              u_int8_t *);
     102             : void ixgb_set_promisc(struct ixgb_softc *);
     103             : void ixgb_set_multi(struct ixgb_softc *);
     104             : #ifdef IXGB_DEBUG
     105             : void ixgb_print_hw_stats(struct ixgb_softc *);
     106             : #endif
     107             : void ixgb_update_link_status(struct ixgb_softc *);
     108             : int
     109             : ixgb_get_buf(struct ixgb_softc *, int i,
     110             :              struct mbuf *);
     111             : void ixgb_enable_hw_vlans(struct ixgb_softc *);
     112             : int  ixgb_encap(struct ixgb_softc *, struct mbuf *);
     113             : int
     114             : ixgb_dma_malloc(struct ixgb_softc *, bus_size_t,
     115             :                 struct ixgb_dma_alloc *, int);
     116             : void ixgb_dma_free(struct ixgb_softc *, struct ixgb_dma_alloc *);
     117             : 
     118             : /*********************************************************************
     119             :  *  OpenBSD Device Interface Entry Points
     120             :  *********************************************************************/
     121             : 
/* autoconf(9) attach glue: softc size plus match/attach entry points. */
struct cfattach ixgb_ca = {
	sizeof(struct ixgb_softc), ixgb_probe, ixgb_attach
};

/* Driver class: instances are network interfaces named "ixgb". */
struct cfdriver ixgb_cd = {
	NULL, "ixgb", DV_IFNET
};
     129             : 
     130             : /* some defines for controlling descriptor fetches in h/w */
     131             : #define RXDCTL_PTHRESH_DEFAULT 0        /* chip considers prefech below this */
     132             : #define RXDCTL_HTHRESH_DEFAULT 0        /* chip will only prefetch if tail is
     133             :                                          * pushed this many descriptors from
     134             :                                          * head */
     135             : #define RXDCTL_WTHRESH_DEFAULT 0        /* chip writes back at this many or RXT0 */
     136             : 
     137             : 
     138             : /*********************************************************************
     139             :  *  Device identification routine
     140             :  *
     141             :  *  ixgb_probe determines if the driver should be loaded on
     142             :  *  adapter based on PCI vendor/device id of the adapter.
     143             :  *
     144             :  *  return 0 on no match, positive on match
     145             :  *********************************************************************/
     146             : 
     147             : int
     148           0 : ixgb_probe(struct device *parent, void *match, void *aux)
     149             : {
     150             :         INIT_DEBUGOUT("ixgb_probe: begin");
     151             : 
     152           0 :         return (pci_matchbyid((struct pci_attach_args *)aux, ixgb_devices,
     153             :             nitems(ixgb_devices)));
     154             : }
     155             : 
     156             : /*********************************************************************
     157             :  *  Device initialization routine
     158             :  *
     159             :  *  The attach entry point is called when the driver is being loaded.
     160             :  *  This routine identifies the type of hardware, allocates all resources
     161             :  *  and initializes the hardware.
     162             :  *
     163             :  *********************************************************************/
     164             : 
void
ixgb_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct ixgb_softc *sc;
	int             tsize, rsize;

	INIT_DEBUGOUT("ixgb_attach: begin");

	/* `self' is the head of our softc, allocated by autoconf. */
	sc = (struct ixgb_softc *)self;
	sc->osdep.ixgb_pa = *pa;

	/* Initialize (but do not yet schedule) the periodic timer. */
	timeout_set(&sc->timer_handle, ixgb_local_timer, sc);

	/* Determine hardware revision */
	ixgb_identify_hardware(sc);

	/* Parameters (to be read from user) */
	sc->num_tx_desc = IXGB_MAX_TXD;
	sc->num_rx_desc = IXGB_MAX_RXD;
	sc->tx_int_delay = TIDV;
	sc->rx_int_delay = RDTR;
	sc->rx_buffer_len = IXGB_RXBUFFER_2048;

	/*
	 * These parameters control the automatic generation(Tx) and
	 * response(Rx) to Ethernet PAUSE frames.
	 */
	sc->hw.fc.high_water = FCRTH;
	sc->hw.fc.low_water = FCRTL;
	sc->hw.fc.pause_time = FCPAUSE;
	sc->hw.fc.send_xon = TRUE;
	sc->hw.fc.type = FLOW_CONTROL;

	/* Set the max frame size assuming standard ethernet sized frames */
	sc->hw.max_frame_size = IXGB_MAX_JUMBO_FRAME_SIZE;

	if (ixgb_allocate_pci_resources(sc))
		goto err_pci;

	/*
	 * Size the Tx ring in bytes, rounded up to a page multiple so the
	 * DMA allocation is page-aligned.
	 */
	tsize = IXGB_ROUNDUP(sc->num_tx_desc * sizeof(struct ixgb_tx_desc),
	    IXGB_MAX_TXD * sizeof(struct ixgb_tx_desc));
	tsize = IXGB_ROUNDUP(tsize, PAGE_SIZE);

	/* Allocate Transmit Descriptor ring */
	if (ixgb_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate TxDescriptor memory\n",
		       sc->sc_dv.dv_xname);
		goto err_tx_desc;
	}
	sc->tx_desc_base = (struct ixgb_tx_desc *) sc->txdma.dma_vaddr;

	/* Same page-rounded sizing for the Rx ring. */
	rsize = IXGB_ROUNDUP(sc->num_rx_desc * sizeof(struct ixgb_rx_desc),
	    IXGB_MAX_RXD * sizeof(struct ixgb_rx_desc));
	rsize = IXGB_ROUNDUP(rsize, PAGE_SIZE);

	/* Allocate Receive Descriptor ring */
	if (ixgb_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate rx_desc memory\n",
		       sc->sc_dv.dv_xname);
		goto err_rx_desc;
	}
	sc->rx_desc_base = (struct ixgb_rx_desc *) sc->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (ixgb_hardware_init(sc)) {
		printf("%s: Unable to initialize the hardware\n",
		       sc->sc_dv.dv_xname);
		goto err_hw_init;
	}

	/* Setup OS specific network interface */
	ixgb_setup_interface(sc);

	/* Initialize statistics */
	ixgb_clear_hw_cntrs(&sc->hw);
	ixgb_update_stats_counters(sc);
	ixgb_update_link_status(sc);

	printf(", address %s\n", ether_sprintf(sc->interface_data.ac_enaddr));

	INIT_DEBUGOUT("ixgb_attach: end");
	return;

	/* Error unwinding: release resources in reverse allocation order. */
err_hw_init:
	ixgb_dma_free(sc, &sc->rxdma);
err_rx_desc:
	ixgb_dma_free(sc, &sc->txdma);
err_tx_desc:
err_pci:
	ixgb_free_pci_resources(sc);
}
     257             : 
     258             : /*********************************************************************
     259             :  *  Transmit entry point
     260             :  *
     261             :  *  ixgb_start is called by the stack to initiate a transmit.
     262             :  *  The driver will remain in this routine as long as there are
     263             :  *  packets to transmit and transmit resources are available.
     264             :  *  In case resources are not available stack is notified and
     265             :  *  the packet is requeued.
     266             :  **********************************************************************/
     267             : 
     268             : void
     269           0 : ixgb_start(struct ifnet *ifp)
     270             : {
     271             :         struct mbuf    *m_head;
     272           0 :         struct ixgb_softc *sc = ifp->if_softc;
     273             :         int             post = 0;
     274             : 
     275           0 :         if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
     276           0 :                 return;
     277             : 
     278           0 :         if (!sc->link_active)
     279           0 :                 return;
     280             : 
     281           0 :         bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
     282             :             sc->txdma.dma_map->dm_mapsize,
     283             :             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
     284             : 
     285           0 :         for (;;) {
     286           0 :                 m_head = ifq_deq_begin(&ifp->if_snd);
     287           0 :                 if (m_head == NULL)
     288             :                         break;
     289             : 
     290           0 :                 if (ixgb_encap(sc, m_head)) {
     291           0 :                         ifq_deq_rollback(&ifp->if_snd, m_head);
     292           0 :                         ifq_set_oactive(&ifp->if_snd);
     293           0 :                         break;
     294             :                 }
     295             : 
     296           0 :                 ifq_deq_commit(&ifp->if_snd, m_head);
     297             : 
     298             : #if NBPFILTER > 0
     299             :                 /* Send a copy of the frame to the BPF listener */
     300           0 :                 if (ifp->if_bpf)
     301           0 :                         bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
     302             : #endif
     303             : 
     304             :                 /* Set timeout in case hardware has problems transmitting */
     305           0 :                 ifp->if_timer = IXGB_TX_TIMEOUT;
     306             : 
     307             :                 post = 1;
     308             :         }
     309             : 
     310           0 :         bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
     311             :             sc->txdma.dma_map->dm_mapsize,
     312             :             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
     313             :         /*
     314             :          * Advance the Transmit Descriptor Tail (Tdt),
     315             :          * this tells the E1000 that this frame
     316             :          * is available to transmit.
     317             :          */
     318           0 :         if (post)
     319           0 :                 IXGB_WRITE_REG(&sc->hw, TDT, sc->next_avail_tx_desc);
     320           0 : }
     321             : 
     322             : /*********************************************************************
     323             :  *  Ioctl entry point
     324             :  *
     325             :  *  ixgb_ioctl is called when the user wants to configure the
     326             :  *  interface.
     327             :  *
     328             :  *  return 0 on success, positive on failure
     329             :  **********************************************************************/
     330             : 
     331             : int
     332           0 : ixgb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
     333             : {
     334           0 :         struct ixgb_softc *sc = ifp->if_softc;
     335           0 :         struct ifreq    *ifr = (struct ifreq *) data;
     336             :         int             s, error = 0;
     337             : 
     338           0 :         s = splnet();
     339             : 
     340           0 :         switch (command) {
     341             :         case SIOCSIFADDR:
     342             :                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
     343             :                                "Addr)");
     344           0 :                 ifp->if_flags |= IFF_UP;
     345           0 :                 if (!(ifp->if_flags & IFF_RUNNING))
     346           0 :                         ixgb_init(sc);
     347             :                 break;
     348             : 
     349             :         case SIOCSIFFLAGS:
     350             :                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
     351           0 :                 if (ifp->if_flags & IFF_UP) {
     352             :                         /*
     353             :                          * If only the PROMISC or ALLMULTI flag changes, then
     354             :                          * don't do a full re-init of the chip, just update
     355             :                          * the Rx filter.
     356             :                          */
     357           0 :                         if ((ifp->if_flags & IFF_RUNNING) &&
     358           0 :                             ((ifp->if_flags ^ sc->if_flags) &
     359           0 :                              (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
     360           0 :                                 ixgb_set_promisc(sc);
     361           0 :                         } else {
     362           0 :                                 if (!(ifp->if_flags & IFF_RUNNING))
     363           0 :                                         ixgb_init(sc);
     364             :                         }
     365             :                 } else {
     366           0 :                         if (ifp->if_flags & IFF_RUNNING)
     367           0 :                                 ixgb_stop(sc);
     368             :                 }
     369           0 :                 sc->if_flags = ifp->if_flags;
     370           0 :                 break;
     371             : 
     372             :         case SIOCSIFMEDIA:
     373             :         case SIOCGIFMEDIA:
     374             :                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
     375           0 :                 error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
     376           0 :                 break;
     377             : 
     378             :         default:
     379           0 :                 error = ether_ioctl(ifp, &sc->interface_data, command, data);
     380           0 :         }
     381             : 
     382           0 :         if (error == ENETRESET) {
     383           0 :                 if (ifp->if_flags & IFF_RUNNING) {
     384           0 :                         ixgb_disable_intr(sc);
     385           0 :                         ixgb_set_multi(sc);
     386           0 :                         ixgb_enable_intr(sc);
     387           0 :                 }
     388             :                 error = 0;
     389           0 :         }
     390             : 
     391           0 :         splx(s);
     392           0 :         return (error);
     393             : }
     394             : 
     395             : /*********************************************************************
     396             :  *  Watchdog entry point
     397             :  *
     398             :  *  This routine is called whenever hardware quits transmitting.
     399             :  *
     400             :  **********************************************************************/
     401             : 
     402             : void
     403           0 : ixgb_watchdog(struct ifnet * ifp)
     404             : {
     405           0 :         struct ixgb_softc *sc = ifp->if_softc;
     406             : 
     407             :         /*
     408             :          * If we are in this routine because of pause frames, then don't
     409             :          * reset the hardware.
     410             :          */
     411           0 :         if (IXGB_READ_REG(&sc->hw, STATUS) & IXGB_STATUS_TXOFF) {
     412           0 :                 ifp->if_timer = IXGB_TX_TIMEOUT;
     413           0 :                 return;
     414             :         }
     415             : 
     416           0 :         printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);
     417             : 
     418           0 :         ixgb_init(sc);
     419             : 
     420           0 :         sc->watchdog_events++;
     421           0 : }
     422             : 
     423             : /*********************************************************************
     424             :  *  Init entry point
     425             :  *
     426             :  *  This routine is used in two ways. It is used by the stack as
     427             :  *  init entry point in network interface structure. It is also used
     428             :  *  by the driver as a hw/sw initialization routine to get to a
     429             :  *  consistent state.
     430             :  *
     431             :  **********************************************************************/
     432             : 
     433             : void
     434           0 : ixgb_init(void *arg)
     435             : {
     436           0 :         struct ixgb_softc *sc = arg;
     437           0 :         struct ifnet   *ifp = &sc->interface_data.ac_if;
     438             :         uint32_t temp_reg;
     439             :         int s;
     440             : 
     441             :         INIT_DEBUGOUT("ixgb_init: begin");
     442             : 
     443           0 :         s = splnet();
     444             : 
     445           0 :         ixgb_stop(sc);
     446             : 
     447             :         /* Get the latest mac address, User can use a LAA */
     448           0 :         bcopy(sc->interface_data.ac_enaddr, sc->hw.curr_mac_addr,
     449             :               IXGB_ETH_LENGTH_OF_ADDRESS);
     450             : 
     451             :         /* Initialize the hardware */
     452           0 :         if (ixgb_hardware_init(sc)) {
     453           0 :                 printf("%s: Unable to initialize the hardware\n",
     454           0 :                        sc->sc_dv.dv_xname);
     455           0 :                 splx(s);
     456           0 :                 return;
     457             :         }
     458             : 
     459           0 :         if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
     460           0 :                 ixgb_enable_hw_vlans(sc);
     461             : 
     462             :         /* Prepare transmit descriptors and buffers */
     463           0 :         if (ixgb_setup_transmit_structures(sc)) {
     464           0 :                 printf("%s: Could not setup transmit structures\n",
     465           0 :                        sc->sc_dv.dv_xname);
     466           0 :                 ixgb_stop(sc);
     467           0 :                 splx(s);
     468           0 :                 return;
     469             :         }
     470           0 :         ixgb_initialize_transmit_unit(sc);
     471             : 
     472             :         /* Setup Multicast table */
     473           0 :         ixgb_set_multi(sc);
     474             : 
     475             :         /* Prepare receive descriptors and buffers */
     476           0 :         if (ixgb_setup_receive_structures(sc)) {
     477           0 :                 printf("%s: Could not setup receive structures\n",
     478           0 :                        sc->sc_dv.dv_xname);
     479           0 :                 ixgb_stop(sc);
     480           0 :                 splx(s);
     481           0 :                 return;
     482             :         }
     483           0 :         ixgb_initialize_receive_unit(sc);
     484             : 
     485             :         /* Don't lose promiscuous settings */
     486           0 :         ixgb_set_promisc(sc);
     487             : 
     488           0 :         ifp->if_flags |= IFF_RUNNING;
     489           0 :         ifq_clr_oactive(&ifp->if_snd);
     490             : 
     491             :         /* Enable jumbo frames */
     492           0 :         IXGB_WRITE_REG(&sc->hw, MFRMS,
     493             :             sc->hw.max_frame_size << IXGB_MFRMS_SHIFT);
     494           0 :         temp_reg = IXGB_READ_REG(&sc->hw, CTRL0);
     495           0 :         temp_reg |= IXGB_CTRL0_JFE;
     496           0 :         IXGB_WRITE_REG(&sc->hw, CTRL0, temp_reg);
     497             : 
     498           0 :         timeout_add_sec(&sc->timer_handle, 1);
     499           0 :         ixgb_clear_hw_cntrs(&sc->hw);
     500           0 :         ixgb_enable_intr(sc);
     501             : 
     502           0 :         splx(s);
     503           0 : }
     504             : 
     505             : /*********************************************************************
     506             :  *
     507             :  *  Interrupt Service routine
     508             :  *
     509             :  **********************************************************************/
     510             : 
     511             : int
     512           0 : ixgb_intr(void *arg)
     513             : {
     514           0 :         struct ixgb_softc *sc = arg;
     515             :         struct ifnet    *ifp;
     516             :         u_int32_t       reg_icr;
     517             :         boolean_t       rxdmt0 = FALSE;
     518             :         int claimed = 0;
     519             : 
     520           0 :         ifp = &sc->interface_data.ac_if;
     521             : 
     522           0 :         for (;;) {
     523           0 :                 reg_icr = IXGB_READ_REG(&sc->hw, ICR);
     524           0 :                 if (reg_icr == 0)
     525             :                         break;
     526             : 
     527             :                 claimed = 1;
     528             : 
     529           0 :                 if (reg_icr & IXGB_INT_RXDMT0)
     530           0 :                         rxdmt0 = TRUE;
     531             : 
     532           0 :                 if (ifp->if_flags & IFF_RUNNING) {
     533           0 :                         ixgb_rxeof(sc, -1);
     534           0 :                         ixgb_txeof(sc);
     535           0 :                 }
     536             : 
     537             :                 /* Link status change */
     538           0 :                 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
     539           0 :                         timeout_del(&sc->timer_handle);
     540           0 :                         ixgb_check_for_link(&sc->hw);
     541           0 :                         ixgb_update_link_status(sc);
     542           0 :                         timeout_add_sec(&sc->timer_handle, 1);
     543           0 :                 }
     544             : 
     545           0 :                 if (rxdmt0 && sc->raidc) {
     546           0 :                         IXGB_WRITE_REG(&sc->hw, IMC, IXGB_INT_RXDMT0);
     547           0 :                         IXGB_WRITE_REG(&sc->hw, IMS, IXGB_INT_RXDMT0);
     548           0 :                 }
     549             :         }
     550             : 
     551           0 :         if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
     552           0 :                 ixgb_start(ifp);
     553             : 
     554           0 :         return (claimed);
     555             : }
     556             : 
     557             : 
     558             : /*********************************************************************
     559             :  *
     560             :  *  Media Ioctl callback
     561             :  *
     562             :  *  This routine is called whenever the user queries the status of
     563             :  *  the interface using ifconfig.
     564             :  *
     565             :  **********************************************************************/
     566             : void
     567           0 : ixgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
     568             : {
     569           0 :         struct ixgb_softc *sc = ifp->if_softc;
     570             : 
     571             :         INIT_DEBUGOUT("ixgb_media_status: begin");
     572             : 
     573           0 :         ixgb_check_for_link(&sc->hw);
     574           0 :         ixgb_update_link_status(sc);
     575             : 
     576           0 :         ifmr->ifm_status = IFM_AVALID;
     577           0 :         ifmr->ifm_active = IFM_ETHER;
     578             : 
     579           0 :         if (!sc->hw.link_up) {
     580           0 :                 ifmr->ifm_active |= IFM_NONE;
     581           0 :                 return;
     582             :         }
     583             : 
     584           0 :         ifmr->ifm_status |= IFM_ACTIVE;
     585           0 :         if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
     586           0 :             (sc->hw.phy_type == ixgb_phy_type_txn17401))
     587           0 :                 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
     588             :         else
     589           0 :                 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
     590             : 
     591           0 :         return;
     592           0 : }
     593             : 
     594             : /*********************************************************************
     595             :  *
     596             :  *  Media Ioctl callback
     597             :  *
     598             :  *  This routine is called when the user changes speed/duplex using
     599             :  *  media/mediopt option with ifconfig.
     600             :  *
     601             :  **********************************************************************/
     602             : int
     603           0 : ixgb_media_change(struct ifnet * ifp)
     604             : {
     605           0 :         struct ixgb_softc *sc = ifp->if_softc;
     606           0 :         struct ifmedia *ifm = &sc->media;
     607             : 
     608             :         INIT_DEBUGOUT("ixgb_media_change: begin");
     609             : 
     610           0 :         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
     611           0 :                 return (EINVAL);
     612             : 
     613           0 :         return (0);
     614           0 : }
     615             : 
     616             : /*********************************************************************
     617             :  *
     618             :  *  This routine maps the mbufs to tx descriptors.
     619             :  *
     620             :  *  return 0 on success, positive on failure
     621             :  **********************************************************************/
     622             : 
     623             : int
     624           0 : ixgb_encap(struct ixgb_softc *sc, struct mbuf *m_head)
     625             : {
     626             :         u_int8_t        txd_popts;
     627             :         int             i, j, error = 0;
     628             :         bus_dmamap_t    map;
     629             : 
     630             :         struct ixgb_buffer *tx_buffer;
     631             :         struct ixgb_tx_desc *current_tx_desc = NULL;
     632             : 
     633             :         /*
     634             :          * Force a cleanup if number of TX descriptors available hits the
     635             :          * threshold
     636             :          */
     637           0 :         if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
     638           0 :                 ixgb_txeof(sc);
     639             :                 /* Now do we at least have a minimal? */
     640           0 :                 if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
     641           0 :                         sc->no_tx_desc_avail1++;
     642           0 :                         return (ENOBUFS);
     643             :                 }
     644             :         }
     645             : 
     646             :         /*
     647             :          * Map the packet for DMA.
     648             :          */
     649           0 :         tx_buffer = &sc->tx_buffer_area[sc->next_avail_tx_desc];
     650           0 :         map = tx_buffer->map;
     651             : 
     652           0 :         error = bus_dmamap_load_mbuf(sc->txtag, map,
     653             :                                      m_head, BUS_DMA_NOWAIT);
     654           0 :         if (error != 0) {
     655           0 :                 sc->no_tx_dma_setup++;
     656           0 :                 return (error);
     657             :         }
     658             :         IXGB_KASSERT(map->dm_nsegs != 0, ("ixgb_encap: empty packet"));
     659             : 
     660           0 :         if (map->dm_nsegs > sc->num_tx_desc_avail)
     661             :                 goto fail;
     662             : 
     663             : #ifdef IXGB_CSUM_OFFLOAD
     664             :         ixgb_transmit_checksum_setup(sc, m_head, &txd_popts);
     665             : #else
     666             :         txd_popts = 0;
     667             : #endif
     668             : 
     669           0 :         i = sc->next_avail_tx_desc;
     670           0 :         for (j = 0; j < map->dm_nsegs; j++) {
     671           0 :                 tx_buffer = &sc->tx_buffer_area[i];
     672           0 :                 current_tx_desc = &sc->tx_desc_base[i];
     673             : 
     674           0 :                 current_tx_desc->buff_addr = htole64(map->dm_segs[j].ds_addr);
     675           0 :                 current_tx_desc->cmd_type_len = htole32((sc->txd_cmd | map->dm_segs[j].ds_len));
     676           0 :                 current_tx_desc->popts = txd_popts;
     677           0 :                 if (++i == sc->num_tx_desc)
     678             :                         i = 0;
     679             : 
     680           0 :                 tx_buffer->m_head = NULL;
     681             :         }
     682             : 
     683           0 :         sc->num_tx_desc_avail -= map->dm_nsegs;
     684           0 :         sc->next_avail_tx_desc = i;
     685             : 
     686             :         /* Find out if we are in VLAN mode */
     687           0 :         if (m_head->m_flags & M_VLANTAG) {
     688             :                 /* Set the VLAN id */
     689           0 :                 current_tx_desc->vlan = htole16(m_head->m_pkthdr.ether_vtag);
     690             : 
     691             :                 /* Tell hardware to add tag */
     692           0 :                 current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_VLE);
     693           0 :         }
     694             : 
     695           0 :         tx_buffer->m_head = m_head;
     696           0 :         bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
     697             :             BUS_DMASYNC_PREWRITE);
     698             : 
     699             :         /*
     700             :          * Last Descriptor of Packet needs End Of Packet (EOP)
     701             :          */
     702           0 :         current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_EOP);
     703             : 
     704           0 :         return (0);
     705             : 
     706             : fail:
     707           0 :         sc->no_tx_desc_avail2++;
     708           0 :         bus_dmamap_unload(sc->txtag, map);
     709           0 :         return (ENOBUFS);
     710           0 : }
     711             : 
     712             : void
     713           0 : ixgb_set_promisc(struct ixgb_softc *sc)
     714             : {
     715             : 
     716             :         u_int32_t       reg_rctl;
     717           0 :         struct ifnet   *ifp = &sc->interface_data.ac_if;
     718             : 
     719           0 :         reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
     720             : 
     721           0 :         if (ifp->if_flags & IFF_PROMISC) {
     722           0 :                 reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
     723           0 :         } else if (ifp->if_flags & IFF_ALLMULTI) {
     724           0 :                 reg_rctl |= IXGB_RCTL_MPE;
     725           0 :                 reg_rctl &= ~IXGB_RCTL_UPE;
     726           0 :         } else {
     727           0 :                 reg_rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
     728             :         }
     729           0 :         IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
     730           0 : }
     731             : 
     732             : /*********************************************************************
     733             :  *  Multicast Update
     734             :  *
     735             :  *  This routine is called whenever multicast address list is updated.
     736             :  *
     737             :  **********************************************************************/
     738             : 
     739             : void
     740           0 : ixgb_set_multi(struct ixgb_softc *sc)
     741             : {
     742             :         u_int32_t       reg_rctl = 0;
     743           0 :         u_int8_t        mta[MAX_NUM_MULTICAST_ADDRESSES * IXGB_ETH_LENGTH_OF_ADDRESS];
     744             :         int             mcnt = 0;
     745           0 :         struct ifnet   *ifp = &sc->interface_data.ac_if;
     746             :         struct arpcom *ac = &sc->interface_data;
     747             :         struct ether_multi *enm;
     748             :         struct ether_multistep step;
     749             : 
     750             :         IOCTL_DEBUGOUT("ixgb_set_multi: begin");
     751             : 
     752           0 :         if (ac->ac_multirangecnt > 0) {
     753           0 :                 ifp->if_flags |= IFF_ALLMULTI;
     754             :                 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
     755           0 :                 goto setit;
     756             :         }
     757             : 
     758           0 :         ETHER_FIRST_MULTI(step, ac, enm);
     759           0 :         while (enm != NULL) {
     760           0 :                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
     761             :                         break;
     762           0 :                 bcopy(enm->enm_addrlo, &mta[mcnt*IXGB_ETH_LENGTH_OF_ADDRESS],
     763             :                       IXGB_ETH_LENGTH_OF_ADDRESS);
     764           0 :                 mcnt++;
     765           0 :                 ETHER_NEXT_MULTI(step, enm);
     766             :         }
     767             : 
     768             : setit:
     769           0 :         if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
     770           0 :                 reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
     771           0 :                 reg_rctl |= IXGB_RCTL_MPE;
     772           0 :                 IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
     773           0 :         } else
     774           0 :                 ixgb_mc_addr_list_update(&sc->hw, mta, mcnt, 0);
     775           0 : }
     776             : 
     777             : 
     778             : /*********************************************************************
     779             :  *  Timer routine
     780             :  *
     781             :  *  This routine checks for link status and updates statistics.
     782             :  *
     783             :  **********************************************************************/
     784             : 
     785             : void
     786           0 : ixgb_local_timer(void *arg)
     787             : {
     788             :         struct ifnet   *ifp;
     789           0 :         struct ixgb_softc *sc = arg;
     790             :         int s;
     791             : 
     792           0 :         ifp = &sc->interface_data.ac_if;
     793             : 
     794           0 :         s = splnet();
     795             : 
     796           0 :         ixgb_check_for_link(&sc->hw);
     797           0 :         ixgb_update_link_status(sc);
     798           0 :         ixgb_update_stats_counters(sc);
     799             : #ifdef IXGB_DEBUG
     800             :         if (ixgb_display_debug_stats && ifp->if_flags & IFF_RUNNING)
     801             :                 ixgb_print_hw_stats(sc);
     802             : #endif
     803             : 
     804           0 :         timeout_add_sec(&sc->timer_handle, 1);
     805             : 
     806           0 :         splx(s);
     807           0 : }
     808             : 
     809             : void
     810           0 : ixgb_update_link_status(struct ixgb_softc *sc)
     811             : {
     812           0 :         struct ifnet *ifp = &sc->interface_data.ac_if;
     813             : 
     814           0 :         if (sc->hw.link_up) {
     815           0 :                 if (!sc->link_active) {
     816           0 :                         ifp->if_baudrate = IF_Gbps(10);
     817           0 :                         sc->link_active = 1;
     818           0 :                         ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
     819           0 :                         if_link_state_change(ifp);
     820           0 :                 }
     821             :         } else {
     822           0 :                 if (sc->link_active) {
     823           0 :                         ifp->if_baudrate = 0;
     824           0 :                         sc->link_active = 0;
     825           0 :                         ifp->if_link_state = LINK_STATE_DOWN;
     826           0 :                         if_link_state_change(ifp);
     827           0 :                 }
     828             :         }
     829           0 : }
     830             : 
     831             : /*********************************************************************
     832             :  *
     833             :  *  This routine disables all traffic on the adapter by issuing a
     834             :  *  global reset on the MAC and deallocates TX/RX buffers.
     835             :  *
     836             :  **********************************************************************/
     837             : 
     838             : void
     839           0 : ixgb_stop(void *arg)
     840             : {
     841             :         struct ifnet   *ifp;
     842           0 :         struct ixgb_softc *sc = arg;
     843           0 :         ifp = &sc->interface_data.ac_if;
     844             : 
     845             :         INIT_DEBUGOUT("ixgb_stop: begin\n");
     846           0 :         ixgb_disable_intr(sc);
     847           0 :         sc->hw.adapter_stopped = FALSE;
     848           0 :         ixgb_adapter_stop(&sc->hw);
     849           0 :         timeout_del(&sc->timer_handle);
     850             : 
     851             :         /* Tell the stack that the interface is no longer active */
     852           0 :         ifp->if_flags &= ~IFF_RUNNING;
     853           0 :         ifq_clr_oactive(&ifp->if_snd);
     854             : 
     855           0 :         ixgb_free_transmit_structures(sc);
     856           0 :         ixgb_free_receive_structures(sc);
     857           0 : }
     858             : 
     859             : 
     860             : /*********************************************************************
     861             :  *
     862             :  *  Determine hardware revision.
     863             :  *
     864             :  **********************************************************************/
     865             : void
     866           0 : ixgb_identify_hardware(struct ixgb_softc *sc)
     867             : {
     868             :         u_int32_t       reg;
     869           0 :         struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
     870             : 
     871             :         /* Make sure our PCI config space has the necessary stuff set */
     872           0 :         sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
     873             :                                             PCI_COMMAND_STATUS_REG);
     874             : 
     875             :         /* Save off the information about this board */
     876           0 :         sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
     877           0 :         sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
     878             : 
     879           0 :         reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
     880           0 :         sc->hw.revision_id = PCI_REVISION(reg);
     881             : 
     882           0 :         reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
     883           0 :         sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
     884           0 :         sc->hw.subsystem_id = PCI_PRODUCT(reg);
     885             : 
     886             :         /* Set MacType, etc. based on this PCI info */
     887           0 :         switch (sc->hw.device_id) {
     888             :         case IXGB_DEVICE_ID_82597EX:
     889             :         case IXGB_DEVICE_ID_82597EX_SR:
     890             :         case IXGB_DEVICE_ID_82597EX_LR:
     891             :         case IXGB_DEVICE_ID_82597EX_CX4:
     892           0 :                 sc->hw.mac_type = ixgb_82597;
     893           0 :                 break;
     894             :         default:
     895             :                 INIT_DEBUGOUT1("Unknown device if 0x%x", sc->hw.device_id);
     896           0 :                 printf("%s: unsupported device id 0x%x\n",
     897           0 :                     sc->sc_dv.dv_xname, sc->hw.device_id);
     898           0 :         }
     899           0 : }
     900             : 
/*
 * Map the device's memory-space register BAR and establish the
 * interrupt handler.  Returns 0 on success, ENXIO on any failure.
 * Called during attach; the ": ..." printfs continue the autoconf
 * attach line.
 */
int
ixgb_allocate_pci_resources(struct ixgb_softc *sc)
{
	int val;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;

	/* The BAR at IXGB_MMBA must be a memory-space mapping. */
	val = pci_conf_read(pa->pa_pc, pa->pa_tag, IXGB_MMBA);
	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
		printf(": mmba is not mem space\n");
		return (ENXIO);
	}
	/* Map the registers; tag/handle/base/size land in sc->osdep. */
	if (pci_mapreg_map(pa, IXGB_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
	    &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
	    &sc->osdep.ixgb_membase, &sc->osdep.ixgb_memsize, 0)) {
		printf(": cannot find mem space\n");
		return (ENXIO);
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return (ENXIO);
	}

	/* The shared code reaches the bus via this osdep back-pointer. */
	sc->hw.back = &sc->osdep;

	/* Hook ixgb_intr() at IPL_NET. */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, ixgb_intr, sc,
					    sc->sc_dv.dv_xname);
	if (sc->sc_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return (ENXIO);
	}
	printf(": %s", intrstr);

	return (0);
}
     944             : 
/*
 * Release the resources acquired by ixgb_allocate_pci_resources():
 * disestablish the interrupt handler and unmap the register BAR.
 * Safe to call when either resource was never acquired.
 */
void
ixgb_free_pci_resources(struct ixgb_softc *sc)
{
	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;

	/* Tear down the interrupt handler, if one was established. */
	if (sc->sc_intrhand)
		pci_intr_disestablish(pc, sc->sc_intrhand);
	sc->sc_intrhand = 0;

	/* Unmap the register window, if it was mapped. */
	if (sc->osdep.ixgb_membase)
		bus_space_unmap(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
				sc->osdep.ixgb_memsize);
	sc->osdep.ixgb_membase = 0;
}
     960             : 
     961             : /*********************************************************************
     962             :  *
     963             :  *  Initialize the hardware to a configuration as specified by the
     964             :  *  adapter structure. The controller is reset, the EEPROM is
     965             :  *  verified, the MAC address is set, then the shared initialization
     966             :  *  routines are called.
     967             :  *
     968             :  **********************************************************************/
     969             : int
     970           0 : ixgb_hardware_init(struct ixgb_softc *sc)
     971             : {
     972             :         /* Issue a global reset */
     973           0 :         sc->hw.adapter_stopped = FALSE;
     974           0 :         ixgb_adapter_stop(&sc->hw);
     975             : 
     976             :         /* Make sure we have a good EEPROM before we read from it */
     977           0 :         if (!ixgb_validate_eeprom_checksum(&sc->hw)) {
     978           0 :                 printf("%s: The EEPROM Checksum Is Not Valid\n",
     979           0 :                        sc->sc_dv.dv_xname);
     980           0 :                 return (EIO);
     981             :         }
     982           0 :         if (!ixgb_init_hw(&sc->hw)) {
     983           0 :                 printf("%s: Hardware Initialization Failed",
     984           0 :                        sc->sc_dv.dv_xname);
     985           0 :                 return (EIO);
     986             :         }
     987           0 :         bcopy(sc->hw.curr_mac_addr, sc->interface_data.ac_enaddr,
     988             :               IXGB_ETH_LENGTH_OF_ADDRESS);
     989             : 
     990           0 :         return (0);
     991           0 : }
     992             : 
     993             : /*********************************************************************
     994             :  *
     995             :  *  Setup networking device structure and register an interface.
     996             :  *
     997             :  **********************************************************************/
     998             : void
     999           0 : ixgb_setup_interface(struct ixgb_softc *sc)
    1000             : {
    1001             :         struct ifnet   *ifp;
    1002             :         INIT_DEBUGOUT("ixgb_setup_interface: begin");
    1003             : 
    1004           0 :         ifp = &sc->interface_data.ac_if;
    1005           0 :         strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);
    1006             : 
    1007           0 :         ifp->if_softc = sc;
    1008           0 :         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    1009           0 :         ifp->if_ioctl = ixgb_ioctl;
    1010           0 :         ifp->if_start = ixgb_start;
    1011           0 :         ifp->if_watchdog = ixgb_watchdog;
    1012           0 :         ifp->if_hardmtu =
    1013             :                 IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN - ETHER_CRC_LEN;
    1014           0 :         IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
    1015             : 
    1016           0 :         ifp->if_capabilities = IFCAP_VLAN_MTU;
    1017             : 
    1018             : #if NVLAN > 0
    1019           0 :         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
    1020             : #endif
    1021             : 
    1022             : #ifdef IXGB_CSUM_OFFLOAD
    1023             :         ifp->if_capabilities |= IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4;
    1024             : #endif
    1025             : 
    1026             :         /*
    1027             :          * Specify the media types supported by this adapter and register
    1028             :          * callbacks to update media and link information
    1029             :          */
    1030           0 :         ifmedia_init(&sc->media, IFM_IMASK, ixgb_media_change,
    1031             :                      ixgb_media_status);
    1032           0 :         if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
    1033           0 :             (sc->hw.phy_type == ixgb_phy_type_txn17401)) {
    1034           0 :                 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_LR |
    1035             :                     IFM_FDX, 0, NULL);
    1036           0 :         } else {
    1037           0 :                 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR |
    1038             :                     IFM_FDX, 0, NULL);
    1039             :         }
    1040           0 :         ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    1041           0 :         ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
    1042             : 
    1043           0 :         if_attach(ifp);
    1044           0 :         ether_ifattach(ifp);
    1045           0 : }
    1046             : 
    1047             : /********************************************************************
    1048             :  * Manage DMA'able memory.
    1049             :  *******************************************************************/
    1050             : int
    1051           0 : ixgb_dma_malloc(struct ixgb_softc *sc, bus_size_t size,
    1052             :                 struct ixgb_dma_alloc * dma, int mapflags)
    1053             : {
    1054             :         int r;
    1055             : 
    1056           0 :         dma->dma_tag = sc->osdep.ixgb_pa.pa_dmat;
    1057           0 :         r = bus_dmamap_create(dma->dma_tag, size, 1,
    1058             :             size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
    1059           0 :         if (r != 0) {
    1060           0 :                 printf("%s: ixgb_dma_malloc: bus_dmamap_create failed; "
    1061           0 :                         "error %u\n", sc->sc_dv.dv_xname, r);
    1062           0 :                 goto fail_0;
    1063             :         }
    1064             : 
    1065           0 :         r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
    1066             :             1, &dma->dma_nseg, BUS_DMA_NOWAIT);
    1067           0 :         if (r != 0) {
    1068           0 :                 printf("%s: ixgb_dma_malloc: bus_dmammem_alloc failed; "
    1069           0 :                         "size %lu, error %d\n", sc->sc_dv.dv_xname,
    1070             :                         (unsigned long)size, r);
    1071           0 :                 goto fail_1;
    1072             :         }
    1073             : 
    1074           0 :         r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
    1075             :             &dma->dma_vaddr, BUS_DMA_NOWAIT);
    1076           0 :         if (r != 0) {
    1077           0 :                 printf("%s: ixgb_dma_malloc: bus_dmammem_map failed; "
    1078           0 :                         "size %lu, error %d\n", sc->sc_dv.dv_xname,
    1079             :                         (unsigned long)size, r);
    1080           0 :                 goto fail_2;
    1081             :         }
    1082             : 
    1083           0 :         r = bus_dmamap_load(sc->osdep.ixgb_pa.pa_dmat, dma->dma_map,
    1084             :                             dma->dma_vaddr, size, NULL,
    1085             :                             mapflags | BUS_DMA_NOWAIT);
    1086           0 :         if (r != 0) {
    1087           0 :                 printf("%s: ixgb_dma_malloc: bus_dmamap_load failed; "
    1088           0 :                         "error %u\n", sc->sc_dv.dv_xname, r);
    1089             :                 goto fail_3;
    1090             :         }
    1091             : 
    1092           0 :         dma->dma_size = size;
    1093           0 :         return (0);
    1094             : 
    1095             : fail_3: 
    1096           0 :         bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
    1097             : fail_2: 
    1098           0 :         bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
    1099             : fail_1: 
    1100           0 :         bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
    1101             : fail_0: 
    1102           0 :         dma->dma_map = NULL;
    1103           0 :         dma->dma_tag = NULL;
    1104             : 
    1105           0 :         return (r);
    1106           0 : }
    1107             : 
    1108             : void
    1109           0 : ixgb_dma_free(struct ixgb_softc *sc, struct ixgb_dma_alloc *dma)
    1110             : {
    1111           0 :         if (dma->dma_tag == NULL)
    1112             :                 return;
    1113             : 
    1114           0 :         if (dma->dma_map != NULL) {
    1115           0 :                 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
    1116             :                     dma->dma_map->dm_mapsize,
    1117             :                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    1118           0 :                 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
    1119           0 :                 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
    1120           0 :                 bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
    1121           0 :                 bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
    1122           0 :         }
    1123           0 : }
    1124             : 
    1125             : /*********************************************************************
    1126             :  *
    1127             :  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
    1128             :  *  the information needed to transmit a packet on the wire.
    1129             :  *
    1130             :  **********************************************************************/
    1131             : int
    1132           0 : ixgb_allocate_transmit_structures(struct ixgb_softc *sc)
    1133             : {
    1134           0 :         if (!(sc->tx_buffer_area = mallocarray(sc->num_tx_desc,
    1135             :             sizeof(struct ixgb_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
    1136           0 :                 printf("%s: Unable to allocate tx_buffer memory\n",
    1137           0 :                        sc->sc_dv.dv_xname);
    1138           0 :                 return (ENOMEM);
    1139             :         }
    1140             : 
    1141           0 :         return (0);
    1142           0 : }
    1143             : 
    1144             : /*********************************************************************
    1145             :  *
    1146             :  *  Allocate and initialize transmit structures.
    1147             :  *
    1148             :  **********************************************************************/
int
ixgb_setup_transmit_structures(struct ixgb_softc *sc)
{
        struct  ixgb_buffer *tx_buffer;
        int error, i;

        /* Allocate the per-descriptor software state first. */
        if ((error = ixgb_allocate_transmit_structures(sc)) != 0)
                goto fail;

        /* Clear the descriptor ring so no stale descriptors are seen. */
        bzero((void *)sc->tx_desc_base,
              (sizeof(struct ixgb_tx_desc)) * sc->num_tx_desc);

        sc->txtag = sc->osdep.ixgb_pa.pa_dmat;

        /*
         * Create one DMA map per TX slot, sized for a jumbo frame
         * scattered across up to IXGB_MAX_SCATTER segments.
         */
        tx_buffer = sc->tx_buffer_area;
        for (i = 0; i < sc->num_tx_desc; i++) {
                error = bus_dmamap_create(sc->txtag, IXGB_MAX_JUMBO_FRAME_SIZE,
                            IXGB_MAX_SCATTER, IXGB_MAX_JUMBO_FRAME_SIZE, 0,
                            BUS_DMA_NOWAIT, &tx_buffer->map);
                if (error != 0) {
                        printf("%s: Unable to create TX DMA map\n",
                            sc->sc_dv.dv_xname);
                        goto fail;
                }
                tx_buffer++;
        }

        /* Ring starts empty: producer and reclaim indices at slot 0. */
        sc->next_avail_tx_desc = 0;
        sc->oldest_used_tx_desc = 0;

        /* Set number of descriptors available */
        sc->num_tx_desc_avail = sc->num_tx_desc;

        /* Set checksum context */
        sc->active_checksum_context = OFFLOAD_NONE;
        /* Hand the cleared ring to the device. */
        bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
           sc->txdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return (0);

fail:
        /* Undo any partially-created buffers and maps. */
        ixgb_free_transmit_structures(sc);
        return (error);
}
    1193             : 
    1194             : /*********************************************************************
    1195             :  *
    1196             :  *  Enable transmit unit.
    1197             :  *
    1198             :  **********************************************************************/
    1199             : void
    1200           0 : ixgb_initialize_transmit_unit(struct ixgb_softc *sc)
    1201             : {
    1202             :         u_int32_t       reg_tctl;
    1203             :         u_int64_t       bus_addr;
    1204             : 
    1205             :         /* Setup the Base and Length of the Tx Descriptor Ring */
    1206           0 :         bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
    1207           0 :         IXGB_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
    1208           0 :         IXGB_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
    1209           0 :         IXGB_WRITE_REG(&sc->hw, TDLEN,
    1210             :                        sc->num_tx_desc *
    1211             :                        sizeof(struct ixgb_tx_desc));
    1212             : 
    1213             :         /* Setup the HW Tx Head and Tail descriptor pointers */
    1214           0 :         IXGB_WRITE_REG(&sc->hw, TDH, 0);
    1215           0 :         IXGB_WRITE_REG(&sc->hw, TDT, 0);
    1216             : 
    1217             :         HW_DEBUGOUT2("Base = %x, Length = %x\n",
    1218             :                      IXGB_READ_REG(&sc->hw, TDBAL),
    1219             :                      IXGB_READ_REG(&sc->hw, TDLEN));
    1220             : 
    1221           0 :         IXGB_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);
    1222             : 
    1223             :         /* Program the Transmit Control Register */
    1224           0 :         reg_tctl = IXGB_READ_REG(&sc->hw, TCTL);
    1225             :         reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
    1226           0 :         IXGB_WRITE_REG(&sc->hw, TCTL, reg_tctl);
    1227             : 
    1228             :         /* Setup Transmit Descriptor Settings for this adapter */
    1229           0 :         sc->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;
    1230             : 
    1231           0 :         if (sc->tx_int_delay > 0)
    1232           0 :                 sc->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
    1233           0 : }
    1234             : 
    1235             : /*********************************************************************
    1236             :  *
    1237             :  *  Free all transmit related data structures.
    1238             :  *
    1239             :  **********************************************************************/
    1240             : void
    1241           0 : ixgb_free_transmit_structures(struct ixgb_softc *sc)
    1242             : {
    1243             :         struct ixgb_buffer *tx_buffer;
    1244             :         int             i;
    1245             : 
    1246             :         INIT_DEBUGOUT("free_transmit_structures: begin");
    1247             : 
    1248           0 :         if (sc->tx_buffer_area != NULL) {
    1249             :                 tx_buffer = sc->tx_buffer_area;
    1250           0 :                 for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
    1251           0 :                         if (tx_buffer->map != NULL &&
    1252           0 :                             tx_buffer->map->dm_nsegs > 0) {
    1253           0 :                                 bus_dmamap_sync(sc->txtag, tx_buffer->map,
    1254             :                                     0, tx_buffer->map->dm_mapsize,
    1255             :                                     BUS_DMASYNC_POSTWRITE);
    1256           0 :                                 bus_dmamap_unload(sc->txtag,
    1257             :                                     tx_buffer->map);
    1258           0 :                         }
    1259             : 
    1260           0 :                         if (tx_buffer->m_head != NULL) {
    1261           0 :                                 m_freem(tx_buffer->m_head);
    1262           0 :                                 tx_buffer->m_head = NULL;
    1263           0 :                         }
    1264           0 :                         if (tx_buffer->map != NULL) {
    1265           0 :                                 bus_dmamap_destroy(sc->txtag,
    1266             :                                     tx_buffer->map);
    1267           0 :                                 tx_buffer->map = NULL;
    1268           0 :                         }
    1269             :                 }
    1270             :         }
    1271           0 :         if (sc->tx_buffer_area != NULL) {
    1272           0 :                 free(sc->tx_buffer_area, M_DEVBUF, 0);
    1273           0 :                 sc->tx_buffer_area = NULL;
    1274           0 :         }
    1275           0 :         if (sc->txtag != NULL) {
    1276           0 :                 sc->txtag = NULL;
    1277           0 :         }
    1278           0 : }
    1279             : 
    1280             : /*********************************************************************
    1281             :  *
    1282             :  *  The offload context needs to be set when we transfer the first
    1283             :  *  packet of a particular protocol (TCP/UDP). We change the
    1284             :  *  context only if the protocol type changes.
    1285             :  *
    1286             :  **********************************************************************/
void
ixgb_transmit_checksum_setup(struct ixgb_softc *sc,
                             struct mbuf *mp,
                             u_int8_t *txd_popts)
{
        struct ixgb_context_desc *TXD;
        struct ixgb_buffer *tx_buffer;
        int             curr_txd;

        if (mp->m_pkthdr.csum_flags) {

                if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) {
                        *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
                        /* Context already set up for TCP: nothing to do. */
                        if (sc->active_checksum_context == OFFLOAD_TCP_IP)
                                return;
                        else
                                sc->active_checksum_context = OFFLOAD_TCP_IP;

                } else if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) {
                        *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
                        /* Context already set up for UDP: nothing to do. */
                        if (sc->active_checksum_context == OFFLOAD_UDP_IP)
                                return;
                        else
                                sc->active_checksum_context = OFFLOAD_UDP_IP;
                } else {
                        /* Other csum flags: no TX checksum offload wanted. */
                        *txd_popts = 0;
                        return;
                }
        } else {
                *txd_popts = 0;
                return;
        }

        /*
         * If we reach this point, the checksum offload context needs to be
         * reset.  A context descriptor consumes one slot of the TX ring.
         */
        curr_txd = sc->next_avail_tx_desc;
        tx_buffer = &sc->tx_buffer_area[curr_txd];
        TXD = (struct ixgb_context_desc *) & sc->tx_desc_base[curr_txd];

        /*
         * Checksum coverage starts just past the IP header.  NOTE(review):
         * this assumes a fixed-size IPv4 header with no options — confirm
         * that callers never request offload for packets with IP options.
         */
        TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
        TXD->tucse = 0;             /* 0 = checksum to the end of the packet */

        TXD->mss = 0;               /* no TSO */

        /* Offset where the hardware stores the computed checksum. */
        if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
                TXD->tucso =
                        ENET_HEADER_SIZE + sizeof(struct ip) +
                        offsetof(struct tcphdr, th_sum);
        } else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
                TXD->tucso =
                        ENET_HEADER_SIZE + sizeof(struct ip) +
                        offsetof(struct udphdr, uh_sum);
        }
        TXD->cmd_type_len = htole32(IXGB_CONTEXT_DESC_CMD_TCP |
            IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE);

        /* Context descriptors carry no mbuf to free at reclaim time. */
        tx_buffer->m_head = NULL;

        /* Advance the producer index, wrapping at the ring end. */
        if (++curr_txd == sc->num_tx_desc)
                curr_txd = 0;

        sc->num_tx_desc_avail--;
        sc->next_avail_tx_desc = curr_txd;
}
    1353             : 
    1354             : /**********************************************************************
    1355             :  *
    1356             :  *  Examine each tx_buffer in the used queue. If the hardware is done
    1357             :  *  processing the packet then free associated resources. The
    1358             :  *  tx_buffer is put back on the free queue.
    1359             :  *
    1360             :  **********************************************************************/
void
ixgb_txeof(struct ixgb_softc *sc)
{
        int             i, num_avail;
        struct ixgb_buffer *tx_buffer;
        struct ixgb_tx_desc *tx_desc;
        struct ifnet    *ifp = &sc->interface_data.ac_if;

        /* Ring completely free: nothing to reclaim. */
        if (sc->num_tx_desc_avail == sc->num_tx_desc)
                return;

        num_avail = sc->num_tx_desc_avail;
        i = sc->oldest_used_tx_desc;

        tx_buffer = &sc->tx_buffer_area[i];
        tx_desc = &sc->tx_desc_base[i];

        /* Pull the descriptor ring out of DMA so status bits are current. */
        bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
            sc->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
        /* Walk forward while the hardware reports Descriptor Done. */
        while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {

                tx_desc->status = 0;
                num_avail++;

                if (tx_buffer->m_head != NULL) {
                        /* Unload the packet's DMA map before freeing it. */
                        if (tx_buffer->map->dm_nsegs > 0) {
                                bus_dmamap_sync(sc->txtag, tx_buffer->map,
                                    0, tx_buffer->map->dm_mapsize,
                                    BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(sc->txtag, tx_buffer->map);
                        }

                        m_freem(tx_buffer->m_head);
                        tx_buffer->m_head = NULL;
                }
                /* Wrap to the start of the ring. */
                if (++i == sc->num_tx_desc)
                        i = 0;

                tx_buffer = &sc->tx_buffer_area[i];
                tx_desc = &sc->tx_desc_base[i];
        }
        /* Push the cleared status words back for the device. */
        bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
            sc->txdma.dma_map->dm_mapsize,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        sc->oldest_used_tx_desc = i;

        /*
         * If we have enough room, clear IFF_OACTIVE to tell the stack that
         * it is OK to send packets. If there are no pending descriptors,
         * clear the timeout. Otherwise, if some descriptors have been freed,
         * restart the timeout.
         */
        if (num_avail > IXGB_TX_CLEANUP_THRESHOLD)
                ifq_clr_oactive(&ifp->if_snd);

        /* All clean, turn off the timer */
        if (num_avail == sc->num_tx_desc)
                ifp->if_timer = 0;
        /* Some cleaned, reset the timer */
        else if (num_avail != sc->num_tx_desc_avail)
                ifp->if_timer = IXGB_TX_TIMEOUT;

        sc->num_tx_desc_avail = num_avail;
}
    1426             : 
    1427             : 
    1428             : /*********************************************************************
    1429             :  *
    1430             :  *  Get a buffer from system mbuf buffer pool.
    1431             :  *
    1432             :  **********************************************************************/
    1433             : int
    1434           0 : ixgb_get_buf(struct ixgb_softc *sc, int i,
    1435             :              struct mbuf *nmp)
    1436             : {
    1437             :         struct mbuf *mp = nmp;
    1438             :         struct ixgb_buffer *rx_buffer;
    1439             :         int             error;
    1440             : 
    1441           0 :         if (mp == NULL) {
    1442           0 :                 MGETHDR(mp, M_DONTWAIT, MT_DATA);
    1443           0 :                 if (mp == NULL) {
    1444           0 :                         sc->mbuf_alloc_failed++;
    1445           0 :                         return (ENOBUFS);
    1446             :                 }
    1447           0 :                 MCLGET(mp, M_DONTWAIT);
    1448           0 :                 if ((mp->m_flags & M_EXT) == 0) {
    1449           0 :                         m_freem(mp);
    1450           0 :                         sc->mbuf_cluster_failed++;
    1451           0 :                         return (ENOBUFS);
    1452             :                 }
    1453           0 :                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
    1454           0 :         } else {
    1455           0 :                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
    1456           0 :                 mp->m_data = mp->m_ext.ext_buf;
    1457           0 :                 mp->m_next = NULL;
    1458             :         }
    1459             : 
    1460           0 :         if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
    1461           0 :                 m_adj(mp, ETHER_ALIGN);
    1462             : 
    1463           0 :         rx_buffer = &sc->rx_buffer_area[i];
    1464             : 
    1465             :         /*
    1466             :          * Using memory from the mbuf cluster pool, invoke the bus_dma
    1467             :          * machinery to arrange the memory mapping.
    1468             :          */
    1469           0 :         error = bus_dmamap_load_mbuf(sc->rxtag, rx_buffer->map,
    1470             :             mp, BUS_DMA_NOWAIT);
    1471           0 :         if (error) {
    1472           0 :                 m_freem(mp);
    1473           0 :                 return (error);
    1474             :         }
    1475           0 :         rx_buffer->m_head = mp;
    1476           0 :         bzero(&sc->rx_desc_base[i], sizeof(sc->rx_desc_base[i]));
    1477           0 :         sc->rx_desc_base[i].buff_addr = htole64(rx_buffer->map->dm_segs[0].ds_addr);
    1478           0 :         bus_dmamap_sync(sc->rxtag, rx_buffer->map, 0,
    1479             :             rx_buffer->map->dm_mapsize, BUS_DMASYNC_PREREAD);
    1480             : 
    1481           0 :         return (0);
    1482           0 : }
    1483             : 
    1484             : /*********************************************************************
    1485             :  *
    1486             :  *  Allocate memory for rx_buffer structures. Since we use one
    1487             :  *  rx_buffer per received packet, the maximum number of rx_buffer's
    1488             :  *  that we'll need is equal to the number of receive descriptors
    1489             :  *  that we've allocated.
    1490             :  *
    1491             :  **********************************************************************/
    1492             : int
    1493           0 : ixgb_allocate_receive_structures(struct ixgb_softc *sc)
    1494             : {
    1495             :         int             i, error;
    1496             :         struct ixgb_buffer *rx_buffer;
    1497             : 
    1498           0 :         if (!(sc->rx_buffer_area = mallocarray(sc->num_rx_desc,
    1499             :             sizeof(struct ixgb_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
    1500           0 :                 printf("%s: Unable to allocate rx_buffer memory\n",
    1501           0 :                        sc->sc_dv.dv_xname);
    1502           0 :                 return (ENOMEM);
    1503             :         }
    1504             : 
    1505           0 :         sc->rxtag = sc->osdep.ixgb_pa.pa_dmat;
    1506             : 
    1507           0 :         rx_buffer = sc->rx_buffer_area;
    1508           0 :         for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
    1509           0 :                 error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
    1510             :                                           MCLBYTES, 0, BUS_DMA_NOWAIT,
    1511             :                                           &rx_buffer->map);
    1512           0 :                 if (error != 0) {
    1513           0 :                         printf("%s: ixgb_allocate_receive_structures: "
    1514             :                                "bus_dmamap_create failed; error %u\n",
    1515           0 :                                sc->sc_dv.dv_xname, error);
    1516           0 :                         goto fail;
    1517             :                 }
    1518             :         }
    1519             : 
    1520           0 :         for (i = 0; i < sc->num_rx_desc; i++) {
    1521           0 :                 error = ixgb_get_buf(sc, i, NULL);
    1522           0 :                 if (error != 0)
    1523             :                         goto fail;
    1524             :         }
    1525           0 :         bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
    1526             :             sc->rxdma.dma_map->dm_mapsize,
    1527             :             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    1528             : 
    1529           0 :         return (0);
    1530             : 
    1531             : fail:
    1532           0 :         ixgb_free_receive_structures(sc);
    1533           0 :         return (error);
    1534           0 : }
    1535             : 
    1536             : /*********************************************************************
    1537             :  *
    1538             :  *  Allocate and initialize receive structures.
    1539             :  *
    1540             :  **********************************************************************/
    1541             : int
    1542           0 : ixgb_setup_receive_structures(struct ixgb_softc *sc)
    1543             : {
    1544           0 :         bzero((void *)sc->rx_desc_base,
    1545             :               (sizeof(struct ixgb_rx_desc)) * sc->num_rx_desc);
    1546             : 
    1547           0 :         if (ixgb_allocate_receive_structures(sc))
    1548           0 :                 return (ENOMEM);
    1549             : 
    1550             :         /* Setup our descriptor pointers */
    1551           0 :         sc->next_rx_desc_to_check = 0;
    1552           0 :         sc->next_rx_desc_to_use = 0;
    1553           0 :         return (0);
    1554           0 : }
    1555             : 
    1556             : /*********************************************************************
    1557             :  *
    1558             :  *  Enable receive unit.
    1559             :  *
    1560             :  **********************************************************************/
void
ixgb_initialize_receive_unit(struct ixgb_softc *sc)
{
        u_int32_t       reg_rctl;
        u_int32_t       reg_rxcsum;
        u_int32_t       reg_rxdctl;
        u_int64_t       bus_addr;

        /*
         * Make sure receives are disabled while setting up the descriptor
         * ring
         */
        reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
        IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);

        /* Set the Receive Delay Timer Register */
        IXGB_WRITE_REG(&sc->hw, RDTR,
                       sc->rx_int_delay);

        /* Setup the Base and Length of the Rx Descriptor Ring */
        bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
        IXGB_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
        IXGB_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
        IXGB_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
                       sizeof(struct ixgb_rx_desc));

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        IXGB_WRITE_REG(&sc->hw, RDH, 0);

        /* Tail one behind head: every descriptor is available to the HW. */
        IXGB_WRITE_REG(&sc->hw, RDT, sc->num_rx_desc - 1);

        /* Prefetch/host/write-back thresholds for the RX DMA engine. */
        reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
                | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
                | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
        IXGB_WRITE_REG(&sc->hw, RXDCTL, reg_rxdctl);

        /*
         * NOTE(review): raidc is set to 1 immediately before the test, so
         * the conditional is always taken; it reads as a leftover tunable.
         */
        sc->raidc = 1;
        if (sc->raidc) {
                uint32_t        raidc;
                uint8_t         poll_threshold;
#define IXGB_RAIDC_POLL_DEFAULT 120

                /* Derive the poll threshold from the ring size (max 0x3F). */
                poll_threshold = ((sc->num_rx_desc - 1) >> 3);
                poll_threshold >>= 1;
                poll_threshold &= 0x3F;
                raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
                        (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
                        (sc->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
                        poll_threshold;
                IXGB_WRITE_REG(&sc->hw, RAIDC, raidc);
        }

        /* Enable Receive Checksum Offload for TCP and UDP ? */
        reg_rxcsum = IXGB_READ_REG(&sc->hw, RXCSUM);
        reg_rxcsum |= IXGB_RXCSUM_TUOFL;
        IXGB_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);

        /* Setup the Receive Control Register */
        reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
        reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
        reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
                IXGB_RCTL_CFF |
                (sc->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

        /* Encode the configured buffer size into RCTL. */
        switch (sc->rx_buffer_len) {
        default:
        case IXGB_RXBUFFER_2048:
                reg_rctl |= IXGB_RCTL_BSIZE_2048;
                break;
        case IXGB_RXBUFFER_4096:
                reg_rctl |= IXGB_RCTL_BSIZE_4096;
                break;
        case IXGB_RXBUFFER_8192:
                reg_rctl |= IXGB_RCTL_BSIZE_8192;
                break;
        case IXGB_RXBUFFER_16384:
                reg_rctl |= IXGB_RCTL_BSIZE_16384;
                break;
        }

        reg_rctl |= IXGB_RCTL_RXEN;

        /* Enable Receives */
        IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
}
    1646             : 
    1647             : /*********************************************************************
    1648             :  *
    1649             :  *  Free receive related data structures.
    1650             :  *
    1651             :  **********************************************************************/
    1652             : void
    1653           0 : ixgb_free_receive_structures(struct ixgb_softc *sc)
    1654             : {
    1655             :         struct ixgb_buffer *rx_buffer;
    1656             :         int             i;
    1657             : 
    1658             :         INIT_DEBUGOUT("free_receive_structures: begin");
    1659             : 
    1660           0 :         if (sc->rx_buffer_area != NULL) {
    1661             :                 rx_buffer = sc->rx_buffer_area;
    1662           0 :                 for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
    1663           0 :                         if (rx_buffer->map != NULL &&
    1664           0 :                             rx_buffer->map->dm_nsegs > 0) {
    1665           0 :                                 bus_dmamap_sync(sc->rxtag, rx_buffer->map,
    1666             :                                     0, rx_buffer->map->dm_mapsize,
    1667             :                                     BUS_DMASYNC_POSTREAD);
    1668           0 :                                 bus_dmamap_unload(sc->rxtag,
    1669             :                                     rx_buffer->map);
    1670           0 :                         }
    1671           0 :                         if (rx_buffer->m_head != NULL) {
    1672           0 :                                 m_freem(rx_buffer->m_head);
    1673           0 :                                 rx_buffer->m_head = NULL;
    1674           0 :                         }
    1675           0 :                         if (rx_buffer->map != NULL) {
    1676           0 :                                 bus_dmamap_destroy(sc->rxtag,
    1677             :                                     rx_buffer->map);
    1678           0 :                                 rx_buffer->map = NULL;
    1679           0 :                         }
    1680             :                 }
    1681             :         }
    1682           0 :         if (sc->rx_buffer_area != NULL) {
    1683           0 :                 free(sc->rx_buffer_area, M_DEVBUF, 0);
    1684           0 :                 sc->rx_buffer_area = NULL;
    1685           0 :         }
    1686           0 :         if (sc->rxtag != NULL)
    1687           0 :                 sc->rxtag = NULL;
    1688           0 : }
    1689             : 
    1690             : /*********************************************************************
    1691             :  *
    1692             :  *  This routine executes in interrupt context. It replenishes
    1693             :  *  the mbufs in the descriptor and sends data which has been
    1694             :  *  dma'ed into host memory to upper layer.
    1695             :  *
    1696             :  *  We loop at most count times if count is > 0, or until done if
    1697             :  *  count < 0.
    1698             :  *
    1699             :  *********************************************************************/
void
ixgb_rxeof(struct ixgb_softc *sc, int count)
{
        struct ifnet   *ifp;
        struct mbuf_list ml = MBUF_LIST_INITIALIZER();
        struct mbuf    *mp;
        int             eop = 0;        /* current descriptor ends a packet */
        int             len;
        u_int8_t        accept_frame = 0;
        int             i;              /* index of descriptor under scan */
        int             next_to_use = 0; /* first slot needing a fresh mbuf */
        int             eop_desc;       /* index of the last EOP descriptor seen */

        /* Pointer to the receive descriptor being examined. */
        struct ixgb_rx_desc *current_desc;

        ifp = &sc->interface_data.ac_if;
        i = sc->next_rx_desc_to_check;
        next_to_use = sc->next_rx_desc_to_use;
        eop_desc = sc->next_rx_desc_to_check;
        current_desc = &sc->rx_desc_base[i];
        /* Pull the descriptor ring up to date before reading status bits. */
        bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
            sc->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

        /* DD (descriptor done) clear: hardware has written nothing new. */
        if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD))
                return;

        /*
         * Walk completed descriptors; a negative count never reaches zero,
         * so it means "until done" (see the function header above).
         */
        while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) &&
                    (count != 0) &&
                    (ifp->if_flags & IFF_RUNNING)) {

                /* Detach this slot's mbuf from DMA before touching its data. */
                mp = sc->rx_buffer_area[i].m_head;
                bus_dmamap_sync(sc->rxtag, sc->rx_buffer_area[i].map,
                    0, sc->rx_buffer_area[i].map->dm_mapsize,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->rxtag, sc->rx_buffer_area[i].map);

                accept_frame = 1;
                /* count is decremented per packet (EOP), not per fragment. */
                if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
                        count--;
                        eop = 1;
                } else {
                        eop = 0;
                }
                /* Descriptor fields are little-endian on the wire. */
                len = letoh16(current_desc->length);

                /* Any CRC/symbol/parity/RX error poisons the whole frame. */
                if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
                            IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
                                            IXGB_RX_DESC_ERRORS_RXE))
                        accept_frame = 0;
                if (accept_frame) {

                        /* Assign correct length to the current fragment */
                        mp->m_len = len;

                        /* fmp/lmp carry a partial chain across calls. */
                        if (sc->fmp == NULL) {
                                mp->m_pkthdr.len = len;
                                sc->fmp = mp;        /* Store the first mbuf */
                                sc->lmp = mp;
                        } else {
                                /* Chain mbuf's together */
                                mp->m_flags &= ~M_PKTHDR;
                                sc->lmp->m_next = mp;
                                sc->lmp = sc->lmp->m_next;
                                sc->fmp->m_pkthdr.len += len;
                        }

                        if (eop) {
                                /* Remember where the packet ended for the
                                 * 82597EX replenish workaround below. */
                                eop_desc = i;
                                ixgb_receive_checksum(sc, current_desc, sc->fmp);

#if NVLAN > 0
                                /* VP set: hardware stripped a VLAN tag into
                                 * the 'special' field; hand it to the stack. */
                                if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
                                        sc->fmp->m_pkthdr.ether_vtag =
                                            letoh16(current_desc->special);
                                        sc->fmp->m_flags |= M_VLANTAG;
                                }
#endif


                                ml_enqueue(&ml, sc->fmp);
                                sc->fmp = NULL;
                                sc->lmp = NULL;
                        }
                        sc->rx_buffer_area[i].m_head = NULL;
                } else {
                        /* Bad frame: drop any partial chain built so far. */
                        sc->dropped_pkts++;
                        m_freem(sc->fmp);
                        sc->fmp = NULL;
                        sc->lmp = NULL;
                }

                /* Zero out the receive descriptors status  */
                current_desc->status = 0;
                bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
                    sc->rxdma.dma_map->dm_mapsize,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

                /* Advance our pointers to the next descriptor */
                if (++i == sc->num_rx_desc) {
                        i = 0;
                        current_desc = sc->rx_desc_base;
                } else
                        current_desc++;
        }
        sc->next_rx_desc_to_check = i;

        /* Step back to the last descriptor actually processed. */
        if (--i < 0)
                i = (sc->num_rx_desc - 1);

        /*
         * 82597EX: Workaround for redundent write back in receive descriptor ring (causes
         * memory corruption). Avoid using and re-submitting the most recently received RX
         * descriptor back to hardware.
         *
         * if(Last written back descriptor == EOP bit set descriptor)
         *      then avoid re-submitting the most recently received RX descriptor 
         *      back to hardware.
         * if(Last written back descriptor != EOP bit set descriptor)
         *      then avoid re-submitting the most recently received RX descriptors
         *      till last EOP bit set descriptor. 
         */
        if (eop_desc != i) {
                if (++eop_desc == sc->num_rx_desc)
                        eop_desc = 0;
                i = eop_desc;
        } 
        /* Replenish the descriptors with new mbufs till last EOP bit set descriptor */
        while (next_to_use != i) {
                current_desc = &sc->rx_desc_base[next_to_use];
                if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
                            IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
                                             IXGB_RX_DESC_ERRORS_RXE))) {
                        /* Errored slot still owns its mbuf; recycle it. */
                        mp = sc->rx_buffer_area[next_to_use].m_head;
                        ixgb_get_buf(sc, next_to_use, mp);
                } else {
                        /* Allocation failure: stop replenishing for now. */
                        if (ixgb_get_buf(sc, next_to_use, NULL) == ENOBUFS)
                                break;
                }
                /* Advance our pointers to the next descriptor */
                if (++next_to_use == sc->num_rx_desc) 
                        next_to_use = 0;
        }
        sc->next_rx_desc_to_use = next_to_use;
        /* RDT must point at the last valid descriptor, one before next_to_use. */
        if (--next_to_use < 0)
                next_to_use = (sc->num_rx_desc - 1);
        /* Advance the IXGB's Receive Queue #0  "Tail Pointer" */
        IXGB_WRITE_REG(&sc->hw, RDT, next_to_use);

        /* Hand the completed packets to the network stack in one batch. */
        if_input(ifp, &ml);
}
    1851             : 
    1852             : /*********************************************************************
    1853             :  *
    1854             :  *  Verify that the hardware indicated that the checksum is valid.
    1855             :  *  Inform the stack about the status of checksum so that stack
    1856             :  *  doesn't spend time verifying the checksum.
    1857             :  *
    1858             :  *********************************************************************/
    1859             : void
    1860           0 : ixgb_receive_checksum(struct ixgb_softc *sc,
    1861             :                       struct ixgb_rx_desc *rx_desc,
    1862             :                       struct mbuf *mp)
    1863             : {
    1864           0 :         if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
    1865           0 :                 mp->m_pkthdr.csum_flags = 0;
    1866           0 :                 return;
    1867             :         }
    1868             : 
    1869           0 :         if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
    1870             :                 /* Did it pass? */
    1871           0 :                 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
    1872             :                         /* IP Checksum Good */
    1873           0 :                         mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
    1874             : 
    1875           0 :                 } else {
    1876           0 :                         mp->m_pkthdr.csum_flags = 0;
    1877             :                 }
    1878             :         }
    1879           0 :         if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
    1880             :                 /* Did it pass? */
    1881           0 :                 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
    1882           0 :                         mp->m_pkthdr.csum_flags |=
    1883             :                                 M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
    1884           0 :                 }
    1885             :         }
    1886           0 : }
    1887             : 
    1888             : /*
    1889             :  * This turns on the hardware offload of the VLAN
    1890             :  * tag insertion and strip
    1891             :  */
    1892             : void
    1893           0 : ixgb_enable_hw_vlans(struct ixgb_softc *sc)
    1894             : {
    1895             :         uint32_t ctrl;
    1896             : 
    1897           0 :         ctrl = IXGB_READ_REG(&sc->hw, CTRL0);
    1898           0 :         ctrl |= IXGB_CTRL0_VME;
    1899           0 :         IXGB_WRITE_REG(&sc->hw, CTRL0, ctrl);
    1900           0 : }
    1901             : 
    1902             : void
    1903           0 : ixgb_enable_intr(struct ixgb_softc *sc)
    1904             : {
    1905             :         uint32_t val;
    1906             : 
    1907             :         val = IXGB_INT_RXT0 | IXGB_INT_TXDW | IXGB_INT_RXDMT0 |
    1908             :               IXGB_INT_LSC | IXGB_INT_RXO;
    1909           0 :         if (sc->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
    1910           0 :                 val |= IXGB_INT_GPI0;
    1911           0 :         IXGB_WRITE_REG(&sc->hw, IMS, val);
    1912           0 : }
    1913             : 
    1914             : void
    1915           0 : ixgb_disable_intr(struct ixgb_softc *sc)
    1916             : {
    1917           0 :         IXGB_WRITE_REG(&sc->hw, IMC, ~0);
    1918           0 : }
    1919             : 
    1920             : void
    1921           0 : ixgb_write_pci_cfg(struct ixgb_hw *hw,
    1922             :                    uint32_t reg,
    1923             :                    uint16_t *value)
    1924             : {
    1925           0 :         struct pci_attach_args *pa = &((struct ixgb_osdep *)hw->back)->ixgb_pa;
    1926           0 :         pci_chipset_tag_t pc = pa->pa_pc;
    1927             :         /* Should we do read/mask/write...?  16 vs 32 bit!!! */
    1928           0 :         pci_conf_write(pc, pa->pa_tag, reg, *value);
    1929           0 : }
    1930             : 
    1931             : /**********************************************************************
    1932             :  *
    1933             :  *  Update the board statistics counters.
    1934             :  *
    1935             :  **********************************************************************/
void
ixgb_update_stats_counters(struct ixgb_softc *sc)
{
        struct ifnet   *ifp;

        /*
         * Accumulate every hardware statistics register into the softc.
         * The += accumulation suggests the counters are presumably
         * clear-on-read, so each register must be read exactly once per
         * pass -- TODO confirm against the 82597EX datasheet.
         */

        /* Receive-side packet/octet counters (low/high register pairs). */
        sc->stats.crcerrs += IXGB_READ_REG(&sc->hw, CRCERRS);
        sc->stats.gprcl += IXGB_READ_REG(&sc->hw, GPRCL);
        sc->stats.gprch += IXGB_READ_REG(&sc->hw, GPRCH);
        sc->stats.gorcl += IXGB_READ_REG(&sc->hw, GORCL);
        sc->stats.gorch += IXGB_READ_REG(&sc->hw, GORCH);
        sc->stats.bprcl += IXGB_READ_REG(&sc->hw, BPRCL);
        sc->stats.bprch += IXGB_READ_REG(&sc->hw, BPRCH);
        sc->stats.mprcl += IXGB_READ_REG(&sc->hw, MPRCL);
        sc->stats.mprch += IXGB_READ_REG(&sc->hw, MPRCH);
        sc->stats.roc += IXGB_READ_REG(&sc->hw, ROC);

        /* Error, flow-control and transmit counters. */
        sc->stats.mpc += IXGB_READ_REG(&sc->hw, MPC);
        sc->stats.dc += IXGB_READ_REG(&sc->hw, DC);
        sc->stats.rlec += IXGB_READ_REG(&sc->hw, RLEC);
        sc->stats.xonrxc += IXGB_READ_REG(&sc->hw, XONRXC);
        sc->stats.xontxc += IXGB_READ_REG(&sc->hw, XONTXC);
        sc->stats.xoffrxc += IXGB_READ_REG(&sc->hw, XOFFRXC);
        sc->stats.xofftxc += IXGB_READ_REG(&sc->hw, XOFFTXC);
        sc->stats.gptcl += IXGB_READ_REG(&sc->hw, GPTCL);
        sc->stats.gptch += IXGB_READ_REG(&sc->hw, GPTCH);
        sc->stats.gotcl += IXGB_READ_REG(&sc->hw, GOTCL);
        sc->stats.gotch += IXGB_READ_REG(&sc->hw, GOTCH);
        sc->stats.ruc += IXGB_READ_REG(&sc->hw, RUC);
        sc->stats.rfc += IXGB_READ_REG(&sc->hw, RFC);
        sc->stats.rjc += IXGB_READ_REG(&sc->hw, RJC);
        sc->stats.torl += IXGB_READ_REG(&sc->hw, TORL);
        sc->stats.torh += IXGB_READ_REG(&sc->hw, TORH);
        sc->stats.totl += IXGB_READ_REG(&sc->hw, TOTL);
        sc->stats.toth += IXGB_READ_REG(&sc->hw, TOTH);
        sc->stats.tprl += IXGB_READ_REG(&sc->hw, TPRL);
        sc->stats.tprh += IXGB_READ_REG(&sc->hw, TPRH);
        sc->stats.tptl += IXGB_READ_REG(&sc->hw, TPTL);
        sc->stats.tpth += IXGB_READ_REG(&sc->hw, TPTH);
        sc->stats.plt64c += IXGB_READ_REG(&sc->hw, PLT64C);
        sc->stats.mptcl += IXGB_READ_REG(&sc->hw, MPTCL);
        sc->stats.mptch += IXGB_READ_REG(&sc->hw, MPTCH);
        sc->stats.bptcl += IXGB_READ_REG(&sc->hw, BPTCL);
        sc->stats.bptch += IXGB_READ_REG(&sc->hw, BPTCH);

        /* Unicast/VLAN/jumbo and miscellaneous counters. */
        sc->stats.uprcl += IXGB_READ_REG(&sc->hw, UPRCL);
        sc->stats.uprch += IXGB_READ_REG(&sc->hw, UPRCH);
        sc->stats.vprcl += IXGB_READ_REG(&sc->hw, VPRCL);
        sc->stats.vprch += IXGB_READ_REG(&sc->hw, VPRCH);
        sc->stats.jprcl += IXGB_READ_REG(&sc->hw, JPRCL);
        sc->stats.jprch += IXGB_READ_REG(&sc->hw, JPRCH);
        sc->stats.rnbc += IXGB_READ_REG(&sc->hw, RNBC);
        sc->stats.icbc += IXGB_READ_REG(&sc->hw, ICBC);
        sc->stats.ecbc += IXGB_READ_REG(&sc->hw, ECBC);
        sc->stats.uptcl += IXGB_READ_REG(&sc->hw, UPTCL);
        sc->stats.uptch += IXGB_READ_REG(&sc->hw, UPTCH);
        sc->stats.vptcl += IXGB_READ_REG(&sc->hw, VPTCL);
        sc->stats.vptch += IXGB_READ_REG(&sc->hw, VPTCH);
        sc->stats.jptcl += IXGB_READ_REG(&sc->hw, JPTCL);
        sc->stats.jptch += IXGB_READ_REG(&sc->hw, JPTCH);
        sc->stats.tsctc += IXGB_READ_REG(&sc->hw, TSCTC);
        sc->stats.tsctfc += IXGB_READ_REG(&sc->hw, TSCTFC);
        sc->stats.ibic += IXGB_READ_REG(&sc->hw, IBIC);
        sc->stats.lfc += IXGB_READ_REG(&sc->hw, LFC);
        sc->stats.pfrc += IXGB_READ_REG(&sc->hw, PFRC);
        sc->stats.pftc += IXGB_READ_REG(&sc->hw, PFTC);
        sc->stats.mcfrc += IXGB_READ_REG(&sc->hw, MCFRC);

        ifp = &sc->interface_data.ac_if;

        /* Fill out the OS statistics structure */
        /* Full-duplex-only 10GbE link: no collisions are possible. */
        ifp->if_collisions = 0;

        /* Rx Errors */
        ifp->if_ierrors =
                sc->dropped_pkts +
                sc->stats.crcerrs +
                sc->stats.rnbc +
                sc->stats.mpc +
                sc->stats.rlec;

        /* Tx Errors */
        ifp->if_oerrors =
                sc->watchdog_events;
}
    2020             : 
    2021             : #ifdef IXGB_DEBUG
    2022             : /**********************************************************************
    2023             :  *
    2024             :  *  This routine is called only when ixgb_display_debug_stats is enabled.
    2025             :  *  This routine provides a way to take a look at important statistics
    2026             :  *  maintained by the driver and hardware.
    2027             :  *
    2028             :  **********************************************************************/
    2029             : void
    2030             : ixgb_print_hw_stats(struct ixgb_softc *sc)
    2031             : {
    2032             :         char            buf_speed[100], buf_type[100];
    2033             :         ixgb_bus_speed  bus_speed;
    2034             :         ixgb_bus_type   bus_type;
    2035             :         const char * const unit = sc->sc_dv.dv_xname;
    2036             : 
    2037             :         bus_speed = sc->hw.bus.speed;
    2038             :         bus_type = sc->hw.bus.type;
    2039             :         snprintf(buf_speed, sizeof(buf_speed),
    2040             :                 bus_speed == ixgb_bus_speed_33 ? "33MHz" :
    2041             :                 bus_speed == ixgb_bus_speed_66 ? "66MHz" :
    2042             :                 bus_speed == ixgb_bus_speed_100 ? "100MHz" :
    2043             :                 bus_speed == ixgb_bus_speed_133 ? "133MHz" :
    2044             :                 "UNKNOWN");
    2045             :         printf("%s: PCI_Bus_Speed = %s\n", unit,
    2046             :                 buf_speed);
    2047             : 
    2048             :         snprintf(buf_type, sizeof(buf_type),
    2049             :                 bus_type == ixgb_bus_type_pci ? "PCI" :
    2050             :                 bus_type == ixgb_bus_type_pcix ? "PCI-X" :
    2051             :                 "UNKNOWN");
    2052             :         printf("%s: PCI_Bus_Type = %s\n", unit,
    2053             :                 buf_type);
    2054             : 
    2055             :         printf("%s: Tx Descriptors not Avail1 = %ld\n", unit,
    2056             :                 sc->no_tx_desc_avail1);
    2057             :         printf("%s: Tx Descriptors not Avail2 = %ld\n", unit,
    2058             :                 sc->no_tx_desc_avail2);
    2059             :         printf("%s: Std Mbuf Failed = %ld\n", unit,
    2060             :                 sc->mbuf_alloc_failed);
    2061             :         printf("%s: Std Cluster Failed = %ld\n", unit,
    2062             :                 sc->mbuf_cluster_failed);
    2063             : 
    2064             :         printf("%s: Defer count = %lld\n", unit,
    2065             :                 (long long)sc->stats.dc);
    2066             :         printf("%s: Missed Packets = %lld\n", unit,
    2067             :                 (long long)sc->stats.mpc);
    2068             :         printf("%s: Receive No Buffers = %lld\n", unit,
    2069             :                 (long long)sc->stats.rnbc);
    2070             :         printf("%s: Receive length errors = %lld\n", unit,
    2071             :                 (long long)sc->stats.rlec);
    2072             :         printf("%s: Crc errors = %lld\n", unit,
    2073             :                 (long long)sc->stats.crcerrs);
    2074             :         printf("%s: Driver dropped packets = %ld\n", unit,
    2075             :                 sc->dropped_pkts);
    2076             : 
    2077             :         printf("%s: XON Rcvd = %lld\n", unit,
    2078             :                 (long long)sc->stats.xonrxc);
    2079             :         printf("%s: XON Xmtd = %lld\n", unit,
    2080             :                 (long long)sc->stats.xontxc);
    2081             :         printf("%s: XOFF Rcvd = %lld\n", unit,
    2082             :                 (long long)sc->stats.xoffrxc);
    2083             :         printf("%s: XOFF Xmtd = %lld\n", unit,
    2084             :                 (long long)sc->stats.xofftxc);
    2085             : 
    2086             :         printf("%s: Good Packets Rcvd = %lld\n", unit,
    2087             :                 (long long)sc->stats.gprcl);
    2088             :         printf("%s: Good Packets Xmtd = %lld\n", unit,
    2089             :                 (long long)sc->stats.gptcl);
    2090             : 
    2091             :         printf("%s: Jumbo frames recvd = %lld\n", unit,
    2092             :                 (long long)sc->stats.jprcl);
    2093             :         printf("%s: Jumbo frames Xmtd = %lld\n", unit,
    2094             :                 (long long)sc->stats.jptcl);
    2095             : }
    2096             : #endif

Generated by: LCOV version 1.13