Line data Source code
1 : /* $OpenBSD: if_ix.c,v 1.152 2017/06/22 02:44:37 deraadt Exp $ */
2 :
3 : /******************************************************************************
4 :
5 : Copyright (c) 2001-2013, Intel Corporation
6 : All rights reserved.
7 :
8 : Redistribution and use in source and binary forms, with or without
9 : modification, are permitted provided that the following conditions are met:
10 :
11 : 1. Redistributions of source code must retain the above copyright notice,
12 : this list of conditions and the following disclaimer.
13 :
14 : 2. Redistributions in binary form must reproduce the above copyright
15 : notice, this list of conditions and the following disclaimer in the
16 : documentation and/or other materials provided with the distribution.
17 :
18 : 3. Neither the name of the Intel Corporation nor the names of its
19 : contributors may be used to endorse or promote products derived from
20 : this software without specific prior written permission.
21 :
22 : THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 : AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 : IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 : ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 : LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 : CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 : SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 : INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 : CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 : ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 : POSSIBILITY OF SUCH DAMAGE.
33 :
34 : ******************************************************************************/
35 : /* FreeBSD: src/sys/dev/ixgbe/ixgbe.c 251964 Jun 18 21:28:19 2013 UTC */
36 :
37 : #include <dev/pci/if_ix.h>
38 : #include <dev/pci/ixgbe_type.h>
39 :
40 : /*********************************************************************
41 : * Driver version
42 : *********************************************************************/
43 : /* char ixgbe_driver_version[] = "2.5.13"; */
44 :
45 : /*********************************************************************
46 : * PCI Device ID Table
47 : *
48 : * Used by probe to select devices to load on
49 : *********************************************************************/
50 :
51 : const struct pci_matchid ixgbe_devices[] = {
52 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598 },
53 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_BX },
54 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF_DUAL },
55 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF },
56 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT },
57 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT2 },
58 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT_DUAL },
59 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4 },
60 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4_DUAL },
61 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_XF_LR },
62 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_SFP },
63 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_SR_DUAL_EM },
64 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_DA_DUAL },
65 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4 },
66 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4_MEZZ },
67 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_XAUI },
68 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_COMBO_BP },
69 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_BPLANE_FCOE },
70 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_CX4 },
71 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_T3_LOM },
72 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP },
73 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_EM },
74 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF_QP },
75 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF2 },
76 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_FCOE },
77 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599EN_SFP },
78 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_QSFP_SF_QP },
79 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T },
80 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T1 },
81 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T },
82 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T1 },
83 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KX4 },
84 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KR },
85 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_SFP },
86 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_10G_T },
87 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_1G_T },
88 : };
89 :
90 : /*********************************************************************
91 : * Function prototypes
92 : *********************************************************************/
93 : int ixgbe_probe(struct device *, void *, void *);
94 : void ixgbe_attach(struct device *, struct device *, void *);
95 : int ixgbe_detach(struct device *, int);
96 : void ixgbe_start(struct ifqueue *);
97 : int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
98 : int ixgbe_rxrinfo(struct ix_softc *, struct if_rxrinfo *);
99 : void ixgbe_watchdog(struct ifnet *);
100 : void ixgbe_init(void *);
101 : void ixgbe_stop(void *);
102 : void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
103 : int ixgbe_media_change(struct ifnet *);
104 : void ixgbe_identify_hardware(struct ix_softc *);
105 : int ixgbe_allocate_pci_resources(struct ix_softc *);
106 : int ixgbe_allocate_legacy(struct ix_softc *);
107 : int ixgbe_allocate_queues(struct ix_softc *);
108 : void ixgbe_free_pci_resources(struct ix_softc *);
109 : void ixgbe_local_timer(void *);
110 : void ixgbe_setup_interface(struct ix_softc *);
111 : void ixgbe_config_gpie(struct ix_softc *);
112 : void ixgbe_config_delay_values(struct ix_softc *);
113 : void ixgbe_add_media_types(struct ix_softc *);
114 : void ixgbe_config_link(struct ix_softc *);
115 :
116 : int ixgbe_allocate_transmit_buffers(struct tx_ring *);
117 : int ixgbe_setup_transmit_structures(struct ix_softc *);
118 : int ixgbe_setup_transmit_ring(struct tx_ring *);
119 : void ixgbe_initialize_transmit_units(struct ix_softc *);
120 : void ixgbe_free_transmit_structures(struct ix_softc *);
121 : void ixgbe_free_transmit_buffers(struct tx_ring *);
122 :
123 : int ixgbe_allocate_receive_buffers(struct rx_ring *);
124 : int ixgbe_setup_receive_structures(struct ix_softc *);
125 : int ixgbe_setup_receive_ring(struct rx_ring *);
126 : void ixgbe_initialize_receive_units(struct ix_softc *);
127 : void ixgbe_free_receive_structures(struct ix_softc *);
128 : void ixgbe_free_receive_buffers(struct rx_ring *);
129 : void ixgbe_initialize_rss_mapping(struct ix_softc *);
130 : int ixgbe_rxfill(struct rx_ring *);
131 : void ixgbe_rxrefill(void *);
132 :
133 : void ixgbe_enable_intr(struct ix_softc *);
134 : void ixgbe_disable_intr(struct ix_softc *);
135 : void ixgbe_update_stats_counters(struct ix_softc *);
136 : int ixgbe_txeof(struct tx_ring *);
137 : int ixgbe_rxeof(struct ix_queue *);
138 : void ixgbe_rx_checksum(uint32_t, struct mbuf *, uint32_t);
139 : void ixgbe_iff(struct ix_softc *);
140 : #ifdef IX_DEBUG
141 : void ixgbe_print_hw_stats(struct ix_softc *);
142 : #endif
143 : void ixgbe_update_link_status(struct ix_softc *);
144 : int ixgbe_get_buf(struct rx_ring *, int);
145 : int ixgbe_encap(struct tx_ring *, struct mbuf *);
146 : int ixgbe_dma_malloc(struct ix_softc *, bus_size_t,
147 : struct ixgbe_dma_alloc *, int);
148 : void ixgbe_dma_free(struct ix_softc *, struct ixgbe_dma_alloc *);
149 : int ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *, uint32_t *,
150 : uint32_t *);
151 : int ixgbe_tso_setup(struct tx_ring *, struct mbuf *, uint32_t *,
152 : uint32_t *);
153 : void ixgbe_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
154 : void ixgbe_configure_ivars(struct ix_softc *);
155 : uint8_t *ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
156 :
157 : void ixgbe_setup_vlan_hw_support(struct ix_softc *);
158 :
159 : /* Support for pluggable optic modules */
160 : void ixgbe_setup_optics(struct ix_softc *);
161 : void ixgbe_handle_mod(struct ix_softc *);
162 : void ixgbe_handle_msf(struct ix_softc *);
163 : void ixgbe_handle_phy(struct ix_softc *);
164 :
165 : /* Legacy (single vector) interrupt handler */
166 : int ixgbe_intr(void *);
167 : void ixgbe_enable_queue(struct ix_softc *, uint32_t);
168 : void ixgbe_disable_queue(struct ix_softc *, uint32_t);
169 : void ixgbe_rearm_queue(struct ix_softc *, uint32_t);
170 :
171 : /*********************************************************************
172 : * OpenBSD Device Interface Entry Points
173 : *********************************************************************/
174 :
175 : struct cfdriver ix_cd = {
176 : NULL, "ix", DV_IFNET
177 : };
178 :
179 : struct cfattach ix_ca = {
180 : sizeof(struct ix_softc), ixgbe_probe, ixgbe_attach, ixgbe_detach
181 : };
182 :
183 : int ixgbe_smart_speed = ixgbe_smart_speed_on;
184 :
185 : /*********************************************************************
186 : * Device identification routine
187 : *
188 : * ixgbe_probe determines whether the driver should be loaded for
189 : * an adapter, based on the adapter's PCI vendor/device id.
190 : *
191 : * return 0 on success, positive on failure
192 : *********************************************************************/
193 :
194 : int
195 0 : ixgbe_probe(struct device *parent, void *match, void *aux)
196 : {
197 : INIT_DEBUGOUT("ixgbe_probe: begin");
198 :
199 0 : return (pci_matchbyid((struct pci_attach_args *)aux, ixgbe_devices,
200 : nitems(ixgbe_devices)));
201 : }
202 :
203 : /*********************************************************************
204 : * Device initialization routine
205 : *
206 : * The attach entry point is called when the driver is being loaded.
207 : * This routine identifies the type of hardware, allocates all resources
208 : * and initializes the hardware.
209 : *
210 : * return 0 on success, positive on failure
211 : *********************************************************************/
212 :
213 : void
214 0 : ixgbe_attach(struct device *parent, struct device *self, void *aux)
215 : {
216 0 : struct pci_attach_args *pa = (struct pci_attach_args *)aux;
217 0 : struct ix_softc *sc = (struct ix_softc *)self;
218 : int error = 0;
219 0 : uint16_t csum;
220 : uint32_t ctrl_ext;
221 0 : struct ixgbe_hw *hw = &sc->hw;
222 :
223 : INIT_DEBUGOUT("ixgbe_attach: begin");
224 :
225 0 : sc->osdep.os_sc = sc;
226 0 : sc->osdep.os_pa = *pa;
227 :
228 : /* Set up the timer callout */
229 0 : timeout_set(&sc->timer, ixgbe_local_timer, sc);
230 0 : timeout_set(&sc->rx_refill, ixgbe_rxrefill, sc);
231 :
232 : /* Determine hardware revision */
233 0 : ixgbe_identify_hardware(sc);
234 :
235 : /* Set the default TX and RX descriptor counts */
236 0 : sc->num_tx_desc = DEFAULT_TXD;
237 0 : sc->num_rx_desc = DEFAULT_RXD;
238 :
239 : /* Do base PCI setup - map BAR0 */
240 0 : if (ixgbe_allocate_pci_resources(sc))
241 : goto err_out;
242 :
243 : /* Allocate our TX/RX Queues */
244 0 : if (ixgbe_allocate_queues(sc))
245 : goto err_out;
246 :
247 : /* Allocate multicast array memory. */
248 0 : sc->mta = mallocarray(IXGBE_ETH_LENGTH_OF_ADDRESS,
249 : MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
250 0 : if (sc->mta == NULL) {
251 0 : printf(": Can not allocate multicast setup array\n");
252 0 : goto err_late;
253 : }
254 :
255 : /* Initialize the shared code */
256 0 : error = ixgbe_init_shared_code(hw);
257 0 : if (error) {
258 0 : printf(": Unable to initialize the shared code\n");
259 0 : goto err_late;
260 : }
261 :
262 : /* Make sure we have a good EEPROM before we read from it */
263 0 : if (sc->hw.eeprom.ops.validate_checksum(&sc->hw, &csum) < 0) {
264 0 : printf(": The EEPROM Checksum Is Not Valid\n");
265 0 : goto err_late;
266 : }
267 :
268 0 : error = ixgbe_init_hw(hw);
269 0 : if (error == IXGBE_ERR_EEPROM_VERSION) {
270 0 : printf(": This device is a pre-production adapter/"
271 : "LOM. Please be aware there may be issues associated "
272 : "with your hardware.\nIf you are experiencing problems "
273 : "please contact your Intel or hardware representative "
274 : "who provided you with this hardware.\n");
275 0 : } else if (error && (error != IXGBE_ERR_SFP_NOT_PRESENT &&
276 0 : error != IXGBE_ERR_SFP_NOT_SUPPORTED)) {
277 0 : printf(": Hardware Initialization Failure\n");
278 0 : goto err_late;
279 : }
280 :
281 : /* Detect and set physical type */
282 0 : ixgbe_setup_optics(sc);
283 :
284 0 : bcopy(sc->hw.mac.addr, sc->arpcom.ac_enaddr,
285 : IXGBE_ETH_LENGTH_OF_ADDRESS);
286 :
287 0 : error = ixgbe_allocate_legacy(sc);
288 0 : if (error)
289 : goto err_late;
290 :
291 : /* Enable the optics for 82599 SFP+ fiber */
292 0 : if (sc->hw.mac.ops.enable_tx_laser)
293 0 : sc->hw.mac.ops.enable_tx_laser(&sc->hw);
294 :
295 : /* Enable power to the phy */
296 0 : if (hw->phy.ops.set_phy_power)
297 0 : hw->phy.ops.set_phy_power(&sc->hw, TRUE);
298 :
299 : /* Setup OS specific network interface */
300 0 : ixgbe_setup_interface(sc);
301 :
302 : /* Initialize statistics */
303 0 : ixgbe_update_stats_counters(sc);
304 :
305 : /* Get the PCI-E bus info and determine LAN ID */
306 0 : hw->mac.ops.get_bus_info(hw);
307 :
308 : /* Set an initial default flow control value */
309 0 : sc->fc = ixgbe_fc_full;
310 :
311 : /* let hardware know driver is loaded */
312 0 : ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
313 0 : ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
314 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
315 :
316 0 : printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
317 :
318 : INIT_DEBUGOUT("ixgbe_attach: end");
319 0 : return;
320 :
321 : err_late:
322 0 : ixgbe_free_transmit_structures(sc);
323 0 : ixgbe_free_receive_structures(sc);
324 : err_out:
325 0 : ixgbe_free_pci_resources(sc);
326 0 : free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
327 : MAX_NUM_MULTICAST_ADDRESSES);
328 0 : }
329 :
330 : /*********************************************************************
331 : * Device removal routine
332 : *
333 : * The detach entry point is called when the driver is being removed.
334 : * This routine stops the adapter and deallocates all the resources
335 : * that were allocated for driver operation.
336 : *
337 : * return 0 on success, positive on failure
338 : *********************************************************************/
339 :
340 : int
341 0 : ixgbe_detach(struct device *self, int flags)
342 : {
343 0 : struct ix_softc *sc = (struct ix_softc *)self;
344 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
345 : uint32_t ctrl_ext;
346 :
347 : INIT_DEBUGOUT("ixgbe_detach: begin");
348 :
349 0 : ixgbe_stop(sc);
350 :
351 : /* let hardware know driver is unloading */
352 0 : ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
353 0 : ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
354 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
355 :
356 0 : ether_ifdetach(ifp);
357 0 : if_detach(ifp);
358 :
359 0 : timeout_del(&sc->timer);
360 0 : timeout_del(&sc->rx_refill);
361 0 : ixgbe_free_pci_resources(sc);
362 :
363 0 : ixgbe_free_transmit_structures(sc);
364 0 : ixgbe_free_receive_structures(sc);
365 0 : free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
366 : MAX_NUM_MULTICAST_ADDRESSES);
367 :
368 0 : return (0);
369 : }
370 :
371 : /*********************************************************************
372 : * Transmit entry point
373 : *
374 : * ixgbe_start is called by the stack to initiate a transmit.
375 : * The driver will remain in this routine as long as there are
376 : * packets to transmit and transmit resources are available.
377 : * In case resources are not available, the stack is notified
378 : * and the packet is requeued.
379 : **********************************************************************/
380 :
381 : void
382 0 : ixgbe_start(struct ifqueue *ifq)
383 : {
384 0 : struct ifnet *ifp = ifq->ifq_if;
385 0 : struct ix_softc *sc = ifp->if_softc;
386 0 : struct tx_ring *txr = sc->tx_rings;
387 : struct mbuf *m_head;
388 : int post = 0;
389 :
390 0 : if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(ifq))
391 0 : return;
392 0 : if (!sc->link_up)
393 0 : return;
394 :
395 0 : bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
396 : txr->txdma.dma_map->dm_mapsize,
397 : BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
398 :
399 0 : for (;;) {
400 : /* Check that we have the minimal number of TX descriptors. */
401 0 : if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) {
402 0 : ifq_set_oactive(ifq);
403 0 : break;
404 : }
405 :
406 0 : m_head = ifq_dequeue(ifq);
407 0 : if (m_head == NULL)
408 : break;
409 :
410 0 : if (ixgbe_encap(txr, m_head)) {
411 0 : m_freem(m_head);
412 0 : continue;
413 : }
414 :
415 : #if NBPFILTER > 0
416 0 : if (ifp->if_bpf)
417 0 : bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
418 : #endif
419 :
420 : /* Set timeout in case hardware has problems transmitting */
421 0 : txr->watchdog_timer = IXGBE_TX_TIMEOUT;
422 0 : ifp->if_timer = IXGBE_TX_TIMEOUT;
423 :
424 : post = 1;
425 : }
426 :
427 0 : bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
428 : 0, txr->txdma.dma_map->dm_mapsize,
429 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
430 :
431 : /*
432 : * Advance the Transmit Descriptor Tail (TDT); this tells the
433 : * hardware that this frame is available to transmit.
434 : */
435 0 : if (post)
436 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->me),
437 : txr->next_avail_desc);
438 0 : }
439 :
440 : /*********************************************************************
441 : * Ioctl entry point
442 : *
443 : * ixgbe_ioctl is called when the user wants to configure the
444 : * interface.
445 : *
446 : * return 0 on success, positive on failure
447 : **********************************************************************/
448 :
449 : int
450 0 : ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
451 : {
452 0 : struct ix_softc *sc = ifp->if_softc;
453 0 : struct ifreq *ifr = (struct ifreq *) data;
454 : int s, error = 0;
455 :
456 0 : s = splnet();
457 :
458 0 : switch (command) {
459 : case SIOCSIFADDR:
460 : IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
461 0 : ifp->if_flags |= IFF_UP;
462 0 : if (!(ifp->if_flags & IFF_RUNNING))
463 0 : ixgbe_init(sc);
464 : break;
465 :
466 : case SIOCSIFFLAGS:
467 : IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
468 0 : if (ifp->if_flags & IFF_UP) {
469 0 : if (ifp->if_flags & IFF_RUNNING)
470 0 : error = ENETRESET;
471 : else
472 0 : ixgbe_init(sc);
473 : } else {
474 0 : if (ifp->if_flags & IFF_RUNNING)
475 0 : ixgbe_stop(sc);
476 : }
477 : break;
478 :
479 : case SIOCSIFMEDIA:
480 : case SIOCGIFMEDIA:
481 : IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
482 0 : error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
483 0 : break;
484 :
485 : case SIOCGIFRXR:
486 0 : error = ixgbe_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
487 0 : break;
488 :
489 : default:
490 0 : error = ether_ioctl(ifp, &sc->arpcom, command, data);
491 0 : }
492 :
493 0 : if (error == ENETRESET) {
494 0 : if (ifp->if_flags & IFF_RUNNING) {
495 0 : ixgbe_disable_intr(sc);
496 0 : ixgbe_iff(sc);
497 0 : ixgbe_enable_intr(sc);
498 0 : }
499 : error = 0;
500 0 : }
501 :
502 0 : splx(s);
503 0 : return (error);
504 : }
505 :
506 : int
507 0 : ixgbe_rxrinfo(struct ix_softc *sc, struct if_rxrinfo *ifri)
508 : {
509 0 : struct if_rxring_info *ifr, ifr1;
510 : struct rx_ring *rxr;
511 : int error, i;
512 : u_int n = 0;
513 :
514 0 : if (sc->num_queues > 1) {
515 0 : if ((ifr = mallocarray(sc->num_queues, sizeof(*ifr), M_DEVBUF,
516 0 : M_WAITOK | M_ZERO)) == NULL)
517 0 : return (ENOMEM);
518 : } else
519 : ifr = &ifr1;
520 :
521 0 : for (i = 0; i < sc->num_queues; i++) {
522 0 : rxr = &sc->rx_rings[i];
523 0 : ifr[n].ifr_size = sc->rx_mbuf_sz;
524 0 : snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "/%d", i);
525 0 : ifr[n].ifr_info = rxr->rx_ring;
526 0 : n++;
527 : }
528 :
529 0 : error = if_rxr_info_ioctl(ifri, sc->num_queues, ifr);
530 :
531 0 : if (sc->num_queues > 1)
532 0 : free(ifr, M_DEVBUF, sc->num_queues * sizeof(*ifr));
533 0 : return (error);
534 0 : }
535 :
536 : /*********************************************************************
537 : * Watchdog entry point
538 : *
539 : **********************************************************************/
540 :
541 : void
542 0 : ixgbe_watchdog(struct ifnet * ifp)
543 : {
544 0 : struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
545 0 : struct tx_ring *txr = sc->tx_rings;
546 0 : struct ixgbe_hw *hw = &sc->hw;
547 : int tx_hang = FALSE;
548 : int i;
549 :
550 : /*
551 : * The timer is set to IXGBE_TX_TIMEOUT every time ixgbe_start()
552 : * queues a packet; it is reset to 0 once all descriptors are clean.
553 : */
554 0 : for (i = 0; i < sc->num_queues; i++, txr++) {
555 0 : if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
556 : continue;
557 : else {
558 : tx_hang = TRUE;
559 0 : break;
560 : }
561 : }
562 0 : if (tx_hang == FALSE)
563 0 : return;
564 :
565 : /*
566 : * If we are in this routine because of pause frames, then don't
567 : * reset the hardware.
568 : */
569 0 : if (!(IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXON)) {
570 0 : for (i = 0; i < sc->num_queues; i++, txr++)
571 0 : txr->watchdog_timer = IXGBE_TX_TIMEOUT;
572 0 : ifp->if_timer = IXGBE_TX_TIMEOUT;
573 0 : return;
574 : }
575 :
576 :
577 0 : printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
578 0 : for (i = 0; i < sc->num_queues; i++, txr++) {
579 0 : printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, i,
580 0 : IXGBE_READ_REG(hw, IXGBE_TDH(i)),
581 0 : IXGBE_READ_REG(hw, IXGBE_TDT(i)));
582 0 : printf("%s: TX(%d) desc avail = %d, Next TX to Clean = %d\n", ifp->if_xname,
583 0 : i, txr->tx_avail, txr->next_to_clean);
584 : }
585 0 : ifp->if_flags &= ~IFF_RUNNING;
586 0 : sc->watchdog_events++;
587 :
588 0 : ixgbe_init(sc);
589 0 : }
590 :
591 : /*********************************************************************
592 : * Init entry point
593 : *
594 : * This routine is used in two ways. It is used by the stack as
595 : * the init entry point in the network interface structure. It is
596 : * also used by the driver as a hw/sw initialization routine to
597 : * get to a consistent state.
598 : *
599 : * return 0 on success, positive on failure
600 : **********************************************************************/
601 : #define IXGBE_MHADD_MFS_SHIFT 16
602 :
603 : void
604 0 : ixgbe_init(void *arg)
605 : {
606 0 : struct ix_softc *sc = (struct ix_softc *)arg;
607 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
608 0 : struct rx_ring *rxr = sc->rx_rings;
609 : uint32_t k, txdctl, rxdctl, rxctrl, mhadd, itr;
610 : int i, s, err;
611 :
612 : INIT_DEBUGOUT("ixgbe_init: begin");
613 :
614 0 : s = splnet();
615 :
616 0 : ixgbe_stop(sc);
617 :
618 : /* reprogram the RAR[0] in case user changed it. */
619 0 : ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
620 :
621 : /* Get the latest mac address, User can use a LAA */
622 0 : bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
623 : IXGBE_ETH_LENGTH_OF_ADDRESS);
624 0 : ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, 1);
625 0 : sc->hw.addr_ctrl.rar_used_count = 1;
626 :
627 : /* Prepare transmit descriptors and buffers */
628 0 : if (ixgbe_setup_transmit_structures(sc)) {
629 0 : printf("%s: Could not setup transmit structures\n",
630 0 : ifp->if_xname);
631 0 : ixgbe_stop(sc);
632 0 : splx(s);
633 0 : return;
634 : }
635 :
636 0 : ixgbe_init_hw(&sc->hw);
637 0 : ixgbe_initialize_transmit_units(sc);
638 :
639 : /* Use 2k clusters, even for jumbo frames */
640 0 : sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN;
641 :
642 : /* Prepare receive descriptors and buffers */
643 0 : if (ixgbe_setup_receive_structures(sc)) {
644 0 : printf("%s: Could not setup receive structures\n",
645 0 : ifp->if_xname);
646 0 : ixgbe_stop(sc);
647 0 : splx(s);
648 0 : return;
649 : }
650 :
651 : /* Configure RX settings */
652 0 : ixgbe_initialize_receive_units(sc);
653 :
654 : /* Enable SDP & MSIX interrupts based on adapter */
655 0 : ixgbe_config_gpie(sc);
656 :
657 : /* Program promiscuous mode and multicast filters. */
658 0 : ixgbe_iff(sc);
659 :
660 : /* Set MRU size */
661 0 : mhadd = IXGBE_READ_REG(&sc->hw, IXGBE_MHADD);
662 0 : mhadd &= ~IXGBE_MHADD_MFS_MASK;
663 0 : mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
664 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_MHADD, mhadd);
665 :
666 : /* Now enable all the queues */
667 0 : for (i = 0; i < sc->num_queues; i++) {
668 0 : txdctl = IXGBE_READ_REG(&sc->hw, IXGBE_TXDCTL(i));
669 0 : txdctl |= IXGBE_TXDCTL_ENABLE;
670 : /* Set WTHRESH to 8, burst writeback */
671 0 : txdctl |= (8 << 16);
672 : /*
673 : * When the internal queue falls below PTHRESH (16),
674 : * start prefetching as long as there are at least
675 : * HTHRESH (1) buffers ready.
676 : */
677 0 : txdctl |= (16 << 0) | (1 << 8);
678 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_TXDCTL(i), txdctl);
679 : }
680 :
681 0 : for (i = 0; i < sc->num_queues; i++) {
682 0 : rxdctl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
683 0 : if (sc->hw.mac.type == ixgbe_mac_82598EB) {
684 : /*
685 : * PTHRESH = 32 (0x20)
686 : * HTHRESH = 4
687 : * WTHRESH = 8
688 : */
689 0 : rxdctl &= ~0x3FFFFF;
690 0 : rxdctl |= 0x080420;
691 0 : }
692 0 : rxdctl |= IXGBE_RXDCTL_ENABLE;
693 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), rxdctl);
694 0 : for (k = 0; k < 10; k++) {
695 0 : if (IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i)) &
696 : IXGBE_RXDCTL_ENABLE)
697 : break;
698 : else
699 0 : msec_delay(1);
700 : }
701 0 : IXGBE_WRITE_FLUSH(&sc->hw);
702 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i), rxr->last_desc_filled);
703 : }
704 :
705 : /* Set up VLAN support and filter */
706 0 : ixgbe_setup_vlan_hw_support(sc);
707 :
708 : /* Enable Receive engine */
709 0 : rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
710 0 : if (sc->hw.mac.type == ixgbe_mac_82598EB)
711 0 : rxctrl |= IXGBE_RXCTRL_DMBYPS;
712 0 : rxctrl |= IXGBE_RXCTRL_RXEN;
713 0 : sc->hw.mac.ops.enable_rx_dma(&sc->hw, rxctrl);
714 :
715 0 : timeout_add_sec(&sc->timer, 1);
716 :
717 : /* Set up MSI/X routing */
718 0 : if (sc->msix > 1) {
719 0 : ixgbe_configure_ivars(sc);
720 : /* Set up auto-mask */
721 0 : if (sc->hw.mac.type == ixgbe_mac_82598EB)
722 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
723 : else {
724 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
725 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
726 : }
727 : } else { /* Simple settings for Legacy/MSI */
728 0 : ixgbe_set_ivar(sc, 0, 0, 0);
729 0 : ixgbe_set_ivar(sc, 0, 0, 1);
730 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
731 : }
732 :
733 : /* Check on any SFP devices that need to be kick-started */
734 0 : if (sc->hw.phy.type == ixgbe_phy_none) {
735 0 : err = sc->hw.phy.ops.identify(&sc->hw);
736 0 : if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
737 0 : printf("Unsupported SFP+ module type was detected.\n");
738 0 : splx(s);
739 0 : return;
740 : }
741 : }
742 :
743 : /* Setup interrupt moderation */
744 : itr = (4000000 / IXGBE_INTS_PER_SEC) & 0xff8;
745 0 : if (sc->hw.mac.type != ixgbe_mac_82598EB)
746 0 : itr |= IXGBE_EITR_LLI_MOD | IXGBE_EITR_CNT_WDIS;
747 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(0), itr);
748 :
749 : /* Enable power to the phy */
750 0 : if (sc->hw.phy.ops.set_phy_power)
751 0 : sc->hw.phy.ops.set_phy_power(&sc->hw, TRUE);
752 :
753 : /* Config/Enable Link */
754 0 : ixgbe_config_link(sc);
755 :
756 : /* Hardware Packet Buffer & Flow Control setup */
757 0 : ixgbe_config_delay_values(sc);
758 :
759 : /* Initialize the FC settings */
760 0 : sc->hw.mac.ops.start_hw(&sc->hw);
761 :
762 : /* And now turn on interrupts */
763 0 : ixgbe_enable_intr(sc);
764 :
765 : /* Now inform the stack we're ready */
766 0 : ifp->if_flags |= IFF_RUNNING;
767 0 : ifq_clr_oactive(&ifp->if_snd);
768 :
769 0 : splx(s);
770 0 : }
771 :
772 : void
773 0 : ixgbe_config_gpie(struct ix_softc *sc)
774 : {
775 0 : struct ixgbe_hw *hw = &sc->hw;
776 : uint32_t gpie;
777 :
778 0 : gpie = IXGBE_READ_REG(&sc->hw, IXGBE_GPIE);
779 :
780 : /* Fan Failure Interrupt */
781 0 : if (hw->device_id == IXGBE_DEV_ID_82598AT)
782 0 : gpie |= IXGBE_SDP1_GPIEN;
783 :
784 0 : if (sc->hw.mac.type == ixgbe_mac_82599EB) {
785 : /* Add for Module detection */
786 0 : gpie |= IXGBE_SDP2_GPIEN;
787 :
788 : /* Media ready */
789 0 : if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
790 0 : gpie |= IXGBE_SDP1_GPIEN;
791 :
792 : /*
793 : * Set LL interval to max to reduce the number of low latency
794 : * interrupts hitting the card when the ring is getting full.
795 : */
796 0 : gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
797 0 : }
798 :
799 0 : if (sc->hw.mac.type == ixgbe_mac_X540 ||
800 0 : hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
801 0 : hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
802 : /*
803 : * Thermal Failure Detection (X540)
804 : * Link Detection (X552 SFP+, X552/X557-AT)
805 : */
806 0 : gpie |= IXGBE_SDP0_GPIEN_X540;
807 :
808 : /*
809 : * Set LL interval to max to reduce the number of low latency
810 : * interrupts hitting the card when the ring is getting full.
811 : */
812 0 : gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
813 0 : }
814 :
815 0 : if (sc->msix > 1) {
816 : /* Enable Enhanced MSIX mode */
817 0 : gpie |= IXGBE_GPIE_MSIX_MODE;
818 0 : gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
819 : IXGBE_GPIE_OCD;
820 0 : }
821 :
822 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_GPIE, gpie);
823 0 : }
824 :
825 : /*
826 : * Requires sc->max_frame_size to be set.
827 : */
828 : void
829 0 : ixgbe_config_delay_values(struct ix_softc *sc)
830 : {
831 0 : struct ixgbe_hw *hw = &sc->hw;
832 : uint32_t rxpb, frame, size, tmp;
833 :
834 0 : frame = sc->max_frame_size;
835 :
836 : /* Calculate High Water */
837 0 : switch (hw->mac.type) {
838 : case ixgbe_mac_X540:
839 : case ixgbe_mac_X550:
840 : case ixgbe_mac_X550EM_x:
841 0 : tmp = IXGBE_DV_X540(frame, frame);
842 0 : break;
843 : default:
844 0 : tmp = IXGBE_DV(frame, frame);
845 0 : break;
846 : }
847 0 : size = IXGBE_BT2KB(tmp);
848 0 : rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
849 0 : hw->fc.high_water[0] = rxpb - size;
850 :
851 : /* Now calculate Low Water */
852 0 : switch (hw->mac.type) {
853 : case ixgbe_mac_X540:
854 : case ixgbe_mac_X550:
855 : case ixgbe_mac_X550EM_x:
856 0 : tmp = IXGBE_LOW_DV_X540(frame);
857 0 : break;
858 : default:
859 0 : tmp = IXGBE_LOW_DV(frame);
860 0 : break;
861 : }
862 0 : hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
863 :
864 0 : hw->fc.requested_mode = sc->fc;
865 0 : hw->fc.pause_time = IXGBE_FC_PAUSE;
866 0 : hw->fc.send_xon = TRUE;
867 0 : }
868 :
869 : /*
870 : * MSIX Interrupt Handlers
871 : */
872 : void
873 0 : ixgbe_enable_queue(struct ix_softc *sc, uint32_t vector)
874 : {
875 0 : uint64_t queue = 1ULL << vector;
876 : uint32_t mask;
877 :
878 0 : if (sc->hw.mac.type == ixgbe_mac_82598EB) {
879 0 : mask = (IXGBE_EIMS_RTX_QUEUE & queue);
880 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS, mask);
881 0 : } else {
882 0 : mask = (queue & 0xFFFFFFFF);
883 0 : if (mask)
884 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(0), mask);
885 0 : mask = (queue >> 32);
886 0 : if (mask)
887 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(1), mask);
888 : }
889 0 : }
890 :
891 : void
892 0 : ixgbe_disable_queue(struct ix_softc *sc, uint32_t vector)
893 : {
894 0 : uint64_t queue = 1ULL << vector;
895 : uint32_t mask;
896 :
897 0 : if (sc->hw.mac.type == ixgbe_mac_82598EB) {
898 0 : mask = (IXGBE_EIMS_RTX_QUEUE & queue);
899 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, mask);
900 0 : } else {
901 0 : mask = (queue & 0xFFFFFFFF);
902 0 : if (mask)
903 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), mask);
904 0 : mask = (queue >> 32);
905 0 : if (mask)
906 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), mask);
907 : }
908 0 : }
909 :
910 : /*********************************************************************
911 : *
912 : * Legacy Interrupt Service routine
913 : *
914 : **********************************************************************/
915 :
916 : int
917 0 : ixgbe_intr(void *arg)
918 : {
919 0 : struct ix_softc *sc = (struct ix_softc *)arg;
920 0 : struct ix_queue *que = sc->queues;
921 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
922 0 : struct tx_ring *txr = sc->tx_rings;
923 0 : struct ixgbe_hw *hw = &sc->hw;
924 : uint32_t reg_eicr, mod_mask, msf_mask;
925 : int i, refill = 0;
926 :
927 0 : reg_eicr = IXGBE_READ_REG(&sc->hw, IXGBE_EICR);
928 0 : if (reg_eicr == 0) {
929 0 : ixgbe_enable_intr(sc);
930 0 : return (0);
931 : }
932 :
933 0 : if (ISSET(ifp->if_flags, IFF_RUNNING)) {
934 0 : ixgbe_rxeof(que);
935 0 : ixgbe_txeof(txr);
936 : refill = 1;
937 0 : }
938 :
939 0 : if (refill) {
940 0 : if (ixgbe_rxfill(que->rxr)) {
941 : /* Advance the Rx Queue "Tail Pointer" */
942 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me),
943 : que->rxr->last_desc_filled);
944 0 : } else
945 0 : timeout_add(&sc->rx_refill, 1);
946 : }
947 :
948 : /* Link status change */
949 0 : if (reg_eicr & IXGBE_EICR_LSC) {
950 0 : KERNEL_LOCK();
951 0 : ixgbe_update_link_status(sc);
952 0 : KERNEL_UNLOCK();
953 0 : ifq_start(&ifp->if_snd);
954 0 : }
955 :
956 0 : if (hw->mac.type != ixgbe_mac_82598EB) {
957 0 : if (reg_eicr & IXGBE_EICR_ECC) {
958 0 : printf("%s: CRITICAL: ECC ERROR!! "
959 0 : "Please Reboot!!\n", sc->dev.dv_xname);
960 0 : IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
961 0 : }
962 : /* Check for over temp condition */
963 0 : if (reg_eicr & IXGBE_EICR_TS) {
964 0 : printf("%s: CRITICAL: OVER TEMP!! "
965 0 : "PHY IS SHUT DOWN!!\n", ifp->if_xname);
966 0 : IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
967 0 : }
968 : }
969 :
970 : /* Pluggable optics-related interrupt */
971 0 : if (ixgbe_is_sfp(hw)) {
972 0 : if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
973 : mod_mask = IXGBE_EICR_GPI_SDP0_X540;
974 : msf_mask = IXGBE_EICR_GPI_SDP1_X540;
975 0 : } else if (hw->mac.type == ixgbe_mac_X540 ||
976 0 : hw->mac.type == ixgbe_mac_X550 ||
977 0 : hw->mac.type == ixgbe_mac_X550EM_x) {
978 : mod_mask = IXGBE_EICR_GPI_SDP2_X540;
979 : msf_mask = IXGBE_EICR_GPI_SDP1_X540;
980 0 : } else {
981 : mod_mask = IXGBE_EICR_GPI_SDP2;
982 : msf_mask = IXGBE_EICR_GPI_SDP1;
983 : }
984 0 : if (reg_eicr & mod_mask) {
985 : /* Clear the interrupt */
986 0 : IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
987 0 : KERNEL_LOCK();
988 0 : ixgbe_handle_mod(sc);
989 0 : KERNEL_UNLOCK();
990 0 : } else if ((hw->phy.media_type != ixgbe_media_type_copper) &&
991 0 : (reg_eicr & msf_mask)) {
992 : /* Clear the interrupt */
993 0 : IXGBE_WRITE_REG(hw, IXGBE_EICR, msf_mask);
994 0 : KERNEL_LOCK();
995 0 : ixgbe_handle_msf(sc);
996 0 : KERNEL_UNLOCK();
997 0 : }
998 : }
999 :
1000 : /* Check for fan failure */
1001 0 : if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1002 0 : (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1003 0 : printf("%s: CRITICAL: FAN FAILURE!! "
1004 0 : "REPLACE IMMEDIATELY!!\n", ifp->if_xname);
1005 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1006 0 : }
1007 :
1008 : /* External PHY interrupt */
1009 0 : if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1010 0 : (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1011 : /* Clear the interrupt */
1012 0 : IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1013 0 : KERNEL_LOCK();
1014 0 : ixgbe_handle_phy(sc);
1015 0 : KERNEL_UNLOCK();
1016 0 : }
1017 :
1018 0 : for (i = 0; i < sc->num_queues; i++, que++)
1019 0 : ixgbe_enable_queue(sc, que->msix);
1020 :
1021 0 : return (1);
1022 0 : }
1023 :
1024 : /*********************************************************************
1025 : *
1026 : * Media Ioctl callback
1027 : *
1028 : * This routine is called whenever the user queries the status of
1029 : * the interface using ifconfig.
1030 : *
1031 : **********************************************************************/
1032 : void
1033 0 : ixgbe_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
1034 : {
1035 0 : struct ix_softc *sc = ifp->if_softc;
1036 :
1037 0 : ifmr->ifm_active = IFM_ETHER;
1038 0 : ifmr->ifm_status = IFM_AVALID;
1039 :
1040 : INIT_DEBUGOUT("ixgbe_media_status: begin");
1041 0 : ixgbe_update_link_status(sc);
1042 :
1043 0 : if (LINK_STATE_IS_UP(ifp->if_link_state)) {
1044 0 : ifmr->ifm_status |= IFM_ACTIVE;
1045 :
1046 0 : switch (sc->link_speed) {
1047 : case IXGBE_LINK_SPEED_100_FULL:
1048 0 : ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1049 0 : break;
1050 : case IXGBE_LINK_SPEED_1GB_FULL:
1051 0 : switch (sc->optics) {
1052 : case IFM_10G_SR: /* multi-speed fiber */
1053 0 : ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1054 0 : break;
1055 : case IFM_10G_LR: /* multi-speed fiber */
1056 0 : ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1057 0 : break;
1058 : default:
1059 0 : ifmr->ifm_active |= sc->optics | IFM_FDX;
1060 0 : break;
1061 : }
1062 : break;
1063 : case IXGBE_LINK_SPEED_10GB_FULL:
1064 0 : ifmr->ifm_active |= sc->optics | IFM_FDX;
1065 0 : break;
1066 : }
1067 :
1068 0 : switch (sc->hw.fc.current_mode) {
1069 : case ixgbe_fc_tx_pause:
1070 0 : ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1071 0 : break;
1072 : case ixgbe_fc_rx_pause:
1073 0 : ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1074 0 : break;
1075 : case ixgbe_fc_full:
1076 0 : ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE |
1077 : IFM_ETH_TXPAUSE;
1078 0 : break;
1079 : default:
1080 0 : ifmr->ifm_active &= ~(IFM_FLOW | IFM_ETH_RXPAUSE |
1081 : IFM_ETH_TXPAUSE);
1082 0 : break;
1083 : }
1084 : }
1085 0 : }
1086 :
1087 : /*********************************************************************
1088 : *
1089 : * Media Ioctl callback
1090 : *
1091 : * This routine is called when the user changes speed/duplex using
1092 : * media/mediaopt options with ifconfig.
1093 : *
1094 : **********************************************************************/
1095 : int
1096 0 : ixgbe_media_change(struct ifnet *ifp)
1097 : {
1098 0 : struct ix_softc *sc = ifp->if_softc;
1099 0 : struct ixgbe_hw *hw = &sc->hw;
1100 0 : struct ifmedia *ifm = &sc->media;
1101 : ixgbe_link_speed speed = 0;
1102 :
1103 0 : if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1104 0 : return (EINVAL);
1105 :
1106 0 : if (hw->phy.media_type == ixgbe_media_type_backplane)
1107 0 : return (ENODEV);
1108 :
1109 0 : switch (IFM_SUBTYPE(ifm->ifm_media)) {
1110 : case IFM_AUTO:
1111 : case IFM_10G_T:
1112 0 : speed |= IXGBE_LINK_SPEED_100_FULL;
1113 : case IFM_10G_SR: /* KR, too */
1114 : case IFM_10G_LR:
1115 : case IFM_10G_CX4: /* KX4 */
1116 0 : speed |= IXGBE_LINK_SPEED_1GB_FULL;
1117 : case IFM_10G_SFP_CU:
1118 0 : speed |= IXGBE_LINK_SPEED_10GB_FULL;
1119 0 : break;
1120 : case IFM_1000_T:
1121 0 : speed |= IXGBE_LINK_SPEED_100_FULL;
1122 : case IFM_1000_LX:
1123 : case IFM_1000_SX:
1124 : case IFM_1000_CX: /* KX */
1125 0 : speed |= IXGBE_LINK_SPEED_1GB_FULL;
1126 0 : break;
1127 : case IFM_100_TX:
1128 : speed |= IXGBE_LINK_SPEED_100_FULL;
1129 0 : break;
1130 : default:
1131 0 : return (EINVAL);
1132 : }
1133 :
1134 0 : hw->mac.autotry_restart = TRUE;
1135 0 : hw->mac.ops.setup_link(hw, speed, TRUE);
1136 :
1137 0 : return (0);
1138 0 : }
1139 :
1140 : /*********************************************************************
1141 : *
1142 : * This routine maps the mbufs to tx descriptors, allowing the
1143 : * TX engine to transmit the packets.
1144 : * - return 0 on success, positive on failure
1145 : *
1146 : **********************************************************************/
1147 :
1148 : int
1149 0 : ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
1150 : {
1151 0 : struct ix_softc *sc = txr->sc;
1152 0 : uint32_t olinfo_status = 0, cmd_type_len;
1153 : int i, j, error;
1154 : int first, last = 0;
1155 : bus_dmamap_t map;
1156 : struct ixgbe_tx_buf *txbuf;
1157 : union ixgbe_adv_tx_desc *txd = NULL;
1158 :
1159 : /* Basic descriptor defines */
1160 0 : cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1161 : IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1162 :
1163 : #if NVLAN > 0
1164 0 : if (m_head->m_flags & M_VLANTAG)
1165 0 : cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1166 : #endif
1167 :
1168 : /*
1169 : * It is important to capture the first descriptor
1170 : * used, because it will contain the index of the
1171 : * one we tell the hardware to report back.
1172 : */
1173 0 : first = txr->next_avail_desc;
1174 0 : txbuf = &txr->tx_buffers[first];
1175 0 : map = txbuf->map;
1176 :
1177 : /*
1178 : * Map the packet for DMA.
1179 : */
1180 0 : error = bus_dmamap_load_mbuf(txr->txdma.dma_tag, map, m_head,
1181 : BUS_DMA_NOWAIT);
1182 0 : switch (error) {
1183 : case 0:
1184 : break;
1185 : case EFBIG:
1186 0 : if (m_defrag(m_head, M_NOWAIT) == 0 &&
1187 0 : (error = bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
1188 0 : m_head, BUS_DMA_NOWAIT)) == 0)
1189 : break;
1190 : /* FALLTHROUGH */
1191 : default:
1192 0 : sc->no_tx_dma_setup++;
1193 0 : return (error);
1194 : }
1195 :
1196 : /* Make certain there are enough descriptors */
1197 0 : KASSERT(map->dm_nsegs <= txr->tx_avail - 2);
1198 :
1199 : /*
1200 : * Set the appropriate offload context;
1201 : * this will become the first descriptor.
1202 : */
1203 0 : error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
1204 0 : if (error)
1205 : goto xmit_fail;
1206 :
1207 0 : i = txr->next_avail_desc;
1208 0 : for (j = 0; j < map->dm_nsegs; j++) {
1209 0 : txbuf = &txr->tx_buffers[i];
1210 0 : txd = &txr->tx_base[i];
1211 :
1212 0 : txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr);
1213 0 : txd->read.cmd_type_len = htole32(txr->txd_cmd |
1214 : cmd_type_len | map->dm_segs[j].ds_len);
1215 0 : txd->read.olinfo_status = htole32(olinfo_status);
1216 : last = i; /* descriptor that will get completion IRQ */
1217 :
1218 0 : if (++i == sc->num_tx_desc)
1219 : i = 0;
1220 :
1221 0 : txbuf->m_head = NULL;
1222 0 : txbuf->eop_index = -1;
1223 : }
1224 :
1225 0 : txd->read.cmd_type_len |=
1226 : htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1227 :
1228 0 : txbuf->m_head = m_head;
1229 : /*
1230 : * Here we swap the map so the last descriptor,
1231 : * which gets the completion interrupt has the
1232 : * real map, and the first descriptor gets the
1233 : * unused map from this descriptor.
1234 : */
1235 0 : txr->tx_buffers[first].map = txbuf->map;
1236 0 : txbuf->map = map;
1237 0 : bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
1238 : BUS_DMASYNC_PREWRITE);
1239 :
1240 : /* Set the index of the descriptor that will be marked done */
1241 0 : txbuf = &txr->tx_buffers[first];
1242 0 : txbuf->eop_index = last;
1243 :
1244 0 : membar_producer();
1245 :
1246 0 : atomic_sub_int(&txr->tx_avail, map->dm_nsegs);
1247 0 : txr->next_avail_desc = i;
1248 :
1249 0 : ++txr->tx_packets;
1250 0 : return (0);
1251 :
1252 : xmit_fail:
1253 0 : bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
1254 0 : return (error);
1255 0 : }
1256 :
1257 : void
1258 0 : ixgbe_iff(struct ix_softc *sc)
1259 : {
1260 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
1261 : struct arpcom *ac = &sc->arpcom;
1262 : uint32_t fctrl;
1263 : uint8_t *mta;
1264 : uint8_t *update_ptr;
1265 : struct ether_multi *enm;
1266 : struct ether_multistep step;
1267 : int mcnt = 0;
1268 :
1269 : IOCTL_DEBUGOUT("ixgbe_iff: begin");
1270 :
1271 0 : mta = sc->mta;
1272 0 : bzero(mta, sizeof(uint8_t) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1273 : MAX_NUM_MULTICAST_ADDRESSES);
1274 :
1275 0 : fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
1276 0 : fctrl &= ~(IXGBE_FCTRL_MPE | IXGBE_FCTRL_UPE);
1277 0 : ifp->if_flags &= ~IFF_ALLMULTI;
1278 :
1279 0 : if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
1280 0 : ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
1281 0 : ifp->if_flags |= IFF_ALLMULTI;
1282 0 : fctrl |= IXGBE_FCTRL_MPE;
1283 0 : if (ifp->if_flags & IFF_PROMISC)
1284 0 : fctrl |= IXGBE_FCTRL_UPE;
1285 : } else {
1286 0 : ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
1287 0 : while (enm != NULL) {
1288 0 : bcopy(enm->enm_addrlo,
1289 0 : &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1290 : IXGBE_ETH_LENGTH_OF_ADDRESS);
1291 0 : mcnt++;
1292 :
1293 0 : ETHER_NEXT_MULTI(step, enm);
1294 : }
1295 :
1296 : update_ptr = mta;
1297 0 : sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
1298 : ixgbe_mc_array_itr, TRUE);
1299 : }
1300 :
1301 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
1302 0 : }
1303 :
1304 : /*
1305 : * This is an iterator function needed by the multicast
1306 : * shared code. It simply feeds the shared code routine the
1307 : * addresses in the array built by ixgbe_iff() one by one.
1308 : */
1309 : uint8_t *
1310 0 : ixgbe_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
1311 : {
1312 0 : uint8_t *addr = *update_ptr;
1313 : uint8_t *newptr;
1314 0 : *vmdq = 0;
1315 :
1316 0 : newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1317 0 : *update_ptr = newptr;
1318 0 : return addr;
1319 : }
1320 :
1321 : void
1322 0 : ixgbe_local_timer(void *arg)
1323 : {
1324 0 : struct ix_softc *sc = arg;
1325 : #ifdef IX_DEBUG
1326 : struct ifnet *ifp = &sc->arpcom.ac_if;
1327 : #endif
1328 : int s;
1329 :
1330 0 : s = splnet();
1331 :
1332 0 : ixgbe_update_stats_counters(sc);
1333 :
1334 : #ifdef IX_DEBUG
1335 : if ((ifp->if_flags & (IFF_RUNNING|IFF_DEBUG)) ==
1336 : (IFF_RUNNING|IFF_DEBUG))
1337 : ixgbe_print_hw_stats(sc);
1338 : #endif
1339 :
1340 0 : timeout_add_sec(&sc->timer, 1);
1341 :
1342 0 : splx(s);
1343 0 : }
1344 :
1345 : void
1346 0 : ixgbe_update_link_status(struct ix_softc *sc)
1347 : {
1348 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
1349 : int link_state = LINK_STATE_DOWN;
1350 :
1351 0 : ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
1352 :
1353 0 : ifp->if_baudrate = 0;
1354 0 : if (sc->link_up) {
1355 : link_state = LINK_STATE_FULL_DUPLEX;
1356 :
1357 0 : switch (sc->link_speed) {
1358 : case IXGBE_LINK_SPEED_UNKNOWN:
1359 0 : ifp->if_baudrate = 0;
1360 0 : break;
1361 : case IXGBE_LINK_SPEED_100_FULL:
1362 0 : ifp->if_baudrate = IF_Mbps(100);
1363 0 : break;
1364 : case IXGBE_LINK_SPEED_1GB_FULL:
1365 0 : ifp->if_baudrate = IF_Gbps(1);
1366 0 : break;
1367 : case IXGBE_LINK_SPEED_10GB_FULL:
1368 0 : ifp->if_baudrate = IF_Gbps(10);
1369 0 : break;
1370 : }
1371 :
1372 : /* Update any Flow Control changes */
1373 0 : sc->hw.mac.ops.fc_enable(&sc->hw);
1374 0 : }
1375 0 : if (ifp->if_link_state != link_state) {
1376 0 : ifp->if_link_state = link_state;
1377 0 : if_link_state_change(ifp);
1378 0 : }
1379 0 : }
1380 :
1381 :
1382 : /*********************************************************************
1383 : *
1384 : * This routine disables all traffic on the adapter by issuing a
1385 : * global reset on the MAC and deallocates TX/RX buffers.
1386 : *
1387 : **********************************************************************/
1388 :
1389 : void
1390 0 : ixgbe_stop(void *arg)
1391 : {
1392 0 : struct ix_softc *sc = arg;
1393 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
1394 :
1395 : /* Tell the stack that the interface is no longer active */
1396 0 : ifp->if_flags &= ~IFF_RUNNING;
1397 :
1398 : INIT_DEBUGOUT("ixgbe_stop: begin\n");
1399 0 : ixgbe_disable_intr(sc);
1400 :
1401 0 : sc->hw.mac.ops.reset_hw(&sc->hw);
1402 0 : sc->hw.adapter_stopped = FALSE;
1403 0 : sc->hw.mac.ops.stop_adapter(&sc->hw);
1404 0 : if (sc->hw.mac.type == ixgbe_mac_82599EB)
1405 0 : sc->hw.mac.ops.stop_mac_link_on_d3(&sc->hw);
1406 : /* Turn off the laser */
1407 0 : if (sc->hw.mac.ops.disable_tx_laser)
1408 0 : sc->hw.mac.ops.disable_tx_laser(&sc->hw);
1409 0 : timeout_del(&sc->timer);
1410 0 : timeout_del(&sc->rx_refill);
1411 :
1412 : /* reprogram the RAR[0] in case user changed it. */
1413 0 : ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
1414 :
1415 0 : ifq_barrier(&ifp->if_snd);
1416 0 : intr_barrier(sc->tag);
1417 :
1418 0 : KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1419 :
1420 0 : ifq_clr_oactive(&ifp->if_snd);
1421 :
1422 : /* Should we really clear all structures on stop? */
1423 0 : ixgbe_free_transmit_structures(sc);
1424 0 : ixgbe_free_receive_structures(sc);
1425 0 : }
1426 :
1427 :
1428 : /*********************************************************************
1429 : *
1430 : * Determine hardware revision.
1431 : *
1432 : **********************************************************************/
1433 : void
1434 0 : ixgbe_identify_hardware(struct ix_softc *sc)
1435 : {
1436 0 : struct ixgbe_osdep *os = &sc->osdep;
1437 0 : struct pci_attach_args *pa = &os->os_pa;
1438 : uint32_t reg;
1439 :
1440 : /* Save off the information about this board */
1441 0 : sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
1442 0 : sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
1443 :
1444 0 : reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
1445 0 : sc->hw.revision_id = PCI_REVISION(reg);
1446 :
1447 0 : reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1448 0 : sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
1449 0 : sc->hw.subsystem_device_id = PCI_PRODUCT(reg);
1450 :
1451 : /* We need this here to set the num_segs below */
1452 0 : ixgbe_set_mac_type(&sc->hw);
1453 :
1454 : /* Pick up the 82599 and VF settings */
1455 0 : if (sc->hw.mac.type != ixgbe_mac_82598EB)
1456 0 : sc->hw.phy.smart_speed = ixgbe_smart_speed;
1457 0 : sc->num_segs = IXGBE_82599_SCATTER;
1458 0 : }
1459 :
1460 : /*********************************************************************
1461 : *
1462 : * Determine optic type
1463 : *
1464 : **********************************************************************/
1465 : void
1466 0 : ixgbe_setup_optics(struct ix_softc *sc)
1467 : {
1468 0 : struct ixgbe_hw *hw = &sc->hw;
1469 : int layer;
1470 :
1471 0 : layer = hw->mac.ops.get_supported_physical_layer(hw);
1472 :
1473 0 : if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1474 0 : sc->optics = IFM_10G_T;
1475 0 : else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1476 0 : sc->optics = IFM_1000_T;
1477 0 : else if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1478 0 : sc->optics = IFM_100_TX;
1479 0 : else if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1480 0 : layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1481 0 : sc->optics = IFM_10G_SFP_CU;
1482 0 : else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR ||
1483 0 : layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1484 0 : sc->optics = IFM_10G_LR;
1485 0 : else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
1486 0 : sc->optics = IFM_10G_SR;
1487 0 : else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
1488 0 : layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1489 0 : sc->optics = IFM_10G_CX4;
1490 0 : else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1491 0 : sc->optics = IFM_1000_SX;
1492 0 : else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_LX)
1493 0 : sc->optics = IFM_1000_LX;
1494 : else
1495 0 : sc->optics = IFM_AUTO;
1496 0 : }
1497 :
1498 : /*********************************************************************
1499 : *
1500 : * Setup the Legacy or MSI Interrupt handler
1501 : *
1502 : **********************************************************************/
1503 : int
1504 0 : ixgbe_allocate_legacy(struct ix_softc *sc)
1505 : {
1506 0 : struct ixgbe_osdep *os = &sc->osdep;
1507 0 : struct pci_attach_args *pa = &os->os_pa;
1508 : const char *intrstr = NULL;
1509 0 : pci_chipset_tag_t pc = pa->pa_pc;
1510 0 : pci_intr_handle_t ih;
1511 :
1512 : /* We allocate a single interrupt resource */
1513 0 : if (pci_intr_map_msi(pa, &ih) != 0 &&
1514 0 : pci_intr_map(pa, &ih) != 0) {
1515 0 : printf(": couldn't map interrupt\n");
1516 0 : return (ENXIO);
1517 : }
1518 :
1519 : #if 0
1520 : /* XXX */
1521 : /* Tasklets for Link, SFP and Multispeed Fiber */
1522 : TASK_INIT(&sc->link_task, 0, ixgbe_handle_link, sc);
1523 : TASK_INIT(&sc->mod_task, 0, ixgbe_handle_mod, sc);
1524 : TASK_INIT(&sc->msf_task, 0, ixgbe_handle_msf, sc);
1525 : #endif
1526 :
1527 0 : intrstr = pci_intr_string(pc, ih);
1528 0 : sc->tag = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE,
1529 0 : ixgbe_intr, sc, sc->dev.dv_xname);
1530 0 : if (sc->tag == NULL) {
1531 0 : printf(": couldn't establish interrupt");
1532 0 : if (intrstr != NULL)
1533 0 : printf(" at %s", intrstr);
1534 0 : printf("\n");
1535 0 : return (ENXIO);
1536 : }
1537 0 : printf(": %s", intrstr);
1538 :
1539 : /* For simplicity in the handlers */
1540 0 : sc->que_mask = IXGBE_EIMS_ENABLE_MASK;
1541 :
1542 0 : return (0);
1543 0 : }
1544 :
1545 : int
1546 0 : ixgbe_allocate_pci_resources(struct ix_softc *sc)
1547 : {
1548 0 : struct ixgbe_osdep *os = &sc->osdep;
1549 0 : struct pci_attach_args *pa = &os->os_pa;
1550 : int val;
1551 :
1552 0 : val = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_BAR(0));
1553 0 : if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
1554 0 : printf(": mmba is not mem space\n");
1555 0 : return (ENXIO);
1556 : }
1557 :
1558 0 : if (pci_mapreg_map(pa, PCIR_BAR(0), PCI_MAPREG_MEM_TYPE(val), 0,
1559 0 : &os->os_memt, &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
1560 0 : printf(": cannot find mem space\n");
1561 0 : return (ENXIO);
1562 : }
1563 0 : sc->hw.hw_addr = (uint8_t *)os->os_membase;
1564 :
1565 : /* Legacy defaults */
1566 0 : sc->num_queues = 1;
1567 0 : sc->hw.back = os;
1568 :
1569 : #ifdef notyet
1570 : /* Now setup MSI or MSI/X, return us the number of supported vectors. */
1571 : sc->msix = ixgbe_setup_msix(sc);
1572 : #endif
1573 :
1574 0 : return (0);
1575 0 : }
1576 :
1577 : void
1578 0 : ixgbe_free_pci_resources(struct ix_softc * sc)
1579 : {
1580 0 : struct ixgbe_osdep *os = &sc->osdep;
1581 0 : struct pci_attach_args *pa = &os->os_pa;
1582 0 : struct ix_queue *que = sc->queues;
1583 : int i;
1584 :
1585 : /* Release all msix queue resources: */
1586 0 : for (i = 0; i < sc->num_queues; i++, que++) {
1587 0 : if (que->tag)
1588 0 : pci_intr_disestablish(pa->pa_pc, que->tag);
1589 0 : que->tag = NULL;
1590 : }
1591 :
1592 0 : if (sc->tag)
1593 0 : pci_intr_disestablish(pa->pa_pc, sc->tag);
1594 0 : sc->tag = NULL;
1595 0 : if (os->os_membase != 0)
1596 0 : bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
1597 0 : os->os_membase = 0;
1598 0 : }
1599 :
1600 : /*********************************************************************
1601 : *
1602 : * Setup networking device structure and register an interface.
1603 : *
1604 : **********************************************************************/
1605 : void
1606 0 : ixgbe_setup_interface(struct ix_softc *sc)
1607 : {
1608 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
1609 :
1610 0 : strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
1611 0 : ifp->if_softc = sc;
1612 0 : ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1613 0 : ifp->if_xflags = IFXF_MPSAFE;
1614 0 : ifp->if_ioctl = ixgbe_ioctl;
1615 0 : ifp->if_qstart = ixgbe_start;
1616 0 : ifp->if_timer = 0;
1617 0 : ifp->if_watchdog = ixgbe_watchdog;
1618 0 : ifp->if_hardmtu = IXGBE_MAX_FRAME_SIZE -
1619 : ETHER_HDR_LEN - ETHER_CRC_LEN;
1620 0 : IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1621 :
1622 0 : ifp->if_capabilities = IFCAP_VLAN_MTU;
1623 :
1624 : #if NVLAN > 0
1625 0 : ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1626 : #endif
1627 :
1628 : #ifdef IX_CSUM_OFFLOAD
1629 : ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
1630 : #endif
1631 :
1632 : /*
1633 : * Specify the media types supported by this sc and register
1634 : * callbacks to update media and link information
1635 : */
1636 0 : ifmedia_init(&sc->media, IFM_IMASK, ixgbe_media_change,
1637 : ixgbe_media_status);
1638 0 : ixgbe_add_media_types(sc);
1639 0 : ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1640 :
1641 0 : if_attach(ifp);
1642 0 : ether_ifattach(ifp);
1643 :
1644 0 : sc->max_frame_size = IXGBE_MAX_FRAME_SIZE;
1645 0 : }
1646 :
1647 : void
1648 0 : ixgbe_add_media_types(struct ix_softc *sc)
1649 : {
1650 0 : struct ixgbe_hw *hw = &sc->hw;
1651 : int layer;
1652 :
1653 0 : layer = hw->mac.ops.get_supported_physical_layer(hw);
1654 :
1655 0 : if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1656 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1657 0 : if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1658 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1659 0 : if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1660 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1661 0 : if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1662 0 : layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1663 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SFP_CU, 0, NULL);
1664 0 : if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1665 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1666 0 : if (hw->phy.multispeed_fiber)
1667 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_LX, 0,
1668 : NULL);
1669 : }
1670 0 : if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1671 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1672 0 : if (hw->phy.multispeed_fiber)
1673 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0,
1674 : NULL);
1675 0 : } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1676 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1677 0 : if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1678 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1679 0 : if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1680 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1681 0 : if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1682 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1683 0 : if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1684 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1685 :
1686 0 : if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1687 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0,
1688 : NULL);
1689 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1690 0 : }
1691 :
1692 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1693 0 : }
1694 :
1695 : void
1696 0 : ixgbe_config_link(struct ix_softc *sc)
1697 : {
1698 0 : uint32_t autoneg, err = 0;
1699 0 : bool negotiate;
1700 :
1701 0 : if (ixgbe_is_sfp(&sc->hw)) {
1702 0 : if (sc->hw.phy.multispeed_fiber) {
1703 0 : sc->hw.mac.ops.setup_sfp(&sc->hw);
1704 0 : if (sc->hw.mac.ops.enable_tx_laser)
1705 0 : sc->hw.mac.ops.enable_tx_laser(&sc->hw);
1706 0 : ixgbe_handle_msf(sc);
1707 0 : } else
1708 0 : ixgbe_handle_mod(sc);
1709 : } else {
1710 0 : if (sc->hw.mac.ops.check_link)
1711 0 : err = sc->hw.mac.ops.check_link(&sc->hw, &autoneg,
1712 0 : &sc->link_up, FALSE);
1713 0 : if (err)
1714 0 : return;
1715 0 : autoneg = sc->hw.phy.autoneg_advertised;
1716 0 : if ((!autoneg) && (sc->hw.mac.ops.get_link_capabilities))
1717 0 : err = sc->hw.mac.ops.get_link_capabilities(&sc->hw,
1718 : &autoneg, &negotiate);
1719 0 : if (err)
1720 0 : return;
1721 0 : if (sc->hw.mac.ops.setup_link)
1722 0 : sc->hw.mac.ops.setup_link(&sc->hw,
1723 0 : autoneg, sc->link_up);
1724 : }
1725 0 : }
1726 :
1727 : /********************************************************************
1728 : * Manage DMA'able memory.
1729 : *******************************************************************/
1730 : int
1731 0 : ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size,
1732 : struct ixgbe_dma_alloc *dma, int mapflags)
1733 : {
1734 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
1735 0 : struct ixgbe_osdep *os = &sc->osdep;
1736 : int r;
1737 :
1738 0 : dma->dma_tag = os->os_pa.pa_dmat;
1739 0 : r = bus_dmamap_create(dma->dma_tag, size, 1,
1740 : size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
1741 0 : if (r != 0) {
1742 0 : printf("%s: ixgbe_dma_malloc: bus_dmamap_create failed; "
1743 0 : "error %u\n", ifp->if_xname, r);
1744 0 : goto fail_0;
1745 : }
1746 :
1747 0 : r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
1748 : 1, &dma->dma_nseg, BUS_DMA_NOWAIT);
1749 0 : if (r != 0) {
1750 0 : printf("%s: ixgbe_dma_malloc: bus_dmamem_alloc failed; "
1751 0 : "error %u\n", ifp->if_xname, r);
1752 0 : goto fail_1;
1753 : }
1754 :
1755 0 : r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1756 : &dma->dma_vaddr, BUS_DMA_NOWAIT);
1757 0 : if (r != 0) {
1758 0 : printf("%s: ixgbe_dma_malloc: bus_dmamem_map failed; "
1759 0 : "error %u\n", ifp->if_xname, r);
1760 0 : goto fail_2;
1761 : }
1762 :
1763 0 : r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1764 : size, NULL, mapflags | BUS_DMA_NOWAIT);
1765 0 : if (r != 0) {
1766 0 : printf("%s: ixgbe_dma_malloc: bus_dmamap_load failed; "
1767 0 : "error %u\n", ifp->if_xname, r);
1768 : goto fail_3;
1769 : }
1770 :
1771 0 : dma->dma_size = size;
1772 0 : return (0);
1773 : fail_3:
1774 0 : bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1775 : fail_2:
1776 0 : bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1777 : fail_1:
1778 0 : bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1779 : fail_0:
1780 0 : dma->dma_map = NULL;
1781 0 : dma->dma_tag = NULL;
1782 0 : return (r);
1783 0 : }
1784 :
1785 : void
1786 0 : ixgbe_dma_free(struct ix_softc *sc, struct ixgbe_dma_alloc *dma)
1787 : {
1788 0 : if (dma->dma_tag == NULL)
1789 : return;
1790 :
1791 0 : if (dma->dma_map != NULL) {
1792 0 : bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
1793 : dma->dma_map->dm_mapsize,
1794 : BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1795 0 : bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1796 0 : bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1797 0 : bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1798 0 : bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1799 0 : dma->dma_map = NULL;
1800 0 : }
1801 0 : }
1802 :
1803 :
1804 : /*********************************************************************
1805 : *
1806 : * Allocate memory for the transmit and receive rings, and then
1807 : * the descriptors associated with each, called only once at attach.
1808 : *
1809 : **********************************************************************/
1810 : int
1811 0 : ixgbe_allocate_queues(struct ix_softc *sc)
1812 : {
1813 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
1814 : struct ix_queue *que;
1815 : struct tx_ring *txr;
1816 : struct rx_ring *rxr;
1817 : int rsize, tsize;
1818 : int txconf = 0, rxconf = 0, i;
1819 :
1820 : /* First allocate the top level queue structs */
1821 0 : if (!(sc->queues = mallocarray(sc->num_queues,
1822 : sizeof(struct ix_queue), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1823 0 : printf("%s: Unable to allocate queue memory\n", ifp->if_xname);
1824 0 : goto fail;
1825 : }
1826 :
1827 : /* Then allocate the TX ring struct memory */
1828 0 : if (!(sc->tx_rings = mallocarray(sc->num_queues,
1829 : sizeof(struct tx_ring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1830 0 : printf("%s: Unable to allocate TX ring memory\n", ifp->if_xname);
1831 0 : goto fail;
1832 : }
1833 :
1834 : 	/* Next allocate the RX ring struct memory */
1835 0 : if (!(sc->rx_rings = mallocarray(sc->num_queues,
1836 : sizeof(struct rx_ring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1837 0 : printf("%s: Unable to allocate RX ring memory\n", ifp->if_xname);
1838 0 : goto rx_fail;
1839 : }
1840 :
1841 : /* For the ring itself */
1842 0 : tsize = roundup2(sc->num_tx_desc *
1843 : sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
1844 :
1845 : /*
1846 : 	 * Now set up the TX queues; txconf counts how many have been
1847 : 	 * configured so that, if anything fails midcourse, the memory
1848 : 	 * already allocated can be released gracefully
1849 : */
1850 0 : for (i = 0; i < sc->num_queues; i++, txconf++) {
1851 : /* Set up some basics */
1852 0 : txr = &sc->tx_rings[i];
1853 0 : txr->sc = sc;
1854 0 : txr->me = i;
1855 :
1856 0 : if (ixgbe_dma_malloc(sc, tsize,
1857 0 : &txr->txdma, BUS_DMA_NOWAIT)) {
1858 0 : printf("%s: Unable to allocate TX Descriptor memory\n",
1859 0 : ifp->if_xname);
1860 0 : goto err_tx_desc;
1861 : }
1862 0 : txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
1863 0 : bzero((void *)txr->tx_base, tsize);
1864 : }
1865 :
1866 : /*
1867 : * Next the RX queues...
1868 : */
1869 0 : rsize = roundup2(sc->num_rx_desc *
1870 : sizeof(union ixgbe_adv_rx_desc), 4096);
1871 0 : for (i = 0; i < sc->num_queues; i++, rxconf++) {
1872 0 : rxr = &sc->rx_rings[i];
1873 : /* Set up some basics */
1874 0 : rxr->sc = sc;
1875 0 : rxr->me = i;
1876 :
1877 0 : if (ixgbe_dma_malloc(sc, rsize,
1878 0 : &rxr->rxdma, BUS_DMA_NOWAIT)) {
1879 0 : printf("%s: Unable to allocate RxDescriptor memory\n",
1880 0 : ifp->if_xname);
1881 : goto err_rx_desc;
1882 : }
1883 0 : rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
1884 0 : bzero((void *)rxr->rx_base, rsize);
1885 : }
1886 :
1887 : /*
1888 : * Finally set up the queue holding structs
1889 : */
1890 0 : for (i = 0; i < sc->num_queues; i++) {
1891 0 : que = &sc->queues[i];
1892 0 : que->sc = sc;
1893 0 : que->txr = &sc->tx_rings[i];
1894 0 : que->rxr = &sc->rx_rings[i];
1895 : }
1896 :
1897 0 : return (0);
1898 :
1899 : err_rx_desc:
1900 0 : for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
1901 0 : ixgbe_dma_free(sc, &rxr->rxdma);
1902 : err_tx_desc:
1903 0 : for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
1904 0 : ixgbe_dma_free(sc, &txr->txdma);
1905 0 : free(sc->rx_rings, M_DEVBUF, sc->num_queues * sizeof(struct rx_ring));
1906 0 : sc->rx_rings = NULL;
1907 : rx_fail:
1908 0 : free(sc->tx_rings, M_DEVBUF, sc->num_queues * sizeof(struct tx_ring));
1909 0 : sc->tx_rings = NULL;
1910 : fail:
1911 0 : return (ENOMEM);
1912 0 : }
1913 :
1914 : /*********************************************************************
1915 : *
1916 : * Allocate memory for tx_buffer structures. The tx_buffer stores all
1917 : * the information needed to transmit a packet on the wire. This is
1918 :  *  called only once at attach; setup is done on every reset.
1919 : *
1920 : **********************************************************************/
1921 : int
1922 0 : ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
1923 : {
1924 0 : struct ix_softc *sc = txr->sc;
1925 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
1926 : struct ixgbe_tx_buf *txbuf;
1927 : int error, i;
1928 :
1929 0 : if (!(txr->tx_buffers = mallocarray(sc->num_tx_desc,
1930 : sizeof(struct ixgbe_tx_buf), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1931 0 : printf("%s: Unable to allocate tx_buffer memory\n",
1932 0 : ifp->if_xname);
1933 : error = ENOMEM;
1934 0 : goto fail;
1935 : }
1936 0 : txr->txtag = txr->txdma.dma_tag;
1937 :
1938 : /* Create the descriptor buffer dma maps */
1939 0 : for (i = 0; i < sc->num_tx_desc; i++) {
1940 0 : txbuf = &txr->tx_buffers[i];
1941 0 : error = bus_dmamap_create(txr->txdma.dma_tag, IXGBE_TSO_SIZE,
1942 : sc->num_segs, PAGE_SIZE, 0,
1943 : BUS_DMA_NOWAIT, &txbuf->map);
1944 :
1945 0 : if (error != 0) {
1946 0 : printf("%s: Unable to create TX DMA map\n",
1947 0 : ifp->if_xname);
1948 0 : goto fail;
1949 : }
1950 : }
1951 :
1952 0 : return 0;
1953 : fail:
1954 0 : return (error);
1955 0 : }
1956 :
1957 : /*********************************************************************
1958 : *
1959 : * Initialize a transmit ring.
1960 : *
1961 : **********************************************************************/
1962 : int
1963 0 : ixgbe_setup_transmit_ring(struct tx_ring *txr)
1964 : {
1965 0 : struct ix_softc *sc = txr->sc;
1966 : int error;
1967 :
1968 : /* Now allocate transmit buffers for the ring */
1969 0 : if ((error = ixgbe_allocate_transmit_buffers(txr)) != 0)
1970 0 : return (error);
1971 :
1972 : /* Clear the old ring contents */
1973 0 : bzero((void *)txr->tx_base,
1974 : (sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc);
1975 :
1976 : /* Reset indices */
1977 0 : txr->next_avail_desc = 0;
1978 0 : txr->next_to_clean = 0;
1979 :
1980 : /* Set number of descriptors available */
1981 0 : txr->tx_avail = sc->num_tx_desc;
1982 :
1983 0 : bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1984 : 0, txr->txdma.dma_map->dm_mapsize,
1985 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1986 :
1987 0 : return (0);
1988 0 : }
1989 :
1990 : /*********************************************************************
1991 : *
1992 : * Initialize all transmit rings.
1993 : *
1994 : **********************************************************************/
1995 : int
1996 0 : ixgbe_setup_transmit_structures(struct ix_softc *sc)
1997 : {
1998 0 : struct tx_ring *txr = sc->tx_rings;
1999 : int i, error;
2000 :
2001 0 : for (i = 0; i < sc->num_queues; i++, txr++) {
2002 0 : if ((error = ixgbe_setup_transmit_ring(txr)) != 0)
2003 : goto fail;
2004 : }
2005 :
2006 0 : return (0);
2007 : fail:
2008 0 : ixgbe_free_transmit_structures(sc);
2009 0 : return (error);
2010 0 : }
2011 :
2012 : /*********************************************************************
2013 : *
2014 : * Enable transmit unit.
2015 : *
2016 : **********************************************************************/
2017 : void
2018 0 : ixgbe_initialize_transmit_units(struct ix_softc *sc)
2019 : {
2020 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
2021 : struct tx_ring *txr;
2022 0 : struct ixgbe_hw *hw = &sc->hw;
2023 : int i;
2024 : uint64_t tdba;
2025 : uint32_t txctrl;
2026 :
2027 : /* Setup the Base and Length of the Tx Descriptor Ring */
2028 :
2029 0 : for (i = 0; i < sc->num_queues; i++) {
2030 0 : txr = &sc->tx_rings[i];
2031 :
2032 : /* Setup descriptor base address */
2033 0 : tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
2034 0 : IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2035 : (tdba & 0x00000000ffffffffULL));
2036 0 : IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2037 0 : IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2038 : sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2039 :
2040 : /* Setup the HW Tx Head and Tail descriptor pointers */
2041 0 : IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2042 0 : IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2043 :
2044 : /* Setup Transmit Descriptor Cmd Settings */
2045 0 : txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2046 0 : txr->queue_status = IXGBE_QUEUE_IDLE;
2047 0 : txr->watchdog_timer = 0;
2048 :
2049 : /* Disable Head Writeback */
2050 0 : switch (hw->mac.type) {
2051 : case ixgbe_mac_82598EB:
2052 0 : txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2053 0 : break;
2054 : case ixgbe_mac_82599EB:
2055 : case ixgbe_mac_X540:
2056 : default:
2057 0 : txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2058 0 : break;
2059 : }
2060 0 : txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2061 0 : switch (hw->mac.type) {
2062 : case ixgbe_mac_82598EB:
2063 0 : IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2064 0 : break;
2065 : case ixgbe_mac_82599EB:
2066 : case ixgbe_mac_X540:
2067 : default:
2068 0 : IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2069 0 : break;
2070 : }
2071 : }
2072 0 : ifp->if_timer = 0;
2073 :
2074 0 : if (hw->mac.type != ixgbe_mac_82598EB) {
2075 : uint32_t dmatxctl, rttdcs;
2076 0 : dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2077 0 : dmatxctl |= IXGBE_DMATXCTL_TE;
2078 0 : IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2079 : /* Disable arbiter to set MTQC */
2080 0 : rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2081 0 : rttdcs |= IXGBE_RTTDCS_ARBDIS;
2082 0 : IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2083 0 : IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2084 0 : rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2085 0 : IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2086 0 : }
2087 0 : }
2088 :
2089 : /*********************************************************************
2090 : *
2091 : * Free all transmit rings.
2092 : *
2093 : **********************************************************************/
2094 : void
2095 0 : ixgbe_free_transmit_structures(struct ix_softc *sc)
2096 : {
2097 0 : struct tx_ring *txr = sc->tx_rings;
2098 : int i;
2099 :
2100 0 : for (i = 0; i < sc->num_queues; i++, txr++)
2101 0 : ixgbe_free_transmit_buffers(txr);
2102 0 : }
2103 :
2104 : /*********************************************************************
2105 : *
2106 : * Free transmit ring related data structures.
2107 : *
2108 : **********************************************************************/
2109 : void
2110 0 : ixgbe_free_transmit_buffers(struct tx_ring *txr)
2111 : {
2112 0 : struct ix_softc *sc = txr->sc;
2113 : struct ixgbe_tx_buf *tx_buffer;
2114 : int i;
2115 :
2116 : INIT_DEBUGOUT("free_transmit_ring: begin");
2117 :
2118 0 : if (txr->tx_buffers == NULL)
2119 0 : return;
2120 :
2121 : tx_buffer = txr->tx_buffers;
2122 0 : for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
2123 0 : if (tx_buffer->map != NULL && tx_buffer->map->dm_nsegs > 0) {
2124 0 : bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
2125 : 0, tx_buffer->map->dm_mapsize,
2126 : BUS_DMASYNC_POSTWRITE);
2127 0 : bus_dmamap_unload(txr->txdma.dma_tag,
2128 : tx_buffer->map);
2129 0 : }
2130 0 : if (tx_buffer->m_head != NULL) {
2131 0 : m_freem(tx_buffer->m_head);
2132 0 : tx_buffer->m_head = NULL;
2133 0 : }
2134 0 : if (tx_buffer->map != NULL) {
2135 0 : bus_dmamap_destroy(txr->txdma.dma_tag,
2136 : tx_buffer->map);
2137 0 : tx_buffer->map = NULL;
2138 0 : }
2139 : }
2140 :
2141 0 : if (txr->tx_buffers != NULL)
2142 0 : free(txr->tx_buffers, M_DEVBUF,
2143 0 : sc->num_tx_desc * sizeof(struct ixgbe_tx_buf));
2144 0 : txr->tx_buffers = NULL;
2145 0 : txr->txtag = NULL;
2146 0 : }
2147 :
2148 : /*********************************************************************
2149 : *
2150 : * Advanced Context Descriptor setup for VLAN or CSUM
2151 : *
2152 : **********************************************************************/
2153 :
2154 : int
2155 0 : ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
2156 : uint32_t *cmd_type_len, uint32_t *olinfo_status)
2157 : {
2158 0 : struct ix_softc *sc = txr->sc;
2159 : struct ixgbe_adv_tx_context_desc *TXD;
2160 : struct ixgbe_tx_buf *tx_buffer;
2161 : #if NVLAN > 0
2162 : struct ether_vlan_header *eh;
2163 : #else
2164 : struct ether_header *eh;
2165 : #endif
2166 : struct ip *ip;
2167 : #ifdef notyet
2168 : struct ip6_hdr *ip6;
2169 : #endif
2170 : struct mbuf *m;
2171 0 : int ipoff;
2172 : uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2173 : int ehdrlen, ip_hlen = 0;
2174 : uint16_t etype;
2175 : uint8_t ipproto = 0;
2176 : int offload = TRUE;
2177 0 : int ctxd = txr->next_avail_desc;
2178 : #if NVLAN > 0
2179 : uint16_t vtag = 0;
2180 : #endif
2181 :
2182 : #if notyet
2183 : /* First check if TSO is to be used */
2184 : if (mp->m_pkthdr.csum_flags & CSUM_TSO)
2185 : return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
2186 : #endif
2187 :
2188 0 : if ((mp->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) == 0)
2189 0 : offload = FALSE;
2190 :
2191 : /* Indicate the whole packet as payload when not doing TSO */
2192 0 : *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
2193 :
2194 : /* Now ready a context descriptor */
2195 0 : TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2196 0 : tx_buffer = &txr->tx_buffers[ctxd];
2197 :
2198 : /*
2199 : * In advanced descriptors the vlan tag must
2200 : * be placed into the descriptor itself. Hence
2201 : * we need to make one even if not doing offloads.
2202 : */
2203 : #if NVLAN > 0
2204 0 : if (mp->m_flags & M_VLANTAG) {
2205 0 : vtag = mp->m_pkthdr.ether_vtag;
2206 0 : vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2207 0 : } else
2208 : #endif
2209 0 : if (offload == FALSE)
2210 0 : return (0); /* No need for CTX */
2211 :
2212 : /*
2213 : * Determine where frame payload starts.
2214 : * Jump over vlan headers if already present,
2215 : * helpful for QinQ too.
2216 : */
2217 0 : if (mp->m_len < sizeof(struct ether_header))
2218 0 : return (1);
2219 : #if NVLAN > 0
2220 0 : eh = mtod(mp, struct ether_vlan_header *);
2221 0 : if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2222 0 : if (mp->m_len < sizeof(struct ether_vlan_header))
2223 0 : return (1);
2224 0 : etype = ntohs(eh->evl_proto);
2225 : ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2226 0 : } else {
2227 0 : etype = ntohs(eh->evl_encap_proto);
2228 : ehdrlen = ETHER_HDR_LEN;
2229 : }
2230 : #else
2231 : eh = mtod(mp, struct ether_header *);
2232 : etype = ntohs(eh->ether_type);
2233 : ehdrlen = ETHER_HDR_LEN;
2234 : #endif
2235 :
2236 : /* Set the ether header length */
2237 0 : vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2238 :
2239 0 : switch (etype) {
2240 : case ETHERTYPE_IP:
2241 0 : if (mp->m_pkthdr.len < ehdrlen + sizeof(*ip))
2242 0 : return (1);
2243 0 : m = m_getptr(mp, ehdrlen, &ipoff);
2244 0 : KASSERT(m != NULL && m->m_len - ipoff >= sizeof(*ip));
2245 0 : ip = (struct ip *)(m->m_data + ipoff);
2246 0 : ip_hlen = ip->ip_hl << 2;
2247 0 : ipproto = ip->ip_p;
2248 : type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2249 0 : break;
2250 : #ifdef notyet
2251 : case ETHERTYPE_IPV6:
2252 : if (mp->m_pkthdr.len < ehdrlen + sizeof(*ip6))
2253 : return (1);
2254 : m = m_getptr(mp, ehdrlen, &ipoff);
2255 : KASSERT(m != NULL && m->m_len - ipoff >= sizeof(*ip6));
2256 : 		ip6 = (struct ip6_hdr *)(m->m_data + ipoff);
2257 : ip_hlen = sizeof(*ip6);
2258 : /* XXX-BZ this will go badly in case of ext hdrs. */
2259 : ipproto = ip6->ip6_nxt;
2260 : type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2261 : break;
2262 : #endif
2263 : default:
2264 : offload = FALSE;
2265 0 : break;
2266 : }
2267 :
2268 0 : vlan_macip_lens |= ip_hlen;
2269 0 : type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2270 :
2271 0 : switch (ipproto) {
2272 : case IPPROTO_TCP:
2273 0 : if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
2274 0 : type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2275 : break;
2276 : case IPPROTO_UDP:
2277 0 : if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
2278 0 : type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2279 : break;
2280 : default:
2281 : offload = FALSE;
2282 0 : break;
2283 : }
2284 :
2285 0 : if (offload) /* For the TX descriptor setup */
2286 0 : *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
2287 :
2288 : /* Now copy bits into descriptor */
2289 0 : TXD->vlan_macip_lens = htole32(vlan_macip_lens);
2290 0 : TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
2291 0 : TXD->seqnum_seed = htole32(0);
2292 0 : TXD->mss_l4len_idx = htole32(0);
2293 :
2294 0 : tx_buffer->m_head = NULL;
2295 0 : tx_buffer->eop_index = -1;
2296 :
2297 0 : membar_producer();
2298 :
2299 : /* We've consumed the first desc, adjust counters */
2300 0 : if (++ctxd == sc->num_tx_desc)
2301 : ctxd = 0;
2302 0 : txr->next_avail_desc = ctxd;
2303 0 : atomic_dec_int(&txr->tx_avail);
2304 :
2305 0 : return (0);
2306 0 : }
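/*
 * Editorial worked example (illustrative values only): for an untagged
 * IPv4/TCP frame the code above ends up with ehdrlen = ETHER_HDR_LEN (14)
 * and ip_hlen = 20 for an option-less IP header, so
 *
 *	vlan_macip_lens = (14 << IXGBE_ADVTXD_MACLEN_SHIFT) | 20;
 *
 * With an 802.1Q tag present, ehdrlen grows by ETHER_VLAN_ENCAP_LEN and the
 * 16-bit tag is OR'ed in at IXGBE_ADVTXD_VLAN_SHIFT.  The TCP vs. UDP choice
 * is carried separately in type_tucmd_mlhl via the L4T bits.
 */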
2307 :
2308 : /**********************************************************************
2309 : *
2310 : * Examine each tx_buffer in the used queue. If the hardware is done
2311 : * processing the packet then free associated resources. The
2312 : * tx_buffer is put back on the free queue.
2313 : *
2314 : **********************************************************************/
2315 : int
2316 0 : ixgbe_txeof(struct tx_ring *txr)
2317 : {
2318 0 : struct ix_softc *sc = txr->sc;
2319 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
2320 : uint32_t first, last, done, processed;
2321 : uint32_t num_avail;
2322 : struct ixgbe_tx_buf *tx_buffer;
2323 : struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
2324 :
2325 0 : if (!ISSET(ifp->if_flags, IFF_RUNNING))
2326 0 : return FALSE;
2327 :
2328 0 : if (txr->tx_avail == sc->num_tx_desc) {
2329 0 : txr->queue_status = IXGBE_QUEUE_IDLE;
2330 0 : return FALSE;
2331 : }
2332 :
2333 0 : membar_consumer();
2334 :
2335 : processed = 0;
2336 0 : first = txr->next_to_clean;
2337 : 	/* was the tx queue cleaned up in the meantime? */
2338 0 : if (txr->tx_buffers == NULL)
2339 0 : return FALSE;
2340 0 : tx_buffer = &txr->tx_buffers[first];
2341 : /* For cleanup we just use legacy struct */
2342 0 : tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2343 0 : last = tx_buffer->eop_index;
2344 0 : if (last == -1)
2345 0 : return FALSE;
2346 0 : eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2347 :
2348 : /*
2349 : * Get the index of the first descriptor
2350 : * BEYOND the EOP and call that 'done'.
2351 : * I do this so the comparison in the
2352 : * inner while loop below can be simple
2353 : */
2354 0 : if (++last == sc->num_tx_desc) last = 0;
2355 : done = last;
2356 :
2357 0 : bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2358 : 0, txr->txdma.dma_map->dm_mapsize,
2359 : BUS_DMASYNC_POSTREAD);
2360 :
2361 0 : while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
2362 : /* We clean the range of the packet */
2363 0 : while (first != done) {
2364 0 : tx_desc->upper.data = 0;
2365 0 : tx_desc->lower.data = 0;
2366 0 : tx_desc->buffer_addr = 0;
2367 0 : ++processed;
2368 :
2369 0 : if (tx_buffer->m_head) {
2370 0 : bus_dmamap_sync(txr->txdma.dma_tag,
2371 : tx_buffer->map,
2372 : 0, tx_buffer->map->dm_mapsize,
2373 : BUS_DMASYNC_POSTWRITE);
2374 0 : bus_dmamap_unload(txr->txdma.dma_tag,
2375 : tx_buffer->map);
2376 0 : m_freem(tx_buffer->m_head);
2377 0 : tx_buffer->m_head = NULL;
2378 0 : }
2379 0 : tx_buffer->eop_index = -1;
2380 :
2381 0 : if (++first == sc->num_tx_desc)
2382 : first = 0;
2383 :
2384 0 : tx_buffer = &txr->tx_buffers[first];
2385 0 : tx_desc = (struct ixgbe_legacy_tx_desc *)
2386 0 : &txr->tx_base[first];
2387 : }
2388 0 : ++txr->packets;
2389 : /* See if there is more work now */
2390 0 : last = tx_buffer->eop_index;
2391 0 : if (last != -1) {
2392 : eop_desc =
2393 0 : (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2394 : /* Get next done point */
2395 0 : if (++last == sc->num_tx_desc) last = 0;
2396 : done = last;
2397 : } else
2398 : break;
2399 : }
2400 :
2401 0 : bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2402 : 0, txr->txdma.dma_map->dm_mapsize,
2403 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2404 :
2405 0 : txr->next_to_clean = first;
2406 :
2407 0 : num_avail = atomic_add_int_nv(&txr->tx_avail, processed);
2408 :
2409 : /* All clean, turn off the timer */
2410 0 : if (num_avail == sc->num_tx_desc)
2411 0 : ifp->if_timer = 0;
2412 :
2413 0 : if (ifq_is_oactive(&ifp->if_snd))
2414 0 : ifq_restart(&ifp->if_snd);
2415 :
2416 0 : return TRUE;
2417 0 : }
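/*
 * Editorial note on the cleanup indices above: "first" is the oldest
 * in-flight slot and "done" is the slot just past the EOP descriptor, both
 * wrapping modulo num_tx_desc.  For example, on a (hypothetical) 256-entry
 * ring a packet occupying slots 254..255 has eop_index = 255, so done
 * becomes 0 and the inner loop cleans 254 and 255 before re-checking the DD
 * bit on the next packet's EOP descriptor.
 */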
2418 :
2419 : /*********************************************************************
2420 : *
2421 : * Get a buffer from system mbuf buffer pool.
2422 : *
2423 : **********************************************************************/
2424 : int
2425 0 : ixgbe_get_buf(struct rx_ring *rxr, int i)
2426 : {
2427 0 : struct ix_softc *sc = rxr->sc;
2428 : struct ixgbe_rx_buf *rxbuf;
2429 : struct mbuf *mp;
2430 : int error;
2431 : union ixgbe_adv_rx_desc *rxdesc;
2432 : size_t dsize = sizeof(union ixgbe_adv_rx_desc);
2433 :
2434 0 : rxbuf = &rxr->rx_buffers[i];
2435 0 : rxdesc = &rxr->rx_base[i];
2436 0 : if (rxbuf->buf) {
2437 0 : printf("%s: ixgbe_get_buf: slot %d already has an mbuf\n",
2438 0 : sc->dev.dv_xname, i);
2439 0 : return (ENOBUFS);
2440 : }
2441 :
2442 : 	/* needed in any case so preallocate since this one will fail for sure */
2443 0 : mp = MCLGETI(NULL, M_DONTWAIT, NULL, sc->rx_mbuf_sz);
2444 0 : if (!mp)
2445 0 : return (ENOBUFS);
2446 :
2447 0 : mp->m_len = mp->m_pkthdr.len = sc->rx_mbuf_sz;
2448 0 : m_adj(mp, ETHER_ALIGN);
2449 :
2450 0 : error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map,
2451 : mp, BUS_DMA_NOWAIT);
2452 0 : if (error) {
2453 0 : m_freem(mp);
2454 0 : return (error);
2455 : }
2456 :
2457 0 : bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2458 : 0, rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
2459 0 : rxbuf->buf = mp;
2460 :
2461 0 : bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2462 : dsize * i, dsize, BUS_DMASYNC_POSTWRITE);
2463 :
2464 0 : rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);
2465 :
2466 0 : bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2467 : dsize * i, dsize, BUS_DMASYNC_PREWRITE);
2468 :
2469 0 : return (0);
2470 0 : }
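/*
 * Editorial note: the m_adj(mp, ETHER_ALIGN) above shifts the start of the
 * cluster by two bytes so that, after the 14-byte Ethernet header, the IP
 * header lands on a 4-byte boundary; the same two bytes are subtracted from
 * the SRRCTL buffer size in ixgbe_initialize_receive_units() below.
 */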
2471 :
2472 : /*********************************************************************
2473 : *
2474 : * Allocate memory for rx_buffer structures. Since we use one
2475 :  *  rx_buffer per received packet, the maximum number of rx_buffers
2476 : * that we'll need is equal to the number of receive descriptors
2477 : * that we've allocated.
2478 : *
2479 : **********************************************************************/
2480 : int
2481 0 : ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
2482 : {
2483 0 : struct ix_softc *sc = rxr->sc;
2484 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
2485 : struct ixgbe_rx_buf *rxbuf;
2486 : int i, error;
2487 :
2488 0 : if (!(rxr->rx_buffers = mallocarray(sc->num_rx_desc,
2489 : sizeof(struct ixgbe_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2490 0 : printf("%s: Unable to allocate rx_buffer memory\n",
2491 0 : ifp->if_xname);
2492 : error = ENOMEM;
2493 0 : goto fail;
2494 : }
2495 :
2496 : rxbuf = rxr->rx_buffers;
2497 0 : for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
2498 0 : error = bus_dmamap_create(rxr->rxdma.dma_tag, 16 * 1024, 1,
2499 : 16 * 1024, 0, BUS_DMA_NOWAIT, &rxbuf->map);
2500 0 : if (error) {
2501 0 : printf("%s: Unable to create Pack DMA map\n",
2502 0 : ifp->if_xname);
2503 0 : goto fail;
2504 : }
2505 : }
2506 0 : bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
2507 : rxr->rxdma.dma_map->dm_mapsize,
2508 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2509 :
2510 0 : return (0);
2511 :
2512 : fail:
2513 0 : return (error);
2514 0 : }
2515 :
2516 : /*********************************************************************
2517 : *
2518 : * Initialize a receive ring and its buffers.
2519 : *
2520 : **********************************************************************/
2521 : int
2522 0 : ixgbe_setup_receive_ring(struct rx_ring *rxr)
2523 : {
2524 0 : struct ix_softc *sc = rxr->sc;
2525 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
2526 : int rsize, error;
2527 :
2528 0 : rsize = roundup2(sc->num_rx_desc *
2529 : sizeof(union ixgbe_adv_rx_desc), 4096);
2530 : /* Clear the ring contents */
2531 0 : bzero((void *)rxr->rx_base, rsize);
2532 :
2533 0 : if ((error = ixgbe_allocate_receive_buffers(rxr)) != 0)
2534 0 : return (error);
2535 :
2536 : /* Setup our descriptor indices */
2537 0 : rxr->next_to_check = 0;
2538 0 : rxr->last_desc_filled = sc->num_rx_desc - 1;
2539 :
2540 0 : if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
2541 0 : sc->num_rx_desc);
2542 :
2543 0 : ixgbe_rxfill(rxr);
2544 0 : if (if_rxr_inuse(&rxr->rx_ring) == 0) {
2545 0 : printf("%s: unable to fill any rx descriptors\n",
2546 0 : sc->dev.dv_xname);
2547 0 : return (ENOBUFS);
2548 : }
2549 :
2550 0 : return (0);
2551 0 : }
2552 :
2553 : int
2554 0 : ixgbe_rxfill(struct rx_ring *rxr)
2555 : {
2556 0 : struct ix_softc *sc = rxr->sc;
2557 : int post = 0;
2558 : u_int slots;
2559 : int i;
2560 :
2561 0 : i = rxr->last_desc_filled;
2562 0 : for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc);
2563 0 : slots > 0; slots--) {
2564 0 : if (++i == sc->num_rx_desc)
2565 : i = 0;
2566 :
2567 0 : if (ixgbe_get_buf(rxr, i) != 0)
2568 : break;
2569 :
2570 0 : rxr->last_desc_filled = i;
2571 : post = 1;
2572 : }
2573 :
2574 0 : if_rxr_put(&rxr->rx_ring, slots);
2575 :
2576 0 : return (post);
2577 : }
2578 :
2579 : void
2580 0 : ixgbe_rxrefill(void *xsc)
2581 : {
2582 0 : struct ix_softc *sc = xsc;
2583 0 : struct ix_queue *que = sc->queues;
2584 : int s;
2585 :
2586 0 : s = splnet();
2587 0 : if (ixgbe_rxfill(que->rxr)) {
2588 : /* Advance the Rx Queue "Tail Pointer" */
2589 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me),
2590 : que->rxr->last_desc_filled);
2591 0 : } else
2592 0 : timeout_add(&sc->rx_refill, 1);
2593 0 : splx(s);
2594 0 : }
2595 :
2596 : /*********************************************************************
2597 : *
2598 : * Initialize all receive rings.
2599 : *
2600 : **********************************************************************/
2601 : int
2602 0 : ixgbe_setup_receive_structures(struct ix_softc *sc)
2603 : {
2604 0 : struct rx_ring *rxr = sc->rx_rings;
2605 : int i;
2606 :
2607 0 : for (i = 0; i < sc->num_queues; i++, rxr++)
2608 0 : if (ixgbe_setup_receive_ring(rxr))
2609 : goto fail;
2610 :
2611 0 : return (0);
2612 : fail:
2613 0 : ixgbe_free_receive_structures(sc);
2614 0 : return (ENOBUFS);
2615 0 : }
2616 :
2617 : /*********************************************************************
2618 : *
2619 : * Setup receive registers and features.
2620 : *
2621 : **********************************************************************/
2622 : #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2623 :
2624 : void
2625 0 : ixgbe_initialize_receive_units(struct ix_softc *sc)
2626 : {
2627 0 : struct rx_ring *rxr = sc->rx_rings;
2628 0 : struct ixgbe_hw *hw = &sc->hw;
2629 : uint32_t bufsz, fctrl, srrctl, rxcsum;
2630 : uint32_t hlreg;
2631 : int i;
2632 :
2633 : /*
2634 : * Make sure receives are disabled while
2635 : * setting up the descriptor ring
2636 : */
2637 0 : ixgbe_disable_rx(hw);
2638 :
2639 : /* Enable broadcasts */
2640 0 : fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2641 0 : fctrl |= IXGBE_FCTRL_BAM;
2642 0 : if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2643 0 : fctrl |= IXGBE_FCTRL_DPF;
2644 0 : fctrl |= IXGBE_FCTRL_PMCF;
2645 0 : }
2646 0 : IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2647 :
2648 : /* Always enable jumbo frame reception */
2649 0 : hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2650 0 : hlreg |= IXGBE_HLREG0_JUMBOEN;
2651 0 : IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
2652 :
2653 0 : bufsz = (sc->rx_mbuf_sz - ETHER_ALIGN) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2654 :
2655 0 : for (i = 0; i < sc->num_queues; i++, rxr++) {
2656 0 : uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
2657 :
2658 : /* Setup the Base and Length of the Rx Descriptor Ring */
2659 0 : IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2660 : (rdba & 0x00000000ffffffffULL));
2661 0 : IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2662 0 : IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2663 : sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2664 :
2665 : /* Set up the SRRCTL register */
2666 0 : srrctl = bufsz | IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2667 0 : IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2668 :
2669 : /* Setup the HW Rx Head and Tail Descriptor Pointers */
2670 0 : IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2671 0 : IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2672 : }
2673 :
2674 0 : if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2675 : uint32_t psrtype = IXGBE_PSRTYPE_TCPHDR |
2676 : IXGBE_PSRTYPE_UDPHDR |
2677 : IXGBE_PSRTYPE_IPV4HDR |
2678 : IXGBE_PSRTYPE_IPV6HDR;
2679 0 : IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2680 0 : }
2681 :
2682 0 : rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2683 0 : rxcsum &= ~IXGBE_RXCSUM_PCSD;
2684 :
2685 : /* Setup RSS */
2686 0 : if (sc->num_queues > 1) {
2687 0 : ixgbe_initialize_rss_mapping(sc);
2688 :
2689 : /* RSS and RX IPP Checksum are mutually exclusive */
2690 0 : rxcsum |= IXGBE_RXCSUM_PCSD;
2691 0 : }
2692 :
2693 : /* This is useful for calculating UDP/IP fragment checksums */
2694 0 : if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2695 0 : rxcsum |= IXGBE_RXCSUM_IPPCSE;
2696 :
2697 0 : IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2698 0 : }
2699 :
2700 : void
2701 0 : ixgbe_initialize_rss_mapping(struct ix_softc *sc)
2702 : {
2703 0 : struct ixgbe_hw *hw = &sc->hw;
2704 0 : uint32_t reta = 0, mrqc, rss_key[10];
2705 : int i, j, queue_id, table_size, index_mult;
2706 :
2707 : /* set up random bits */
2708 0 : arc4random_buf(&rss_key, sizeof(rss_key));
2709 :
2710 : /* Set multiplier for RETA setup and table size based on MAC */
2711 : index_mult = 0x1;
2712 : table_size = 128;
2713 0 : switch (sc->hw.mac.type) {
2714 : case ixgbe_mac_82598EB:
2715 : index_mult = 0x11;
2716 0 : break;
2717 : case ixgbe_mac_X550:
2718 : case ixgbe_mac_X550EM_x:
2719 : table_size = 512;
2720 0 : break;
2721 : default:
2722 : break;
2723 : }
2724 :
2725 : /* Set up the redirection table */
2726 0 : for (i = 0, j = 0; i < table_size; i++, j++) {
2727 0 : if (j == sc->num_queues) j = 0;
2728 0 : queue_id = (j * index_mult);
2729 : /*
2730 : * The low 8 bits are for hash value (n+0);
2731 : * The next 8 bits are for hash value (n+1), etc.
2732 : */
2733 0 : reta = reta >> 8;
2734 0 : reta = reta | ( ((uint32_t) queue_id) << 24);
2735 0 : if ((i & 3) == 3) {
2736 0 : if (i < 128)
2737 0 : IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2738 : else
2739 0 : IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
2740 : reta);
2741 : reta = 0;
2742 0 : }
2743 : }
2744 :
2745 : /* Now fill our hash function seeds */
2746 0 : for (i = 0; i < 10; i++)
2747 0 : IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2748 :
2749 : /*
2750 : * Disable UDP - IP fragments aren't currently being handled
2751 : * and so we end up with a mix of 2-tuple and 4-tuple
2752 : * traffic.
2753 : */
2754 : mrqc = IXGBE_MRQC_RSSEN
2755 : | IXGBE_MRQC_RSS_FIELD_IPV4
2756 : | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2757 : | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2758 : | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2759 : | IXGBE_MRQC_RSS_FIELD_IPV6
2760 : | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2761 : ;
2762 0 : IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2763 0 : }
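/*
 * Editorial worked example: the loop above packs four 8-bit queue indices
 * per 32-bit RETA register, low byte first.  With num_queues = 4 and
 * index_mult = 1 (non-82598 MACs) the first register written is
 *
 *	RETA(0) = 0x03020100
 *
 * i.e. hash results 0..3 map to queues 0..3, and the same pattern repeats
 * across the rest of the redirection table.
 */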
2764 :
2765 : /*********************************************************************
2766 : *
2767 : * Free all receive rings.
2768 : *
2769 : **********************************************************************/
2770 : void
2771 0 : ixgbe_free_receive_structures(struct ix_softc *sc)
2772 : {
2773 : struct rx_ring *rxr;
2774 : int i;
2775 :
2776 0 : for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
2777 0 : if_rxr_init(&rxr->rx_ring, 0, 0);
2778 :
2779 0 : for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
2780 0 : ixgbe_free_receive_buffers(rxr);
2781 0 : }
2782 :
2783 : /*********************************************************************
2784 : *
2785 : * Free receive ring data structures
2786 : *
2787 : **********************************************************************/
2788 : void
2789 0 : ixgbe_free_receive_buffers(struct rx_ring *rxr)
2790 : {
2791 : struct ix_softc *sc;
2792 : struct ixgbe_rx_buf *rxbuf;
2793 : int i;
2794 :
2795 0 : sc = rxr->sc;
2796 0 : if (rxr->rx_buffers != NULL) {
2797 0 : for (i = 0; i < sc->num_rx_desc; i++) {
2798 0 : rxbuf = &rxr->rx_buffers[i];
2799 0 : if (rxbuf->buf != NULL) {
2800 0 : bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2801 : 0, rxbuf->map->dm_mapsize,
2802 : BUS_DMASYNC_POSTREAD);
2803 0 : bus_dmamap_unload(rxr->rxdma.dma_tag,
2804 : rxbuf->map);
2805 0 : m_freem(rxbuf->buf);
2806 0 : rxbuf->buf = NULL;
2807 0 : }
2808 0 : bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
2809 0 : rxbuf->map = NULL;
2810 : }
2811 0 : free(rxr->rx_buffers, M_DEVBUF,
2812 0 : sc->num_rx_desc * sizeof(struct ixgbe_rx_buf));
2813 0 : rxr->rx_buffers = NULL;
2814 0 : }
2815 0 : }
2816 :
2817 : /*********************************************************************
2818 : *
2819 : * This routine executes in interrupt context. It replenishes
2820 :  *  the mbufs in the descriptor ring and sends data which has been
2821 :  *  DMA'ed into host memory to the upper layer.
2822 : *
2823 : *********************************************************************/
2824 : int
2825 0 : ixgbe_rxeof(struct ix_queue *que)
2826 : {
2827 0 : struct ix_softc *sc = que->sc;
2828 0 : struct rx_ring *rxr = que->rxr;
2829 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
2830 0 : struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2831 : struct mbuf *mp, *sendmp;
2832 : uint8_t eop = 0;
2833 : uint16_t len, vtag;
2834 : uint32_t staterr = 0, ptype;
2835 : struct ixgbe_rx_buf *rxbuf, *nxbuf;
2836 : union ixgbe_adv_rx_desc *rxdesc;
2837 : size_t dsize = sizeof(union ixgbe_adv_rx_desc);
2838 : int i, nextp;
2839 :
2840 0 : if (!ISSET(ifp->if_flags, IFF_RUNNING))
2841 0 : return FALSE;
2842 :
2843 0 : i = rxr->next_to_check;
2844 0 : while (if_rxr_inuse(&rxr->rx_ring) > 0) {
2845 0 : bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2846 : dsize * i, dsize, BUS_DMASYNC_POSTREAD);
2847 :
2848 0 : rxdesc = &rxr->rx_base[i];
2849 0 : staterr = letoh32(rxdesc->wb.upper.status_error);
2850 0 : if (!ISSET(staterr, IXGBE_RXD_STAT_DD)) {
2851 0 : bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2852 : dsize * i, dsize,
2853 : BUS_DMASYNC_PREREAD);
2854 0 : break;
2855 : }
2856 :
2857 : /* Zero out the receive descriptors status */
2858 0 : rxdesc->wb.upper.status_error = 0;
2859 0 : rxbuf = &rxr->rx_buffers[i];
2860 :
2861 : /* pull the mbuf off the ring */
2862 0 : bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
2863 : rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2864 0 : bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
2865 :
2866 0 : mp = rxbuf->buf;
2867 0 : len = letoh16(rxdesc->wb.upper.length);
2868 0 : ptype = letoh32(rxdesc->wb.lower.lo_dword.data) &
2869 : IXGBE_RXDADV_PKTTYPE_MASK;
2870 0 : vtag = letoh16(rxdesc->wb.upper.vlan);
2871 0 : eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
2872 :
2873 0 : if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
2874 0 : sc->dropped_pkts++;
2875 :
2876 0 : if (rxbuf->fmp) {
2877 0 : m_freem(rxbuf->fmp);
2878 0 : rxbuf->fmp = NULL;
2879 0 : }
2880 :
2881 0 : m_freem(mp);
2882 0 : rxbuf->buf = NULL;
2883 0 : goto next_desc;
2884 : }
2885 :
2886 0 : if (mp == NULL) {
2887 0 : panic("%s: ixgbe_rxeof: NULL mbuf in slot %d "
2888 0 : "(nrx %d, filled %d)", sc->dev.dv_xname,
2889 0 : i, if_rxr_inuse(&rxr->rx_ring),
2890 0 : rxr->last_desc_filled);
2891 : }
2892 :
2893 : 		/* Currently no HW RSC support for the 82599 */
2894 0 : if (!eop) {
2895 : /*
2896 : * Figure out the next descriptor of this frame.
2897 : */
2898 0 : nextp = i + 1;
2899 0 : if (nextp == sc->num_rx_desc)
2900 : nextp = 0;
2901 0 : nxbuf = &rxr->rx_buffers[nextp];
2902 : /* prefetch(nxbuf); */
2903 0 : }
2904 :
2905 : /*
2906 : * Rather than using the fmp/lmp global pointers
2907 : * we now keep the head of a packet chain in the
2908 : * buffer struct and pass this along from one
2909 : * descriptor to the next, until we get EOP.
2910 : */
2911 0 : mp->m_len = len;
2912 : /*
2913 : 		 * See if there is a stored head (the start
2914 : 		 * of this packet's mbuf chain)
2915 : */
2916 0 : sendmp = rxbuf->fmp;
2917 0 : rxbuf->buf = rxbuf->fmp = NULL;
2918 :
2919 0 : if (sendmp != NULL) /* secondary frag */
2920 0 : sendmp->m_pkthdr.len += mp->m_len;
2921 : else {
2922 : /* first desc of a non-ps chain */
2923 : sendmp = mp;
2924 0 : sendmp->m_pkthdr.len = mp->m_len;
2925 : #if NVLAN > 0
2926 0 : if (staterr & IXGBE_RXD_STAT_VP) {
2927 0 : sendmp->m_pkthdr.ether_vtag = vtag;
2928 0 : sendmp->m_flags |= M_VLANTAG;
2929 0 : }
2930 : #endif
2931 : }
2932 :
2933 : /* Pass the head pointer on */
2934 0 : if (eop == 0) {
2935 0 : nxbuf->fmp = sendmp;
2936 : sendmp = NULL;
2937 0 : mp->m_next = nxbuf->buf;
2938 0 : } else { /* Sending this frame? */
2939 0 : rxr->rx_packets++;
2940 : /* capture data for AIM */
2941 0 : rxr->bytes += sendmp->m_pkthdr.len;
2942 0 : rxr->rx_bytes += sendmp->m_pkthdr.len;
2943 :
2944 0 : ixgbe_rx_checksum(staterr, sendmp, ptype);
2945 :
2946 0 : ml_enqueue(&ml, sendmp);
2947 : }
2948 : next_desc:
2949 0 : if_rxr_put(&rxr->rx_ring, 1);
2950 0 : bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2951 : dsize * i, dsize,
2952 : BUS_DMASYNC_PREREAD);
2953 :
2954 : /* Advance our pointers to the next descriptor. */
2955 0 : if (++i == sc->num_rx_desc)
2956 : i = 0;
2957 : }
2958 0 : rxr->next_to_check = i;
2959 :
2960 0 : if_input(ifp, &ml);
2961 :
2962 0 : if (!(staterr & IXGBE_RXD_STAT_DD))
2963 0 : return FALSE;
2964 :
2965 0 : return TRUE;
2966 0 : }
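/*
 * Editorial note on the chaining above: a frame larger than one receive
 * buffer spans several descriptors.  On every non-EOP descriptor the head
 * mbuf is parked in the *next* buffer's fmp pointer and the current mbuf is
 * linked onto the chain, so by the time the EOP descriptor is processed
 * sendmp points at the fully linked chain with m_pkthdr.len accumulated,
 * and only then is it queued on the mbuf list handed to if_input().
 */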
2967 :
2968 : /*********************************************************************
2969 : *
2970 : * Verify that the hardware indicated that the checksum is valid.
2971 : * Inform the stack about the status of checksum so that stack
2972 : * doesn't spend time verifying the checksum.
2973 : *
2974 : *********************************************************************/
2975 : void
2976 0 : ixgbe_rx_checksum(uint32_t staterr, struct mbuf * mp, uint32_t ptype)
2977 : {
2978 0 : uint16_t status = (uint16_t) staterr;
2979 0 : uint8_t errors = (uint8_t) (staterr >> 24);
2980 :
2981 0 : if (status & IXGBE_RXD_STAT_IPCS) {
2982 0 : if (!(errors & IXGBE_RXD_ERR_IPE)) {
2983 : /* IP Checksum Good */
2984 0 : mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
2985 0 : } else
2986 0 : mp->m_pkthdr.csum_flags = 0;
2987 : }
2988 0 : if (status & IXGBE_RXD_STAT_L4CS) {
2989 0 : if (!(errors & IXGBE_RXD_ERR_TCPE))
2990 0 : mp->m_pkthdr.csum_flags |=
2991 : M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
2992 : }
2993 0 : }
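/*
 * Editorial note: the casts above rely on where the bits tested here live
 * in the writeback status/error dword - the checksum status bits
 * (IXGBE_RXD_STAT_IPCS/L4CS) sit in the low 16 bits of staterr, while the
 * corresponding error bits (IXGBE_RXD_ERR_IPE/TCPE) sit in the top byte,
 * which is why errors is taken as staterr >> 24.
 */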
2994 :
2995 : void
2996 0 : ixgbe_setup_vlan_hw_support(struct ix_softc *sc)
2997 : {
2998 : uint32_t ctrl;
2999 : int i;
3000 :
3001 : /*
3002 : 	 * A soft reset zeroes out the VFTA, so
3003 : * we need to repopulate it now.
3004 : */
3005 0 : for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
3006 0 : if (sc->shadow_vfta[i] != 0)
3007 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTA(i),
3008 : sc->shadow_vfta[i]);
3009 : }
3010 :
3011 0 : ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_VLNCTRL);
3012 : #if 0
3013 : /* Enable the Filter Table if enabled */
3014 : if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3015 : ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3016 : ctrl |= IXGBE_VLNCTRL_VFE;
3017 : }
3018 : #endif
3019 0 : if (sc->hw.mac.type == ixgbe_mac_82598EB)
3020 0 : ctrl |= IXGBE_VLNCTRL_VME;
3021 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_VLNCTRL, ctrl);
3022 :
3023 : 	/* On the 82599 the VLAN enable is per-queue in RXDCTL */
3024 0 : if (sc->hw.mac.type != ixgbe_mac_82598EB) {
3025 0 : for (i = 0; i < sc->num_queues; i++) {
3026 0 : ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
3027 0 : ctrl |= IXGBE_RXDCTL_VME;
3028 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), ctrl);
3029 : }
3030 : }
3031 0 : }
3032 :
3033 : void
3034 0 : ixgbe_enable_intr(struct ix_softc *sc)
3035 : {
3036 0 : struct ixgbe_hw *hw = &sc->hw;
3037 0 : struct ix_queue *que = sc->queues;
3038 : uint32_t mask, fwsm;
3039 : int i;
3040 :
3041 : mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3042 : /* Enable Fan Failure detection */
3043 0 : if (hw->device_id == IXGBE_DEV_ID_82598AT)
3044 0 : mask |= IXGBE_EIMS_GPI_SDP1;
3045 :
3046 0 : switch (sc->hw.mac.type) {
3047 : case ixgbe_mac_82599EB:
3048 0 : mask |= IXGBE_EIMS_ECC;
3049 : /* Temperature sensor on some adapters */
3050 0 : mask |= IXGBE_EIMS_GPI_SDP0;
3051 : /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3052 : mask |= IXGBE_EIMS_GPI_SDP1;
3053 : mask |= IXGBE_EIMS_GPI_SDP2;
3054 0 : break;
3055 : case ixgbe_mac_X540:
3056 0 : mask |= IXGBE_EIMS_ECC;
3057 : /* Detect if Thermal Sensor is enabled */
3058 0 : fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3059 0 : if (fwsm & IXGBE_FWSM_TS_ENABLED)
3060 0 : mask |= IXGBE_EIMS_TS;
3061 : break;
3062 : case ixgbe_mac_X550:
3063 : case ixgbe_mac_X550EM_x:
3064 0 : mask |= IXGBE_EIMS_ECC;
3065 : /* MAC thermal sensor is automatically enabled */
3066 0 : mask |= IXGBE_EIMS_TS;
3067 : /* Some devices use SDP0 for important information */
3068 0 : if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3069 0 : hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3070 0 : mask |= IXGBE_EIMS_GPI_SDP0_X540;
3071 : default:
3072 : break;
3073 : }
3074 :
3075 0 : IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3076 :
3077 : /* With MSI-X we use auto clear */
3078 0 : if (sc->msix > 1) {
3079 : mask = IXGBE_EIMS_ENABLE_MASK;
3080 : /* Don't autoclear Link */
3081 : mask &= ~IXGBE_EIMS_OTHER;
3082 : mask &= ~IXGBE_EIMS_LSC;
3083 0 : IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3084 0 : }
3085 :
3086 : /*
3087 : 	 * Now enable all queues; this is done separately to
3088 : 	 * allow handling of the extended (beyond 32) MSI-X
3089 : 	 * vectors that the 82599 can use
3090 : */
3091 0 : for (i = 0; i < sc->num_queues; i++, que++)
3092 0 : ixgbe_enable_queue(sc, que->msix);
3093 :
3094 0 : IXGBE_WRITE_FLUSH(hw);
3095 0 : }
3096 :
3097 : void
3098 0 : ixgbe_disable_intr(struct ix_softc *sc)
3099 : {
3100 0 : if (sc->msix > 1)
3101 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
3102 0 : if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3103 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
3104 0 : } else {
3105 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
3106 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
3107 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
3108 : }
3109 0 : IXGBE_WRITE_FLUSH(&sc->hw);
3110 0 : }
3111 :
3112 : uint16_t
3113 0 : ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
3114 : {
3115 : struct pci_attach_args *pa;
3116 : uint32_t value;
3117 : int high = 0;
3118 :
3119 0 : if (reg & 0x2) {
3120 : high = 1;
3121 0 : reg &= ~0x2;
3122 0 : }
3123 0 : pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3124 0 : value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3125 :
3126 0 : if (high)
3127 0 : value >>= 16;
3128 :
3129 0 : return (value & 0xffff);
3130 : }
3131 :
3132 : void
3133 0 : ixgbe_write_pci_cfg(struct ixgbe_hw *hw, uint32_t reg, uint16_t value)
3134 : {
3135 : struct pci_attach_args *pa;
3136 : uint32_t rv;
3137 : int high = 0;
3138 :
3139 : /* Need to do read/mask/write... because 16 vs 32 bit!!! */
3140 0 : if (reg & 0x2) {
3141 : high = 1;
3142 0 : reg &= ~0x2;
3143 0 : }
3144 0 : pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3145 0 : rv = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3146 0 : if (!high)
3147 0 : rv = (rv & 0xffff0000) | value;
3148 : else
3149 0 : rv = (rv & 0xffff) | ((uint32_t)value << 16);
3150 0 : pci_conf_write(pa->pa_pc, pa->pa_tag, reg, rv);
3151 0 : }
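/*
 * Editorial worked example: PCI config space is accessed as 32-bit dwords,
 * so a 16-bit register at an offset with bit 1 set lives in the high half
 * of the dword below it.  Reading a (hypothetical) offset 0xA2 therefore
 * becomes pci_conf_read(..., 0xA0) >> 16, and a write to it merges the new
 * value into bits 31:16 of that dword before writing the dword back.
 */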
3152 :
3153 : /*
3154 : * Setup the correct IVAR register for a particular MSIX interrupt
3155 : * (yes this is all very magic and confusing :)
3156 : * - entry is the register array entry
3157 : * - vector is the MSIX vector for this queue
3158 : * - type is RX/TX/MISC
3159 : */
3160 : void
3161 0 : ixgbe_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, int8_t type)
3162 : {
3163 0 : struct ixgbe_hw *hw = &sc->hw;
3164 : uint32_t ivar, index;
3165 :
3166 0 : vector |= IXGBE_IVAR_ALLOC_VAL;
3167 :
3168 0 : switch (hw->mac.type) {
3169 :
3170 : case ixgbe_mac_82598EB:
3171 0 : if (type == -1)
3172 0 : entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3173 : else
3174 0 : entry += (type * 64);
3175 0 : index = (entry >> 2) & 0x1F;
3176 0 : ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3177 0 : ivar &= ~(0xFF << (8 * (entry & 0x3)));
3178 0 : ivar |= (vector << (8 * (entry & 0x3)));
3179 0 : IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3180 0 : break;
3181 :
3182 : case ixgbe_mac_82599EB:
3183 : case ixgbe_mac_X540:
3184 : case ixgbe_mac_X550:
3185 : case ixgbe_mac_X550EM_x:
3186 0 : if (type == -1) { /* MISC IVAR */
3187 0 : index = (entry & 1) * 8;
3188 0 : ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3189 0 : ivar &= ~(0xFF << index);
3190 0 : ivar |= (vector << index);
3191 0 : IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3192 0 : } else { /* RX/TX IVARS */
3193 0 : index = (16 * (entry & 1)) + (8 * type);
3194 0 : ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3195 0 : ivar &= ~(0xFF << index);
3196 0 : ivar |= (vector << index);
3197 0 : IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3198 : }
3199 :
3200 : default:
3201 : break;
3202 : }
3203 0 : }
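/*
 * Editorial worked example for the IVAR mapping above (illustrative numbers
 * only): on 82599-class MACs each 32-bit IVAR register holds entries for
 * two queues, RX in the low byte and TX in the next byte of each 16-bit
 * half.  Mapping RX of queue 3 to MSI-X vector 5 uses entry = 3, type = 0,
 * so index = 16 * (3 & 1) + 8 * 0 = 16 and bits 23:16 of IVAR(3 >> 1) =
 * IVAR(1) are set to (5 | IXGBE_IVAR_ALLOC_VAL).  On the 82598 the entry is
 * instead offset by type * 64 and selects one byte lane of IVAR(entry >> 2).
 */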
3204 :
3205 : void
3206 0 : ixgbe_configure_ivars(struct ix_softc *sc)
3207 : {
3208 : #if notyet
3209 : struct ix_queue *que = sc->queues;
3210 : uint32_t newitr;
3211 : int i;
3212 :
3213 : if (ixgbe_max_interrupt_rate > 0)
3214 : newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3215 : else
3216 : newitr = 0;
3217 :
3218 : for (i = 0; i < sc->num_queues; i++, que++) {
3219 : /* First the RX queue entry */
3220 : ixgbe_set_ivar(sc, i, que->msix, 0);
3221 : /* ... and the TX */
3222 : ixgbe_set_ivar(sc, i, que->msix, 1);
3223 : /* Set an Initial EITR value */
3224 : IXGBE_WRITE_REG(&sc->hw,
3225 : IXGBE_EITR(que->msix), newitr);
3226 : }
3227 :
3228 : /* For the Link interrupt */
3229 : ixgbe_set_ivar(sc, 1, sc->linkvec, -1);
3230 : #endif
3231 0 : }
3232 :
3233 : /*
3234 : * SFP module interrupts handler
3235 : */
3236 : void
3237 0 : ixgbe_handle_mod(struct ix_softc *sc)
3238 : {
3239 0 : struct ixgbe_hw *hw = &sc->hw;
3240 : uint32_t err;
3241 :
3242 0 : err = hw->phy.ops.identify_sfp(hw);
3243 0 : if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3244 0 : printf("%s: Unsupported SFP+ module type was detected!\n",
3245 0 : sc->dev.dv_xname);
3246 0 : return;
3247 : }
3248 0 : err = hw->mac.ops.setup_sfp(hw);
3249 0 : if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3250 0 : printf("%s: Setup failure - unsupported SFP+ module type!\n",
3251 0 : sc->dev.dv_xname);
3252 0 : return;
3253 : }
3254 : /* Set the optics type so system reports correctly */
3255 0 : ixgbe_setup_optics(sc);
3256 :
3257 0 : ixgbe_handle_msf(sc);
3258 0 : }
3259 :
3260 :
3261 : /*
3262 : * MSF (multispeed fiber) interrupts handler
3263 : */
3264 : void
3265 0 : ixgbe_handle_msf(struct ix_softc *sc)
3266 : {
3267 0 : struct ixgbe_hw *hw = &sc->hw;
3268 0 : uint32_t autoneg;
3269 0 : bool negotiate;
3270 :
3271 0 : autoneg = hw->phy.autoneg_advertised;
3272 0 : if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) {
3273 0 : if (hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate))
3274 0 : return;
3275 : }
3276 0 : if (hw->mac.ops.setup_link)
3277 0 : hw->mac.ops.setup_link(hw, autoneg, TRUE);
3278 :
3279 0 : ifmedia_delete_instance(&sc->media, IFM_INST_ANY);
3280 0 : ixgbe_add_media_types(sc);
3281 0 : ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
3282 0 : }
3283 :
3284 : /*
3285 : * External PHY interrupts handler
3286 : */
3287 : void
3288 0 : ixgbe_handle_phy(struct ix_softc *sc)
3289 : {
3290 0 : struct ixgbe_hw *hw = &sc->hw;
3291 : int error;
3292 :
3293 0 : error = hw->phy.ops.handle_lasi(hw);
3294 0 : if (error == IXGBE_ERR_OVERTEMP)
3295 0 : printf("%s: CRITICAL: EXTERNAL PHY OVER TEMP!! "
3296 : " PHY will downshift to lower power state!\n",
3297 0 : sc->dev.dv_xname);
3298 0 : else if (error)
3299 0 : printf("%s: Error handling LASI interrupt: %d\n",
3300 0 : sc->dev.dv_xname, error);
3301 :
3302 0 : }
3303 :
3304 : /**********************************************************************
3305 : *
3306 : * Update the board statistics counters.
3307 : *
3308 : **********************************************************************/
3309 : void
3310 0 : ixgbe_update_stats_counters(struct ix_softc *sc)
3311 : {
3312 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
3313 0 : struct ixgbe_hw *hw = &sc->hw;
3314 : uint64_t total_missed_rx = 0;
3315 : #ifdef IX_DEBUG
3316 : uint32_t missed_rx = 0, bprc, lxon, lxoff, total;
3317 : int i;
3318 : #endif
3319 :
3320 0 : sc->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3321 0 : sc->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3322 :
3323 : #ifdef IX_DEBUG
3324 : for (i = 0; i < 8; i++) {
3325 : uint32_t mp;
3326 : mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3327 : /* missed_rx tallies misses for the gprc workaround */
3328 : missed_rx += mp;
3329 : /* global total per queue */
3330 : sc->stats.mpc[i] += mp;
3331 : /* running comprehensive total for stats display */
3332 : total_missed_rx += sc->stats.mpc[i];
3333 : if (hw->mac.type == ixgbe_mac_82598EB)
3334 : sc->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3335 : }
3336 :
3337 : /* Hardware workaround, gprc counts missed packets */
3338 : sc->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3339 : sc->stats.gprc -= missed_rx;
3340 :
3341 : if (hw->mac.type != ixgbe_mac_82598EB) {
3342 : sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3343 : ((uint64_t)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3344 : sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3345 : ((uint64_t)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3346 : sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3347 : ((uint64_t)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3348 : sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3349 : sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3350 : } else {
3351 : sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3352 : sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3353 : /* 82598 only has a counter in the high register */
3354 : sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3355 : sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3356 : sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3357 : }
3358 :
3359 : /*
3360 : * Workaround: mprc hardware is incorrectly counting
3361 : * broadcasts, so for now we subtract those.
3362 : */
3363 : bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3364 : sc->stats.bprc += bprc;
3365 : sc->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3366 : if (hw->mac.type == ixgbe_mac_82598EB)
3367 : sc->stats.mprc -= bprc;
3368 :
3369 : sc->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3370 : sc->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3371 : sc->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3372 : sc->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3373 : sc->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3374 : sc->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3375 : sc->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3376 :
3377 : lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3378 : sc->stats.lxontxc += lxon;
3379 : lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3380 : sc->stats.lxofftxc += lxoff;
3381 : total = lxon + lxoff;
3382 :
3383 : sc->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3384 : sc->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3385 : sc->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3386 : sc->stats.gptc -= total;
3387 : sc->stats.mptc -= total;
3388 : sc->stats.ptc64 -= total;
3389 : sc->stats.gotc -= total * ETHER_MIN_LEN;
3390 :
3391 : sc->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3392 : sc->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3393 : sc->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3394 : sc->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3395 : sc->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3396 : sc->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3397 : sc->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3398 : sc->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3399 : sc->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3400 : sc->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3401 : #endif
3402 :
3403 : /* Fill out the OS statistics structure */
3404 0 : ifp->if_collisions = 0;
3405 0 : ifp->if_oerrors = sc->watchdog_events;
3406 0 : ifp->if_ierrors = total_missed_rx + sc->stats.crcerrs + sc->stats.rlec;
3407 0 : }
3408 :
3409 : #ifdef IX_DEBUG
3410 : /**********************************************************************
3411 : *
3412 : * This routine is called only when ixgbe_display_debug_stats is enabled.
3413 :  *  It provides a way to take a look at important statistics
3414 : * maintained by the driver and hardware.
3415 : *
3416 : **********************************************************************/
3417 : void
3418 : ixgbe_print_hw_stats(struct ix_softc * sc)
3419 : {
3420 : struct ifnet *ifp = &sc->arpcom.ac_if;
3421 :
3422 : printf("%s: missed pkts %llu, rx len errs %llu, crc errs %llu, "
3423 : "dropped pkts %lu, watchdog timeouts %ld, "
3424 : "XON rx %llu, XON tx %llu, XOFF rx %llu, XOFF tx %llu, "
3425 : "total pkts rx %llu, good pkts rx %llu, good pkts tx %llu, "
3426 : "tso tx %lu\n",
3427 : ifp->if_xname,
3428 : (long long)sc->stats.mpc[0],
3429 : (long long)sc->stats.roc + (long long)sc->stats.ruc,
3430 : (long long)sc->stats.crcerrs,
3431 : sc->dropped_pkts,
3432 : sc->watchdog_events,
3433 : (long long)sc->stats.lxonrxc,
3434 : (long long)sc->stats.lxontxc,
3435 : (long long)sc->stats.lxoffrxc,
3436 : (long long)sc->stats.lxofftxc,
3437 : (long long)sc->stats.tpr,
3438 : (long long)sc->stats.gprc,
3439 : (long long)sc->stats.gptc,
3440 : sc->tso_tx);
3441 : }
3442 : #endif
|