Line data Source code
1 : /**************************************************************************
2 :
3 : Copyright (c) 2001-2003, Intel Corporation
4 : All rights reserved.
5 :
6 : Redistribution and use in source and binary forms, with or without
7 : modification, are permitted provided that the following conditions are met:
8 :
9 : 1. Redistributions of source code must retain the above copyright notice,
10 : this list of conditions and the following disclaimer.
11 :
12 : 2. Redistributions in binary form must reproduce the above copyright
13 : notice, this list of conditions and the following disclaimer in the
14 : documentation and/or other materials provided with the distribution.
15 :
16 : 3. Neither the name of the Intel Corporation nor the names of its
17 : contributors may be used to endorse or promote products derived from
18 : this software without specific prior written permission.
19 :
20 : THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 : AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 : IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 : ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 : LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 : CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 : SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 : INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 : CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 : ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 : POSSIBILITY OF SUCH DAMAGE.
31 :
32 : ***************************************************************************/
33 :
34 : /* $OpenBSD: if_em.c,v 1.341 2018/04/07 11:56:40 sf Exp $ */
35 : /* $FreeBSD: if_em.c,v 1.46 2004/09/29 18:28:28 mlaier Exp $ */
36 :
37 : #include <dev/pci/if_em.h>
38 : #include <dev/pci/if_em_soc.h>
39 :
40 : /*********************************************************************
41 : * Driver version
42 : *********************************************************************/
43 :
44 : #define EM_DRIVER_VERSION "6.2.9"
45 :
46 : /*********************************************************************
47 : * PCI Device ID Table
48 : *********************************************************************/
const struct pci_matchid em_devices[] = {
	/*
	 * Vendor/product ID pairs matched by em_probe() via
	 * pci_matchbyid(); the table length is taken with nitems().
	 */
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_CPR_DPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_SDS_DPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_CPR_SPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_SDS_SPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_LF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_CPR_K },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_AF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_AT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_CPR_LP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FBR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SDS_DUAL },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SDS_QUAD },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_PM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L_PL_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L_PL_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573V_PM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QP_PM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_CU_ET2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82577LC },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82577LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82578DC },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82578DM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82579LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82579V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_NF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_NF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM_3 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V_3 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM6 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM7 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM8 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM9 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V8 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V9 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_BP_1GBPS },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_BP_2_5GBPS },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_82567V_3 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE_G },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE_GT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_C },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_M },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_M_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_BM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE_G },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE_GT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_C },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_D_BM_LF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_D_BM_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_D_BM_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_LF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_3 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_4 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_5 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_6 }
};
215 :
216 : /*********************************************************************
217 : * Function prototypes
218 : *********************************************************************/
219 : int em_probe(struct device *, void *, void *);
220 : void em_attach(struct device *, struct device *, void *);
221 : void em_defer_attach(struct device*);
222 : int em_detach(struct device *, int);
223 : int em_activate(struct device *, int);
224 : int em_intr(void *);
225 : void em_start(struct ifqueue *);
226 : int em_ioctl(struct ifnet *, u_long, caddr_t);
227 : void em_watchdog(struct ifnet *);
228 : void em_init(void *);
229 : void em_stop(void *, int);
230 : void em_media_status(struct ifnet *, struct ifmediareq *);
231 : int em_media_change(struct ifnet *);
232 : uint64_t em_flowstatus(struct em_softc *);
233 : void em_identify_hardware(struct em_softc *);
234 : int em_allocate_pci_resources(struct em_softc *);
235 : void em_free_pci_resources(struct em_softc *);
236 : void em_local_timer(void *);
237 : int em_hardware_init(struct em_softc *);
238 : void em_setup_interface(struct em_softc *);
239 : int em_setup_transmit_structures(struct em_softc *);
240 : void em_initialize_transmit_unit(struct em_softc *);
241 : int em_setup_receive_structures(struct em_softc *);
242 : void em_initialize_receive_unit(struct em_softc *);
243 : void em_enable_intr(struct em_softc *);
244 : void em_disable_intr(struct em_softc *);
245 : void em_free_transmit_structures(struct em_softc *);
246 : void em_free_receive_structures(struct em_softc *);
247 : void em_update_stats_counters(struct em_softc *);
248 : void em_disable_aspm(struct em_softc *);
249 : void em_txeof(struct em_softc *);
250 : int em_allocate_receive_structures(struct em_softc *);
251 : int em_allocate_transmit_structures(struct em_softc *);
252 : int em_rxfill(struct em_softc *);
253 : int em_rxeof(struct em_softc *);
254 : void em_receive_checksum(struct em_softc *, struct em_rx_desc *,
255 : struct mbuf *);
256 : u_int em_transmit_checksum_setup(struct em_softc *, struct mbuf *, u_int,
257 : u_int32_t *, u_int32_t *);
258 : void em_iff(struct em_softc *);
259 : #ifdef EM_DEBUG
260 : void em_print_hw_stats(struct em_softc *);
261 : #endif
262 : void em_update_link_status(struct em_softc *);
263 : int em_get_buf(struct em_softc *, int);
264 : void em_enable_hw_vlans(struct em_softc *);
265 : u_int em_encap(struct em_softc *, struct mbuf *);
266 : void em_smartspeed(struct em_softc *);
267 : int em_82547_fifo_workaround(struct em_softc *, int);
268 : void em_82547_update_fifo_head(struct em_softc *, int);
269 : int em_82547_tx_fifo_reset(struct em_softc *);
270 : void em_82547_move_tail(void *arg);
271 : void em_82547_move_tail_locked(struct em_softc *);
272 : int em_dma_malloc(struct em_softc *, bus_size_t, struct em_dma_alloc *);
273 : void em_dma_free(struct em_softc *, struct em_dma_alloc *);
274 : u_int32_t em_fill_descriptors(u_int64_t address, u_int32_t length,
275 : PDESC_ARRAY desc_array);
276 : void em_flush_tx_ring(struct em_softc *);
277 : void em_flush_rx_ring(struct em_softc *);
278 : void em_flush_desc_rings(struct em_softc *);
279 :
280 : /*********************************************************************
281 : * OpenBSD Device Interface Entry Points
282 : *********************************************************************/
283 :
/* Autoconf glue: softc size plus match/attach/detach/activate entry points. */
struct cfattach em_ca = {
	sizeof(struct em_softc), em_probe, em_attach, em_detach,
	em_activate
};

/* Driver name "em", classed as a network interface device. */
struct cfdriver em_cd = {
	NULL, "em", DV_IFNET
};

/*
 * PHY smart-power-down knob; FALSE (disabled) by default.
 * NOTE(review): consumed elsewhere in the driver — not visible in this chunk.
 */
static int em_smart_pwr_down = FALSE;
294 :
295 : /*********************************************************************
296 : * Device identification routine
297 : *
298 : * em_probe determines if the driver should be loaded on
299 : * adapter based on PCI vendor/device id of the adapter.
300 : *
301 : * return 0 on no match, positive on match
302 : *********************************************************************/
303 :
304 : int
305 0 : em_probe(struct device *parent, void *match, void *aux)
306 : {
307 : INIT_DEBUGOUT("em_probe: begin");
308 :
309 0 : return (pci_matchbyid((struct pci_attach_args *)aux, em_devices,
310 : nitems(em_devices)));
311 : }
312 :
313 : void
314 0 : em_defer_attach(struct device *self)
315 : {
316 0 : struct em_softc *sc = (struct em_softc *)self;
317 0 : struct pci_attach_args *pa = &sc->osdep.em_pa;
318 0 : pci_chipset_tag_t pc = pa->pa_pc;
319 : void *gcu;
320 :
321 : INIT_DEBUGOUT("em_defer_attach: begin");
322 :
323 0 : if ((gcu = em_lookup_gcu(self)) == 0) {
324 0 : printf("%s: No GCU found, defered attachment failed\n",
325 0 : DEVNAME(sc));
326 :
327 0 : if (sc->sc_intrhand)
328 0 : pci_intr_disestablish(pc, sc->sc_intrhand);
329 0 : sc->sc_intrhand = 0;
330 :
331 0 : em_stop(sc, 1);
332 :
333 0 : em_free_pci_resources(sc);
334 :
335 0 : sc->sc_rx_desc_ring = NULL;
336 0 : em_dma_free(sc, &sc->sc_rx_dma);
337 0 : sc->sc_tx_desc_ring = NULL;
338 0 : em_dma_free(sc, &sc->sc_tx_dma);
339 :
340 0 : return;
341 : }
342 :
343 0 : sc->hw.gcu = gcu;
344 :
345 0 : em_attach_miibus(self);
346 :
347 0 : em_setup_interface(sc);
348 :
349 0 : em_setup_link(&sc->hw);
350 :
351 0 : em_update_link_status(sc);
352 0 : }
353 :
354 : /*********************************************************************
355 : * Device initialization routine
356 : *
357 : * The attach entry point is called when the driver is being loaded.
358 : * This routine identifies the type of hardware, allocates all resources
359 : * and initializes the hardware.
360 : *
361 : *********************************************************************/
362 :
/*
 * Attach: identify the adapter, set driver defaults, allocate PCI and
 * DMA resources, initialize the hardware and register the network
 * interface.  On an EAGAIN from em_hardware_init() the tail of the
 * attach is postponed to em_defer_attach() via config_defer().
 * Errors unwind through the goto labels at the bottom.
 */
void
em_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct em_softc *sc;
	int defer = 0;

	INIT_DEBUGOUT("em_attach: begin");

	sc = (struct em_softc *)self;
	sc->sc_dmat = pa->pa_dmat;
	sc->osdep.em_pa = *pa;

	/* Periodic tick, plus the 82547 Tx-FIFO tail mover. */
	timeout_set(&sc->timer_handle, em_local_timer, sc);
	timeout_set(&sc->tx_fifo_timer_handle, em_82547_move_tail, sc);

	/* Determine hardware revision */
	em_identify_hardware(sc);

	/*
	 * Only use MSI on the newer PCIe parts, with the exception
	 * of 82571/82572 due to "Byte Enables 2 and 3 Are Not Set" errata
	 */
	if (sc->hw.mac_type <= em_82572)
		sc->osdep.em_pa.pa_flags &= ~PCI_FLAGS_MSI_ENABLED;

	/* Parameters (to be read from user) */
	if (sc->hw.mac_type >= em_82544) {
		sc->sc_tx_slots = EM_MAX_TXD;
		sc->sc_rx_slots = EM_MAX_RXD;
	} else {
		/* pre-82544 parts support fewer descriptors */
		sc->sc_tx_slots = EM_MAX_TXD_82543;
		sc->sc_rx_slots = EM_MAX_RXD_82543;
	}
	sc->tx_int_delay = EM_TIDV;
	sc->tx_abs_int_delay = EM_TADV;
	sc->rx_int_delay = EM_RDTR;
	sc->rx_abs_int_delay = EM_RADV;
	sc->hw.autoneg = DO_AUTO_NEG;
	sc->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
	sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	sc->hw.tbi_compatibility_en = TRUE;
	sc->sc_rx_buffer_len = EM_RXBUFFER_2048;

	sc->hw.phy_init_script = 1;
	sc->hw.phy_reset_disable = FALSE;

#ifndef EM_MASTER_SLAVE
	sc->hw.master_slave = em_ms_hw_default;
#else
	sc->hw.master_slave = EM_MASTER_SLAVE;
#endif

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	sc->hw.report_tx_early = 1;

	if (em_allocate_pci_resources(sc))
		goto err_pci;

	/* Initialize eeprom parameters */
	em_init_eeprom_params(&sc->hw);

	/*
	 * Set the max frame size assuming standard Ethernet
	 * sized frames.
	 */
	switch (sc->hw.mac_type) {
	case em_82573:
	{
		uint16_t eeprom_data = 0;

		/*
		 * 82573 only supports Jumbo frames
		 * if ASPM is disabled.
		 */
		em_read_eeprom(&sc->hw, EEPROM_INIT_3GIO_3,
		    1, &eeprom_data);
		if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
			sc->hw.max_frame_size = ETHER_MAX_LEN;
			break;
		}
		/* Allow Jumbo frames */
		/* FALLTHROUGH */
	}
	case em_82571:
	case em_82572:
	case em_82574:
	case em_82575:
	case em_82580:
	case em_i210:
	case em_i350:
	case em_ich9lan:
	case em_ich10lan:
	case em_pch2lan:
	case em_pch_lpt:
	case em_pch_spt:
	case em_pch_cnp:
	case em_80003es2lan:
		/* 9K Jumbo Frame size */
		sc->hw.max_frame_size = 9234;
		break;
	case em_pchlan:
		sc->hw.max_frame_size = 4096;
		break;
	case em_82542_rev2_0:
	case em_82542_rev2_1:
	case em_ich8lan:
		/* Adapters that do not support Jumbo frames */
		sc->hw.max_frame_size = ETHER_MAX_LEN;
		break;
	default:
		sc->hw.max_frame_size =
		    MAX_JUMBO_FRAME_SIZE;
	}

	sc->hw.min_frame_size =
	    ETHER_MIN_LEN + ETHER_CRC_LEN;

	/* Allocate Transmit Descriptor ring */
	if (em_dma_malloc(sc, sc->sc_tx_slots * sizeof(struct em_tx_desc),
	    &sc->sc_tx_dma) != 0) {
		printf("%s: Unable to allocate tx_desc memory\n",
		    DEVNAME(sc));
		goto err_tx_desc;
	}
	sc->sc_tx_desc_ring = (struct em_tx_desc *)sc->sc_tx_dma.dma_vaddr;

	/* Allocate Receive Descriptor ring */
	if (em_dma_malloc(sc, sc->sc_rx_slots * sizeof(struct em_rx_desc),
	    &sc->sc_rx_dma) != 0) {
		printf("%s: Unable to allocate rx_desc memory\n",
		    DEVNAME(sc));
		goto err_rx_desc;
	}
	sc->sc_rx_desc_ring = (struct em_rx_desc *)sc->sc_rx_dma.dma_vaddr;

	/* Initialize the hardware */
	if ((defer = em_hardware_init(sc))) {
		if (defer == EAGAIN)
			/* finish later in em_defer_attach() */
			config_defer(self, em_defer_attach);
		else {
			printf("%s: Unable to initialize the hardware\n",
			    DEVNAME(sc));
			goto err_hw_init;
		}
	}

	/*
	 * Multi-function parts: derive the PCI function number from the
	 * STATUS register and pick the matching SW/FW semaphore bit.
	 */
	if (sc->hw.mac_type == em_80003es2lan || sc->hw.mac_type == em_82575 ||
	    sc->hw.mac_type == em_82580 || sc->hw.mac_type == em_i210 ||
	    sc->hw.mac_type == em_i350) {
		uint32_t reg = EM_READ_REG(&sc->hw, E1000_STATUS);
		sc->hw.bus_func = (reg & E1000_STATUS_FUNC_MASK) >>
		    E1000_STATUS_FUNC_SHIFT;

		switch (sc->hw.bus_func) {
		case 0:
			sc->hw.swfw = E1000_SWFW_PHY0_SM;
			break;
		case 1:
			sc->hw.swfw = E1000_SWFW_PHY1_SM;
			break;
		case 2:
			sc->hw.swfw = E1000_SWFW_PHY2_SM;
			break;
		case 3:
			sc->hw.swfw = E1000_SWFW_PHY3_SM;
			break;
		}
	} else {
		sc->hw.bus_func = 0;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (em_read_mac_addr(&sc->hw) < 0) {
		printf("%s: EEPROM read error while reading mac address\n",
		    DEVNAME(sc));
		goto err_mac_addr;
	}

	bcopy(sc->hw.mac_addr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);

	/* Setup OS specific network interface */
	if (!defer)
		em_setup_interface(sc);

	/* Initialize statistics */
	em_clear_hw_cntrs(&sc->hw);
#ifndef SMALL_KERNEL
	em_update_stats_counters(sc);
#endif
	sc->hw.get_link_status = 1;
	if (!defer)
		em_update_link_status(sc);

#ifdef EM_DEBUG
	printf(", mac %#x phy %#x", sc->hw.mac_type, sc->hw.phy_type);
#endif
	printf(", address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));

	/* Indicate SOL/IDER usage */
	if (em_check_phy_reset_block(&sc->hw))
		printf("%s: PHY reset is blocked due to SOL/IDER session.\n",
		    DEVNAME(sc));

	/* Identify 82544 on PCI-X */
	em_get_bus_info(&sc->hw);
	if (sc->hw.bus_type == em_bus_type_pcix &&
	    sc->hw.mac_type == em_82544)
		sc->pcix_82544 = TRUE;
	else
		sc->pcix_82544 = FALSE;

	sc->hw.icp_xxxx_is_link_up = FALSE;

	INIT_DEBUGOUT("em_attach: end");
	return;

	/* Error unwinding: free rings and PCI resources in reverse order. */
err_mac_addr:
err_hw_init:
	sc->sc_rx_desc_ring = NULL;
	em_dma_free(sc, &sc->sc_rx_dma);
err_rx_desc:
	sc->sc_tx_desc_ring = NULL;
	em_dma_free(sc, &sc->sc_tx_dma);
err_tx_desc:
err_pci:
	em_free_pci_resources(sc);
}
594 :
595 : /*********************************************************************
596 : * Transmit entry point
597 : *
598 : * em_start is called by the stack to initiate a transmit.
599 : * The driver will remain in this routine as long as there are
600 : * packets to transmit and transmit resources are available.
601 : * In case resources are not available stack is notified and
602 : * the packet is requeued.
603 : **********************************************************************/
604 :
/*
 * Transmit start: drain the interface queue into the Tx descriptor
 * ring.  Stops when the queue is empty or fewer than
 * EM_MAX_SCATTER + 2 slots remain, in which case the queue is marked
 * oactive.  The Tx tail register is written once at the end (except
 * on 82547, which updates it per-packet via its FIFO workaround).
 */
void
em_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct em_softc *sc = ifp->if_softc;
	u_int head, free, used;
	struct mbuf *m;
	int post = 0;

	/* No link: nothing can be sent, drop everything queued. */
	if (!sc->link_active) {
		ifq_purge(ifq);
		return;
	}

	/* calculate free space (ring is circular: head..tail) */
	head = sc->sc_tx_desc_head;
	free = sc->sc_tx_desc_tail;
	if (free <= head)
		free += sc->sc_tx_slots;
	free -= head;

	if (sc->hw.mac_type != em_82547) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dma.dma_map,
		    0, sc->sc_tx_dma.dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	for (;;) {
		/* use 2 because cksum setup can use an extra slot */
		if (EM_MAX_SCATTER + 2 > free) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		/* used == 0 means em_encap() rejected the packet. */
		used = em_encap(sc, m);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);

		free -= used;

#if NBPFILTER > 0
		/* Send a copy of the frame to the BPF listener */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/* Set timeout in case hardware has problems transmitting */
		ifp->if_timer = EM_TX_TIMEOUT;

		/* 82547: per-packet tail handling (Tx FIFO workaround). */
		if (sc->hw.mac_type == em_82547) {
			int len = m->m_pkthdr.len;

			if (sc->link_duplex == HALF_DUPLEX)
				em_82547_move_tail_locked(sc);
			else {
				E1000_WRITE_REG(&sc->hw, TDT,
				    sc->sc_tx_desc_head);
				em_82547_update_fifo_head(sc, len);
			}
		}

		post = 1;
	}

	if (sc->hw.mac_type != em_82547) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dma.dma_map,
		    0, sc->sc_tx_dma.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Advance the Transmit Descriptor Tail (Tdt),
		 * this tells the E1000 that this frame is
		 * available to transmit.
		 */
		if (post)
			E1000_WRITE_REG(&sc->hw, TDT, sc->sc_tx_desc_head);
	}
}
690 :
691 : /*********************************************************************
692 : * Ioctl entry point
693 : *
694 : * em_ioctl is called when the user wants to configure the
695 : * interface.
696 : *
697 : * return 0 on success, positive on failure
698 : **********************************************************************/
699 :
/*
 * Interface ioctl handler, run at splnet().  Handles address/flags
 * changes, media get/set and Rx ring info; everything else goes to
 * ether_ioctl().  ENETRESET from any path is converted into a filter
 * reprogram (em_iff) on a running interface rather than a full reset.
 * Returns 0 on success, errno on failure.
 */
int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	int error = 0;
	struct ifreq *ifr = (struct ifreq *) data;
	struct em_softc *sc = ifp->if_softc;
	int s;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
			       "Addr)");
		if (!(ifp->if_flags & IFF_UP)) {
			ifp->if_flags |= IFF_UP;
			em_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				em_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				em_stop(sc, 0);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (em_check_phy_reset_block(&sc->hw)) {
			printf("%s: Media change is blocked due to SOL/IDER session.\n",
			    DEVNAME(sc));
			break;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, EM_MCLBYTES, &sc->sc_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			em_disable_intr(sc);
			em_iff(sc);
			/* 82542 rev2.0 needs its Rx unit reprogrammed too */
			if (sc->hw.mac_type == em_82542_rev2_0)
				em_initialize_receive_unit(sc);
			em_enable_intr(sc);
		}
		error = 0;
	}

	splx(s);
	return (error);
}
768 :
769 : /*********************************************************************
770 : * Watchdog entry point
771 : *
772 : * This routine is called whenever hardware quits transmitting.
773 : *
774 : **********************************************************************/
775 :
/*
 * Transmit watchdog: fires when ifp->if_timer expires without Tx
 * completion.  If the hardware is merely paused (STATUS.TXOFF set by
 * received pause frames) the timer is re-armed; otherwise the ring
 * state is logged and the adapter is reinitialized.
 */
void
em_watchdog(struct ifnet *ifp)
{
	struct em_softc *sc = ifp->if_softc;

	/* If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_TXOFF) {
		ifp->if_timer = EM_TX_TIMEOUT;
		return;
	}
	printf("%s: watchdog: head %u tail %u TDH %u TDT %u\n",
	    DEVNAME(sc),
	    sc->sc_tx_desc_head, sc->sc_tx_desc_tail,
	    E1000_READ_REG(&sc->hw, TDH), E1000_READ_REG(&sc->hw, TDT));

	/* Full re-init recovers the stuck Tx path. */
	em_init(sc);

	sc->watchdog_events++;
}
797 :
798 : /*********************************************************************
799 : * Init entry point
800 : *
801 : * This routine is used in two ways. It is used by the stack as
802 : * init entry point in network interface structure. It is also used
803 : * by the driver as a hw/sw initialization routine to get to a
804 : * consistent state.
805 : *
806 : **********************************************************************/
807 :
void
em_init(void *arg)
{
	struct em_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t pba;
	int s;

	s = splnet();

	INIT_DEBUGOUT("em_init: begin");

	/* Quiesce the chip and release old rings before reprogramming. */
	em_stop(sc, 0);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 * Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (sc->hw.mac_type) {
	case em_82547:
	case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (sc->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		/* Seed the 82547 Tx FIFO hang workaround state. */
		sc->tx_fifo_head = 0;
		sc->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		sc->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	case em_82571:
	case em_82572: /* Total Packet Buffer on these is 48k */
	case em_82575:
	case em_82580:
	case em_80003es2lan:
	case em_i350:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case em_i210:
		pba = E1000_PBA_34K;
		break;
	case em_82573: /* 82573: Total Packet Buffer is 32K */
		/* Jumbo frames not supported */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case em_82574: /* Total Packet Buffer is 40k */
		pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;
	case em_ich8lan:
		pba = E1000_PBA_8K;
		break;
	case em_ich9lan:
	case em_ich10lan:
		/* Boost Receive side for jumbo frames */
		if (sc->hw.max_frame_size > EM_RXBUFFER_4096)
			pba = E1000_PBA_14K;
		else
			pba = E1000_PBA_10K;
		break;
	case em_pchlan:
	case em_pch2lan:
	case em_pch_lpt:
	case em_pch_spt:
	case em_pch_cnp:
		pba = E1000_PBA_26K;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if (sc->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}
	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	/*
	 * PBA must be written before em_hardware_init(): the flow control
	 * watermarks computed there are derived from this register.
	 */
	E1000_WRITE_REG(&sc->hw, PBA, pba);

	/* Get the latest mac address, User can use a LAA */
	bcopy(sc->sc_ac.ac_enaddr, sc->hw.mac_addr, ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(sc)) {
		printf("%s: Unable to initialize the hardware\n",
		    DEVNAME(sc));
		splx(s);
		return;
	}
	em_update_link_status(sc);

	/* Program the VLAN ethertype and enable hw tagging if requested. */
	E1000_WRITE_REG(&sc->hw, VET, ETHERTYPE_VLAN);
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		em_enable_hw_vlans(sc);

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(sc)) {
		printf("%s: Could not setup transmit structures\n",
		    DEVNAME(sc));
		em_stop(sc, 0);
		splx(s);
		return;
	}
	em_initialize_transmit_unit(sc);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(sc)) {
		printf("%s: Could not setup receive structures\n",
		    DEVNAME(sc));
		em_stop(sc, 0);
		splx(s);
		return;
	}
	em_initialize_receive_unit(sc);

	/* Program promiscuous mode and multicast filters. */
	em_iff(sc);

	/* Mark the interface up and ready for transmit. */
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Arm the once-a-second statistics/link timer. */
	timeout_add_sec(&sc->timer_handle, 1);
	em_clear_hw_cntrs(&sc->hw);
	em_enable_intr(sc);

	/* Don't reset the phy next time init gets called */
	sc->hw.phy_reset_disable = TRUE;

	splx(s);
}
941 :
942 : /*********************************************************************
943 : *
944 : * Interrupt Service routine
945 : *
946 : **********************************************************************/
int
em_intr(void *arg)
{
	struct em_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	u_int32_t reg_icr, test_icr;

	/* Reading ICR acknowledges (clears) the pending cause bits. */
	test_icr = reg_icr = E1000_READ_REG(&sc->hw, ICR);
	/*
	 * 82571 and newer set INT_ASSERTED when the interrupt is really
	 * ours; on older parts any non-zero ICR value counts.
	 */
	if (sc->hw.mac_type >= em_82571)
		test_icr = (reg_icr & E1000_ICR_INT_ASSERTED);
	if (!test_icr)
		return (0);	/* not our interrupt */

	if (ifp->if_flags & IFF_RUNNING) {
		/* Reclaim completed transmit descriptors. */
		em_txeof(sc);

		/*
		 * Harvest received packets; refill the Rx ring if we
		 * consumed any or the hardware reported an overrun (RXO).
		 */
		if (em_rxeof(sc) || ISSET(reg_icr, E1000_ICR_RXO)) {
			if (em_rxfill(sc)) {
				/* Advertise the new slots to the hardware. */
				E1000_WRITE_REG(&sc->hw, RDT,
				    sc->sc_rx_desc_head);
			}
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		KERNEL_LOCK();
		sc->hw.get_link_status = 1;
		em_check_for_link(&sc->hw);
		em_update_link_status(sc);
		KERNEL_UNLOCK();
	}

	return (1);
}
982 :
983 : /*********************************************************************
984 : *
985 : * Media Ioctl callback
986 : *
987 : * This routine is called whenever the user queries the status of
988 : * the interface using ifconfig.
989 : *
990 : **********************************************************************/
void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct em_softc *sc = ifp->if_softc;
	uint64_t fiber_type = IFM_1000_SX;
	u_int16_t gsr;

	INIT_DEBUGOUT("em_media_status: begin");

	/* Refresh link state before reporting it. */
	em_check_for_link(&sc->hw);
	em_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->hw.media_type == em_media_type_fiber ||
	    sc->hw.media_type == em_media_type_internal_serdes) {
		/* The 82545 fiber part is 1000base-LX; others report SX. */
		if (sc->hw.mac_type == em_82545)
			fiber_type = IFM_1000_LX;
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		/* Copper: map the negotiated speed onto an ifmedia subtype. */
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}

		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= em_flowstatus(sc) | IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		/* For gigabit, report whether we resolved as master. */
		if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_1000_T) {
			em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &gsr);
			if (gsr & SR_1000T_MS_CONFIG_RES)
				ifmr->ifm_active |= IFM_ETH_MASTER;
		}
	}
}
1043 :
1044 : /*********************************************************************
1045 : *
1046 : * Media Ioctl callback
1047 : *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options with ifconfig.
1050 : *
1051 : **********************************************************************/
1052 : int
1053 0 : em_media_change(struct ifnet *ifp)
1054 : {
1055 0 : struct em_softc *sc = ifp->if_softc;
1056 0 : struct ifmedia *ifm = &sc->media;
1057 :
1058 : INIT_DEBUGOUT("em_media_change: begin");
1059 :
1060 0 : if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1061 0 : return (EINVAL);
1062 :
1063 0 : switch (IFM_SUBTYPE(ifm->ifm_media)) {
1064 : case IFM_AUTO:
1065 0 : sc->hw.autoneg = DO_AUTO_NEG;
1066 0 : sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1067 0 : break;
1068 : case IFM_1000_LX:
1069 : case IFM_1000_SX:
1070 : case IFM_1000_T:
1071 0 : sc->hw.autoneg = DO_AUTO_NEG;
1072 0 : sc->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1073 0 : break;
1074 : case IFM_100_TX:
1075 0 : sc->hw.autoneg = FALSE;
1076 0 : sc->hw.autoneg_advertised = 0;
1077 0 : if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1078 0 : sc->hw.forced_speed_duplex = em_100_full;
1079 : else
1080 0 : sc->hw.forced_speed_duplex = em_100_half;
1081 : break;
1082 : case IFM_10_T:
1083 0 : sc->hw.autoneg = FALSE;
1084 0 : sc->hw.autoneg_advertised = 0;
1085 0 : if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1086 0 : sc->hw.forced_speed_duplex = em_10_full;
1087 : else
1088 0 : sc->hw.forced_speed_duplex = em_10_half;
1089 : break;
1090 : default:
1091 0 : printf("%s: Unsupported media type\n", DEVNAME(sc));
1092 0 : }
1093 :
1094 : /*
1095 : * As the speed/duplex settings may have changed we need to
1096 : * reset the PHY.
1097 : */
1098 0 : sc->hw.phy_reset_disable = FALSE;
1099 :
1100 0 : em_init(sc);
1101 :
1102 0 : return (0);
1103 0 : }
1104 :
1105 : uint64_t
1106 0 : em_flowstatus(struct em_softc *sc)
1107 : {
1108 0 : u_int16_t ar, lpar;
1109 :
1110 0 : if (sc->hw.media_type == em_media_type_fiber ||
1111 0 : sc->hw.media_type == em_media_type_internal_serdes)
1112 0 : return (0);
1113 :
1114 0 : em_read_phy_reg(&sc->hw, PHY_AUTONEG_ADV, &ar);
1115 0 : em_read_phy_reg(&sc->hw, PHY_LP_ABILITY, &lpar);
1116 :
1117 0 : if ((ar & NWAY_AR_PAUSE) && (lpar & NWAY_LPAR_PAUSE))
1118 0 : return (IFM_FLOW|IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE);
1119 0 : else if (!(ar & NWAY_AR_PAUSE) && (ar & NWAY_AR_ASM_DIR) &&
1120 0 : (lpar & NWAY_LPAR_PAUSE) && (lpar & NWAY_LPAR_ASM_DIR))
1121 0 : return (IFM_FLOW|IFM_ETH_TXPAUSE);
1122 0 : else if ((ar & NWAY_AR_PAUSE) && (ar & NWAY_AR_ASM_DIR) &&
1123 0 : !(lpar & NWAY_LPAR_PAUSE) && (lpar & NWAY_LPAR_ASM_DIR))
1124 0 : return (IFM_FLOW|IFM_ETH_RXPAUSE);
1125 :
1126 0 : return (0);
1127 0 : }
1128 :
1129 : /*********************************************************************
1130 : *
1131 : * This routine maps the mbufs to tx descriptors.
1132 : *
1133 : * return 0 on success, positive on failure
1134 : **********************************************************************/
u_int
em_encap(struct em_softc *sc, struct mbuf *m)
{
	struct em_packet *pkt;
	struct em_tx_desc *desc;
	bus_dmamap_t map;
	u_int32_t txd_upper, txd_lower;
	u_int head, last, used = 0;
	int i, j;

	/* For 82544 Workaround */
	DESC_ARRAY desc_array;
	u_int32_t array_elements;

	/* get a dmamap for this packet from the next free slot */
	head = sc->sc_tx_desc_head;
	pkt = &sc->sc_tx_pkts_ring[head];
	map = pkt->pkt_map;

	switch (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
	case 0:
		break;
	case EFBIG:
		/* Too many segments: compact the mbuf chain, retry once. */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		sc->no_tx_dma_setup++;
		/* Nothing consumed; the caller still owns the mbuf. */
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, map,
	    0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (sc->hw.mac_type == em_82547) {
		/* The 82547 FIFO workaround walks the ring; sync it first. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dma.dma_map,
		    0, sc->sc_tx_dma.dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	/*
	 * Emit a checksum-offload context descriptor where the MAC
	 * supports it (82543+, excluding the igb-class parts below);
	 * otherwise send with no offload flags.
	 */
	if (sc->hw.mac_type >= em_82543 && sc->hw.mac_type != em_82575 &&
	    sc->hw.mac_type != em_82580 && sc->hw.mac_type != em_i210 &&
	    sc->hw.mac_type != em_i350) {
		used += em_transmit_checksum_setup(sc, m, head,
		    &txd_upper, &txd_lower);
	} else {
		txd_upper = txd_lower = 0;
	}

	/* Advance past any context descriptor(s), wrapping the ring. */
	head += used;
	if (head >= sc->sc_tx_slots)
		head -= sc->sc_tx_slots;

	for (i = 0; i < map->dm_nsegs; i++) {
		/* If sc is 82544 and on PCI-X bus */
		if (sc->pcix_82544) {
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_fill_descriptors(
			    map->dm_segs[i].ds_addr, map->dm_segs[i].ds_len,
			    &desc_array);
			for (j = 0; j < array_elements; j++) {
				desc = &sc->sc_tx_desc_ring[head];

				desc->buffer_addr = htole64(
				    desc_array.descriptor[j].address);
				desc->lower.data = htole32(
				    (sc->sc_txd_cmd | txd_lower |
				    (u_int16_t)desc_array.descriptor[j].length));
				desc->upper.data = htole32(txd_upper);

				last = head;
				if (++head == sc->sc_tx_slots)
					head = 0;

				used++;
			}
		} else {
			desc = &sc->sc_tx_desc_ring[head];

			desc->buffer_addr = htole64(map->dm_segs[i].ds_addr);
			desc->lower.data = htole32(sc->sc_txd_cmd |
			    txd_lower | map->dm_segs[i].ds_len);
			desc->upper.data = htole32(txd_upper);

			last = head;
			if (++head == sc->sc_tx_slots)
				head = 0;

			used++;
		}
	}

#if NVLAN > 0
	/* Find out if we are in VLAN mode */
	if (m->m_flags & M_VLANTAG) {
		/* Set the VLAN id */
		desc->upper.fields.special = htole16(m->m_pkthdr.ether_vtag);

		/* Tell hardware to add tag */
		desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}
#endif

	/* mark the packet with the mbuf and last desc slot */
	pkt->pkt_m = m;
	pkt->pkt_eop = last;

	sc->sc_tx_desc_head = head;

	/*
	 * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
	 */
	desc->lower.data |= htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);

	if (sc->hw.mac_type == em_82547) {
		/* Make the updated descriptors visible to the workaround. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dma.dma_map,
		    0, sc->sc_tx_dma.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Number of ring slots consumed (context + data descriptors). */
	return (used);
}
1266 :
1267 : /*********************************************************************
1268 : *
1269 : * 82547 workaround to avoid controller hang in half-duplex environment.
1270 : * The workaround is to avoid queuing a large packet that would span
1271 : * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1272 : * in this case. We do that only when FIFO is quiescent.
1273 : *
1274 : **********************************************************************/
void
em_82547_move_tail_locked(struct em_softc *sc)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	/* Walk from the hardware tail to the software tail, packet by packet. */
	hw_tdt = E1000_READ_REG(&sc->hw, TDT);
	sw_tdt = sc->sc_tx_desc_head;

	while (hw_tdt != sw_tdt) {
		tx_desc = &sc->sc_tx_desc_ring[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == sc->sc_tx_slots)
			hw_tdt = 0;

		if (eop) {
			/*
			 * Only expose complete packets to the hardware.
			 * If this packet would span the internal Tx FIFO
			 * wrap boundary, defer it and retry from the timer.
			 */
			if (em_82547_fifo_workaround(sc, length)) {
				sc->tx_fifo_wrk_cnt++;
				timeout_add(&sc->tx_fifo_timer_handle, 1);
				break;
			}
			E1000_WRITE_REG(&sc->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(sc, length);
			length = 0;
		}
	}
}
1306 :
/*
 * Timeout entry point for the 82547 Tx FIFO workaround: block network
 * interrupts and run the locked tail-mover.
 */
void
em_82547_move_tail(void *arg)
{
	struct em_softc *softc = arg;
	int spl;

	spl = splnet();
	em_82547_move_tail_locked(softc);
	splx(spl);
}
1317 :
1318 : int
1319 0 : em_82547_fifo_workaround(struct em_softc *sc, int len)
1320 : {
1321 : int fifo_space, fifo_pkt_len;
1322 :
1323 0 : fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1324 :
1325 0 : if (sc->link_duplex == HALF_DUPLEX) {
1326 0 : fifo_space = sc->tx_fifo_size - sc->tx_fifo_head;
1327 :
1328 0 : if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1329 0 : if (em_82547_tx_fifo_reset(sc))
1330 0 : return (0);
1331 : else
1332 0 : return (1);
1333 : }
1334 : }
1335 :
1336 0 : return (0);
1337 0 : }
1338 :
1339 : void
1340 0 : em_82547_update_fifo_head(struct em_softc *sc, int len)
1341 : {
1342 0 : int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1343 :
1344 : /* tx_fifo_head is always 16 byte aligned */
1345 0 : sc->tx_fifo_head += fifo_pkt_len;
1346 0 : if (sc->tx_fifo_head >= sc->tx_fifo_size)
1347 0 : sc->tx_fifo_head -= sc->tx_fifo_size;
1348 0 : }
1349 :
int
em_82547_tx_fifo_reset(struct em_softc *sc)
{
	uint32_t tctl;

	/*
	 * The FIFO pointers may only be rewritten while the transmit
	 * path is completely idle: descriptor ring drained (TDT==TDH),
	 * FIFO drained (tail==head for both pointer pairs) and no
	 * packet bytes pending (TDFPC==0).
	 */
	if ((E1000_READ_REG(&sc->hw, TDT) ==
	    E1000_READ_REG(&sc->hw, TDH)) &&
	    (E1000_READ_REG(&sc->hw, TDFT) ==
	    E1000_READ_REG(&sc->hw, TDFH)) &&
	    (E1000_READ_REG(&sc->hw, TDFTS) ==
	    E1000_READ_REG(&sc->hw, TDFHS)) &&
	    (E1000_READ_REG(&sc->hw, TDFPC) == 0)) {

		/* Disable TX unit */
		tctl = E1000_READ_REG(&sc->hw, TCTL);
		E1000_WRITE_REG(&sc->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&sc->hw, TDFT, sc->tx_head_addr);
		E1000_WRITE_REG(&sc->hw, TDFH, sc->tx_head_addr);
		E1000_WRITE_REG(&sc->hw, TDFTS, sc->tx_head_addr);
		E1000_WRITE_REG(&sc->hw, TDFHS, sc->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&sc->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&sc->hw);

		/* Keep the software shadow in step with the hardware. */
		sc->tx_fifo_head = 0;
		sc->tx_fifo_reset_cnt++;

		return (TRUE);
	} else
		return (FALSE);
}
1384 :
/*
 * Program the receive filter: promiscuous/allmulti flags and the
 * hardware multicast address table, from the current interface state.
 */
void
em_iff(struct em_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct arpcom *ac = &sc->sc_ac;
	u_int32_t reg_rctl = 0;
	u_int8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
	struct ether_multi *enm;
	struct ether_multistep step;
	int i = 0;

	IOCTL_DEBUGOUT("em_iff: begin");

	if (sc->hw.mac_type == em_82542_rev2_0) {
		/*
		 * 82542 rev 2.0 erratum: the receiver must be held in
		 * reset (and MWI disabled) while the filter is updated.
		 */
		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_clear_mwi(&sc->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
		msec_delay(5);
	}

	/* Start from a clean slate: no unicast/multicast promiscuity. */
	reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
	reg_rctl &= ~(E1000_RCTL_MPE | E1000_RCTL_UPE);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Fall back to accepting all multicast when promiscuous,
	 * when ranges are in use, or when the table would overflow.
	 */
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
		ifp->if_flags |= IFF_ALLMULTI;
		reg_rctl |= E1000_RCTL_MPE;
		if (ifp->if_flags & IFF_PROMISC)
			reg_rctl |= E1000_RCTL_UPE;
	} else {
		/* Flatten the multicast list and hand it to the hardware. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			bcopy(enm->enm_addrlo, mta + i, ETH_LENGTH_OF_ADDRESS);
			i += ETH_LENGTH_OF_ADDRESS;

			ETHER_NEXT_MULTI(step, enm);
		}

		em_mc_addr_list_update(&sc->hw, mta, ac->ac_multicnt, 0, 1);
	}

	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);

	if (sc->hw.mac_type == em_82542_rev2_0) {
		/* Take the 82542 receiver back out of reset, restore MWI. */
		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
		msec_delay(5);
		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_set_mwi(&sc->hw);
	}
}
1440 :
1441 : /*********************************************************************
1442 : * Timer routine
1443 : *
1444 : * This routine checks for link status and updates statistics.
1445 : *
1446 : **********************************************************************/
1447 :
void
em_local_timer(void *arg)
{
	struct ifnet *ifp;
	struct em_softc *sc = arg;
	int s;

	ifp = &sc->sc_ac.ac_if;

	s = splnet();

#ifndef SMALL_KERNEL
	/* Pull the hardware statistics counters into software. */
	em_update_stats_counters(sc);
#ifdef EM_DEBUG
	if (ifp->if_flags & IFF_DEBUG && ifp->if_flags & IFF_RUNNING)
		em_print_hw_stats(sc);
#endif
#endif
	/* Work around PHYs that negotiate 10/100 when gigabit is possible. */
	em_smartspeed(sc);

	/* Re-arm ourselves; this timer fires once a second. */
	timeout_add_sec(&sc->timer_handle, 1);

	splx(s);
}
1472 :
/*
 * Mirror the hardware link state (STATUS.LU) into the softc and ifnet,
 * notifying the stack only when the state actually changes.
 */
void
em_update_link_status(struct em_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	u_char link_state;

	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU) {
		if (sc->link_active == 0) {
			/* Link just came up: latch speed and duplex. */
			em_get_speed_and_duplex(&sc->hw,
			    &sc->link_speed,
			    &sc->link_duplex);
			/* Check if we may set SPEED_MODE bit on PCI-E */
			if ((sc->link_speed == SPEED_1000) &&
			    ((sc->hw.mac_type == em_82571) ||
			    (sc->hw.mac_type == em_82572) ||
			    (sc->hw.mac_type == em_82575) ||
			    (sc->hw.mac_type == em_82580))) {
				int tarc0;

				tarc0 = E1000_READ_REG(&sc->hw, TARC0);
				tarc0 |= SPEED_MODE_BIT;
				E1000_WRITE_REG(&sc->hw, TARC0, tarc0);
			}
			sc->link_active = 1;
			sc->smartspeed = 0;
			ifp->if_baudrate = IF_Mbps(sc->link_speed);
		}
		link_state = (sc->link_duplex == FULL_DUPLEX) ?
		    LINK_STATE_FULL_DUPLEX : LINK_STATE_HALF_DUPLEX;
		if (ifp->if_link_state != link_state) {
			ifp->if_link_state = link_state;
			if_link_state_change(ifp);
		}
	} else {
		if (sc->link_active == 1) {
			/* Link went down: clear the cached link parameters. */
			ifp->if_baudrate = sc->link_speed = 0;
			sc->link_duplex = 0;
			sc->link_active = 0;
		}
		if (ifp->if_link_state != LINK_STATE_DOWN) {
			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}
}
1518 :
1519 : /*********************************************************************
1520 : *
1521 : * This routine disables all traffic on the adapter by issuing a
1522 : * global reset on the MAC and deallocates TX/RX buffers.
1523 : *
1524 : **********************************************************************/
1525 :
void
em_stop(void *arg, int softonly)
{
	struct em_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~IFF_RUNNING;

	INIT_DEBUGOUT("em_stop: begin");

	/* Cancel the periodic and FIFO-workaround timers. */
	timeout_del(&sc->timer_handle);
	timeout_del(&sc->tx_fifo_timer_handle);

	/*
	 * softonly skips touching the hardware (used when only the
	 * software state needs to be torn down).
	 */
	if (!softonly)
		em_disable_intr(sc);
	if (sc->hw.mac_type >= em_pch_spt)
		em_flush_desc_rings(sc);
	if (!softonly)
		em_reset_hw(&sc->hw);

	/* Wait out any interrupt handler or transmit still in flight. */
	intr_barrier(sc->sc_intrhand);
	ifq_barrier(&ifp->if_snd);

	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);

	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/* Safe to free the rings now that nothing references them. */
	em_free_transmit_structures(sc);
	em_free_receive_structures(sc);
}
1558 :
1559 : /*********************************************************************
1560 : *
1561 : * Determine hardware revision.
1562 : *
1563 : **********************************************************************/
void
em_identify_hardware(struct em_softc *sc)
{
	u_int32_t reg;
	struct pci_attach_args *pa = &sc->osdep.em_pa;

	/* Make sure our PCI config space has the necessary stuff set */
	sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_COMMAND_STATUS_REG);

	/* Save off the information about this board */
	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	sc->hw.revision_id = PCI_REVISION(reg);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
	sc->hw.subsystem_id = PCI_PRODUCT(reg);

	/* Identify the MAC */
	if (em_set_mac_type(&sc->hw))
		printf("%s: Unknown MAC Type\n", DEVNAME(sc));

	/* PCH parts encode their revision in the low product-id nibble. */
	if (sc->hw.mac_type == em_pchlan)
		sc->hw.revision_id = PCI_PRODUCT(pa->pa_id) & 0x0f;

	/* These MACs need the IGP PHY init script after reset. */
	if (sc->hw.mac_type == em_82541 ||
	    sc->hw.mac_type == em_82541_rev_2 ||
	    sc->hw.mac_type == em_82547 ||
	    sc->hw.mac_type == em_82547_rev_2)
		sc->hw.phy_init_script = TRUE;
}
1598 :
1599 : void
1600 0 : em_legacy_irq_quirk_spt(struct em_softc *sc)
1601 : {
1602 : uint32_t reg;
1603 :
1604 : /* Legacy interrupt: SPT needs a quirk. */
1605 0 : if (sc->hw.mac_type != em_pch_spt && sc->hw.mac_type != em_pch_cnp)
1606 0 : return;
1607 0 : if (sc->legacy_irq == 0)
1608 0 : return;
1609 :
1610 0 : reg = EM_READ_REG(&sc->hw, E1000_FEXTNVM7);
1611 0 : reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE;
1612 0 : EM_WRITE_REG(&sc->hw, E1000_FEXTNVM7, reg);
1613 :
1614 0 : reg = EM_READ_REG(&sc->hw, E1000_FEXTNVM9);
1615 0 : reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS |
1616 : E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS;
1617 0 : EM_WRITE_REG(&sc->hw, E1000_FEXTNVM9, reg);
1618 0 : }
1619 :
/*
 * Map the device's PCI resources (memory BAR, optional I/O BAR, optional
 * flash BAR) and establish the interrupt handler.  Returns 0 on success
 * or ENXIO when a required resource cannot be mapped.
 */
int
em_allocate_pci_resources(struct em_softc *sc)
{
	int val, rid;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct pci_attach_args *pa = &sc->osdep.em_pa;
	pci_chipset_tag_t pc = pa->pa_pc;

	/* The register BAR (EM_MMBA) must be memory-mapped. */
	val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_MMBA);
	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
		printf(": mmba is not mem space\n");
		return (ENXIO);
	}
	if (pci_mapreg_map(pa, EM_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
	    &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
	    &sc->osdep.em_membase, &sc->osdep.em_memsize, 0)) {
		printf(": cannot find mem space\n");
		return (ENXIO);
	}

	switch (sc->hw.mac_type) {
	case em_82544:
	case em_82540:
	case em_82545:
	case em_82546:
	case em_82541:
	case em_82541_rev_2:
		/* Figure out where our I/O BAR is ? */
		for (rid = PCI_MAPREG_START; rid < PCI_MAPREG_END;) {
			val = pci_conf_read(pa->pa_pc, pa->pa_tag, rid);
			if (PCI_MAPREG_TYPE(val) == PCI_MAPREG_TYPE_IO) {
				sc->io_rid = rid;
				break;
			}
			rid += 4;
			/* 64-bit memory BARs occupy two config dwords. */
			if (PCI_MAPREG_MEM_TYPE(val) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				rid += 4;	/* skip high bits, too */
		}

		if (pci_mapreg_map(pa, rid, PCI_MAPREG_TYPE_IO, 0,
		    &sc->osdep.io_bus_space_tag, &sc->osdep.io_bus_space_handle,
		    &sc->osdep.em_iobase, &sc->osdep.em_iosize, 0)) {
			printf(": cannot find i/o space\n");
			return (ENXIO);
		}

		sc->hw.io_base = 0;
		break;
	default:
		break;
	}

	sc->osdep.em_flashoffset = 0;
	/* for ICH8 and family we need to find the flash memory */
	if (sc->hw.mac_type >= em_pch_spt) {
		/*
		 * SPT and newer expose the flash registers at a fixed
		 * offset inside the regular memory BAR instead of a
		 * separate flash BAR.
		 */
		sc->osdep.flash_bus_space_tag = sc->osdep.mem_bus_space_tag;
		sc->osdep.flash_bus_space_handle = sc->osdep.mem_bus_space_handle;
		sc->osdep.em_flashbase = 0;
		sc->osdep.em_flashsize = 0;
		sc->osdep.em_flashoffset = 0xe000;
	} else if (IS_ICH8(sc->hw.mac_type)) {
		val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_FLASH);
		if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
			printf(": flash is not mem space\n");
			return (ENXIO);
		}

		if (pci_mapreg_map(pa, EM_FLASH, PCI_MAPREG_MEM_TYPE(val), 0,
		    &sc->osdep.flash_bus_space_tag, &sc->osdep.flash_bus_space_handle,
		    &sc->osdep.em_flashbase, &sc->osdep.em_flashsize, 0)) {
			printf(": cannot find mem space\n");
			return (ENXIO);
		}
	}

	/* Prefer MSI, fall back to a legacy (INTx) interrupt. */
	sc->legacy_irq = 0;
	if (pci_intr_map_msi(pa, &ih)) {
		if (pci_intr_map(pa, &ih)) {
			printf(": couldn't map interrupt\n");
			return (ENXIO);
		}
		sc->legacy_irq = 1;
	}

	sc->osdep.dev = (struct device *)sc;
	sc->hw.back = &sc->osdep;

	intrstr = pci_intr_string(pc, ih);
	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE,
	    em_intr, sc, DEVNAME(sc));
	if (sc->sc_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return (ENXIO);
	}
	printf(": %s", intrstr);

	/*
	 * the ICP_xxxx device has multiple, duplicate register sets for
	 * use when it is being used as a network processor. Disable those
	 * registers here, as they are not necessary in this context and
	 * can confuse the system
	 */
	if(sc->hw.mac_type == em_icp_xxxx) {
		/* NOTE(review): these locals shadow the outer `val`/`rid`. */
		int offset;
		pcireg_t val;

		/* Without the ST capability there is nothing to disable. */
		if (!pci_get_capability(sc->osdep.em_pa.pa_pc,
		    sc->osdep.em_pa.pa_tag, PCI_CAP_ID_ST, &offset, &val)) {
			return (0);
		}
		offset += PCI_ST_SMIA_OFFSET;
		pci_conf_write(sc->osdep.em_pa.pa_pc, sc->osdep.em_pa.pa_tag,
		    offset, 0x06);
		E1000_WRITE_REG(&sc->hw, IMC1, ~0x0);
		E1000_WRITE_REG(&sc->hw, IMC2, ~0x0);
	}
	return (0);
}
1743 :
1744 : void
1745 0 : em_free_pci_resources(struct em_softc *sc)
1746 : {
1747 0 : struct pci_attach_args *pa = &sc->osdep.em_pa;
1748 0 : pci_chipset_tag_t pc = pa->pa_pc;
1749 :
1750 0 : if (sc->sc_intrhand)
1751 0 : pci_intr_disestablish(pc, sc->sc_intrhand);
1752 0 : sc->sc_intrhand = 0;
1753 :
1754 0 : if (sc->osdep.em_flashbase)
1755 0 : bus_space_unmap(sc->osdep.flash_bus_space_tag, sc->osdep.flash_bus_space_handle,
1756 0 : sc->osdep.em_flashsize);
1757 0 : sc->osdep.em_flashbase = 0;
1758 :
1759 0 : if (sc->osdep.em_iobase)
1760 0 : bus_space_unmap(sc->osdep.io_bus_space_tag, sc->osdep.io_bus_space_handle,
1761 0 : sc->osdep.em_iosize);
1762 0 : sc->osdep.em_iobase = 0;
1763 :
1764 0 : if (sc->osdep.em_membase)
1765 0 : bus_space_unmap(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
1766 0 : sc->osdep.em_memsize);
1767 0 : sc->osdep.em_membase = 0;
1768 0 : }
1769 :
1770 : /*********************************************************************
1771 : *
1772 : * Initialize the hardware to a configuration as specified by the
1773 : * em_softc structure. The controller is reset, the EEPROM is
1774 : * verified, the MAC address is set, then the shared initialization
1775 : * routines are called.
1776 : *
1777 : **********************************************************************/
int
em_hardware_init(struct em_softc *sc)
{
	uint32_t ret_val;
	u_int16_t rx_buffer_size;

	INIT_DEBUGOUT("em_hardware_init: begin");
	if (sc->hw.mac_type >= em_pch_spt)
		em_flush_desc_rings(sc);
	/* Issue a global reset */
	em_reset_hw(&sc->hw);

	/* When hardware is reset, fifo_head is also reset */
	sc->tx_fifo_head = 0;

	/* Make sure we have a good EEPROM before we read from it */
	if (em_get_flash_presence_i210(&sc->hw) &&
	    em_validate_eeprom_checksum(&sc->hw) < 0) {
		/*
		 * Some PCIe parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		if (em_validate_eeprom_checksum(&sc->hw) < 0) {
			printf("%s: The EEPROM Checksum Is Not Valid\n",
			    DEVNAME(sc));
			return (EIO);
		}
	}

	if (em_get_flash_presence_i210(&sc->hw) &&
	    em_read_part_num(&sc->hw, &(sc->part_num)) < 0) {
		printf("%s: EEPROM read error while reading part number\n",
		    DEVNAME(sc));
		return (EIO);
	}

	/* Set up smart power down as default off on newer adapters */
	if (!em_smart_pwr_down &&
	    (sc->hw.mac_type == em_82571 ||
	    sc->hw.mac_type == em_82572 ||
	    sc->hw.mac_type == em_82575 ||
	    sc->hw.mac_type == em_82580 ||
	    sc->hw.mac_type == em_i210 ||
	    sc->hw.mac_type == em_i350 )) {
		uint16_t phy_tmp = 0;

		/* Speed up time to link by disabling smart power down */
		em_read_phy_reg(&sc->hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		em_write_phy_reg(&sc->hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp);
	}

	em_legacy_irq_quirk_spt(sc);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	/* Rx portion of the packet buffer, in bytes (PBA is in KB). */
	rx_buffer_size = ((E1000_READ_REG(&sc->hw, PBA) & 0xffff) << 10 );

	sc->hw.fc_high_water = rx_buffer_size -
	    EM_ROUNDUP(sc->hw.max_frame_size, 1024);
	sc->hw.fc_low_water = sc->hw.fc_high_water - 1500;
	if (sc->hw.mac_type == em_80003es2lan)
		sc->hw.fc_pause_time = 0xFFFF;
	else
		sc->hw.fc_pause_time = 1000;
	sc->hw.fc_send_xon = TRUE;
	sc->hw.fc = E1000_FC_FULL;

	/* ASPM can cause errata on some parts; keep it off. */
	em_disable_aspm(sc);

	if ((ret_val = em_init_hw(&sc->hw)) != 0) {
		if (ret_val == E1000_DEFER_INIT) {
			INIT_DEBUGOUT("\nHardware Initialization Deferred ");
			return (EAGAIN);
		}
		printf("\n%s: Hardware Initialization Failed: %d\n",
		    DEVNAME(sc), ret_val);
		return (EIO);
	}

	em_check_for_link(&sc->hw);

	return (0);
}
1875 :
1876 : /*********************************************************************
1877 : *
1878 : * Setup networking device structure and register an interface.
1879 : *
1880 : **********************************************************************/
1881 : void
1882 0 : em_setup_interface(struct em_softc *sc)
1883 : {
1884 : struct ifnet *ifp;
1885 : uint64_t fiber_type = IFM_1000_SX;
1886 :
1887 : INIT_DEBUGOUT("em_setup_interface: begin");
1888 :
1889 0 : ifp = &sc->sc_ac.ac_if;
1890 0 : strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
1891 0 : ifp->if_softc = sc;
1892 0 : ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1893 0 : ifp->if_xflags = IFXF_MPSAFE;
1894 0 : ifp->if_ioctl = em_ioctl;
1895 0 : ifp->if_qstart = em_start;
1896 0 : ifp->if_watchdog = em_watchdog;
1897 0 : ifp->if_hardmtu =
1898 0 : sc->hw.max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN;
1899 0 : IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_slots - 1);
1900 :
1901 0 : ifp->if_capabilities = IFCAP_VLAN_MTU;
1902 :
1903 : #if NVLAN > 0
1904 0 : if (sc->hw.mac_type != em_82575 && sc->hw.mac_type != em_82580 &&
1905 0 : sc->hw.mac_type != em_i210 && sc->hw.mac_type != em_i350)
1906 0 : ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1907 : #endif
1908 :
1909 0 : if (sc->hw.mac_type >= em_82543 && sc->hw.mac_type != em_82575 &&
1910 0 : sc->hw.mac_type != em_82580 && sc->hw.mac_type != em_i210 &&
1911 0 : sc->hw.mac_type != em_i350)
1912 0 : ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
1913 :
1914 : /*
1915 : * Specify the media types supported by this adapter and register
1916 : * callbacks to update media and link information
1917 : */
1918 0 : ifmedia_init(&sc->media, IFM_IMASK, em_media_change,
1919 : em_media_status);
1920 0 : if (sc->hw.media_type == em_media_type_fiber ||
1921 0 : sc->hw.media_type == em_media_type_internal_serdes) {
1922 0 : if (sc->hw.mac_type == em_82545)
1923 0 : fiber_type = IFM_1000_LX;
1924 0 : ifmedia_add(&sc->media, IFM_ETHER | fiber_type | IFM_FDX,
1925 : 0, NULL);
1926 0 : ifmedia_add(&sc->media, IFM_ETHER | fiber_type,
1927 : 0, NULL);
1928 0 : } else {
1929 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1930 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1931 : 0, NULL);
1932 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX,
1933 : 0, NULL);
1934 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1935 : 0, NULL);
1936 0 : if (sc->hw.phy_type != em_phy_ife) {
1937 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1938 : 0, NULL);
1939 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1940 0 : }
1941 : }
1942 0 : ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1943 0 : ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1944 :
1945 0 : if_attach(ifp);
1946 0 : ether_ifattach(ifp);
1947 0 : }
1948 :
1949 : int
1950 0 : em_detach(struct device *self, int flags)
1951 : {
1952 0 : struct em_softc *sc = (struct em_softc *)self;
1953 0 : struct ifnet *ifp = &sc->sc_ac.ac_if;
1954 0 : struct pci_attach_args *pa = &sc->osdep.em_pa;
1955 0 : pci_chipset_tag_t pc = pa->pa_pc;
1956 :
1957 0 : if (sc->sc_intrhand)
1958 0 : pci_intr_disestablish(pc, sc->sc_intrhand);
1959 0 : sc->sc_intrhand = 0;
1960 :
1961 0 : em_stop(sc, 1);
1962 :
1963 0 : em_free_pci_resources(sc);
1964 :
1965 0 : if (sc->sc_rx_desc_ring != NULL) {
1966 0 : sc->sc_rx_desc_ring = NULL;
1967 0 : em_dma_free(sc, &sc->sc_rx_dma);
1968 0 : }
1969 0 : if (sc->sc_tx_desc_ring != NULL) {
1970 0 : sc->sc_tx_desc_ring = NULL;
1971 0 : em_dma_free(sc, &sc->sc_tx_dma);
1972 0 : }
1973 :
1974 0 : ether_ifdetach(ifp);
1975 0 : if_detach(ifp);
1976 :
1977 0 : return (0);
1978 : }
1979 :
1980 : int
1981 0 : em_activate(struct device *self, int act)
1982 : {
1983 0 : struct em_softc *sc = (struct em_softc *)self;
1984 0 : struct ifnet *ifp = &sc->sc_ac.ac_if;
1985 : int rv = 0;
1986 :
1987 0 : switch (act) {
1988 : case DVACT_SUSPEND:
1989 0 : if (ifp->if_flags & IFF_RUNNING)
1990 0 : em_stop(sc, 0);
1991 : /* We have no children atm, but we will soon */
1992 0 : rv = config_activate_children(self, act);
1993 0 : break;
1994 : case DVACT_RESUME:
1995 0 : if (ifp->if_flags & IFF_UP)
1996 0 : em_init(sc);
1997 : break;
1998 : default:
1999 0 : rv = config_activate_children(self, act);
2000 0 : break;
2001 : }
2002 0 : return (rv);
2003 : }
2004 :
2005 : /*********************************************************************
2006 : *
2007 : * Workaround for SmartSpeed on 82541 and 82547 controllers
2008 : *
2009 : **********************************************************************/
/*
 * em_smartspeed():
 *	If gigabit autonegotiation keeps failing with a master/slave
 *	configuration fault (e.g. a 2/3 pair cable), back off by
 *	toggling the PHY's manual master/slave enable bit and
 *	restarting autonegotiation.  Intended to be called repeatedly
 *	while the link is down; a no-op once the link comes up.
 */
void
em_smartspeed(struct em_softc *sc)
{
	uint16_t phy_tmp;

	/* Only applies to IGP PHYs autonegotiating 1000FDX with no link. */
	if (sc->link_active || (sc->hw.phy_type != em_phy_igp) ||
	    !sc->hw.autoneg || !(sc->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (sc->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			em_read_phy_reg(&sc->hw, PHY_1000T_CTRL,
			    &phy_tmp);
			if (phy_tmp & CR_1000T_MS_ENABLE) {
				/* Drop manual master/slave selection and
				 * let autonegotiation pick it. */
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				em_write_phy_reg(&sc->hw,
				    PHY_1000T_CTRL, phy_tmp);
				sc->smartspeed++;
				/* Kick off a fresh autonegotiation cycle. */
				if (sc->hw.autoneg &&
				    !em_phy_setup_autoneg(&sc->hw) &&
				    !em_read_phy_reg(&sc->hw, PHY_CTRL,
				    &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
					    MII_CR_RESTART_AUTO_NEG);
					em_write_phy_reg(&sc->hw,
					    PHY_CTRL, phy_tmp);
				}
			}
		}
		return;
	} else if (sc->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		em_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		em_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
		/* Restart autonegotiation with manual master/slave forced. */
		if (sc->hw.autoneg &&
		    !em_phy_setup_autoneg(&sc->hw) &&
		    !em_read_phy_reg(&sc->hw, PHY_CTRL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
			    MII_CR_RESTART_AUTO_NEG);
			em_write_phy_reg(&sc->hw, PHY_CTRL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if (sc->smartspeed++ == EM_SMARTSPEED_MAX)
		sc->smartspeed = 0;
}
2063 :
2064 : /*
2065 : * Manage DMA'able memory.
2066 : */
2067 : int
2068 0 : em_dma_malloc(struct em_softc *sc, bus_size_t size, struct em_dma_alloc *dma)
2069 : {
2070 : int r;
2071 :
2072 0 : r = bus_dmamap_create(sc->sc_dmat, size, 1,
2073 : size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &dma->dma_map);
2074 0 : if (r != 0)
2075 0 : return (r);
2076 :
2077 0 : r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &dma->dma_seg,
2078 : 1, &dma->dma_nseg, BUS_DMA_WAITOK | BUS_DMA_ZERO);
2079 0 : if (r != 0)
2080 : goto destroy;
2081 :
2082 0 : r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg, size,
2083 : &dma->dma_vaddr, BUS_DMA_WAITOK);
2084 0 : if (r != 0)
2085 : goto free;
2086 :
2087 0 : r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr, size,
2088 : NULL, BUS_DMA_WAITOK);
2089 0 : if (r != 0)
2090 : goto unmap;
2091 :
2092 0 : dma->dma_size = size;
2093 0 : return (0);
2094 :
2095 : unmap:
2096 0 : bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
2097 : free:
2098 0 : bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
2099 : destroy:
2100 0 : bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
2101 :
2102 0 : return (r);
2103 0 : }
2104 :
/*
 * Release a DMA region set up by em_dma_malloc(): unload the map,
 * unmap the kernel virtual address, free the memory segments and
 * destroy the map — the exact reverse of the allocation order.
 */
void
em_dma_free(struct em_softc *sc, struct em_dma_alloc *dma)
{
	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_size);
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
}
2113 :
2114 : /*********************************************************************
2115 : *
2116 : * Allocate memory for tx_buffer structures. The tx_buffer stores all
2117 : * the information needed to transmit a packet on the wire.
2118 : *
2119 : **********************************************************************/
2120 : int
2121 0 : em_allocate_transmit_structures(struct em_softc *sc)
2122 : {
2123 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dma.dma_map,
2124 : 0, sc->sc_tx_dma.dma_map->dm_mapsize,
2125 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2126 :
2127 0 : sc->sc_tx_pkts_ring = mallocarray(sc->sc_tx_slots,
2128 : sizeof(*sc->sc_tx_pkts_ring), M_DEVBUF, M_NOWAIT | M_ZERO);
2129 0 : if (sc->sc_tx_pkts_ring == NULL) {
2130 0 : printf("%s: Unable to allocate tx_buffer memory\n",
2131 0 : DEVNAME(sc));
2132 0 : return (ENOMEM);
2133 : }
2134 :
2135 0 : return (0);
2136 0 : }
2137 :
2138 : /*********************************************************************
2139 : *
2140 : * Allocate and initialize transmit structures.
2141 : *
2142 : **********************************************************************/
2143 : int
2144 0 : em_setup_transmit_structures(struct em_softc *sc)
2145 : {
2146 : struct em_packet *pkt;
2147 : int error, i;
2148 :
2149 0 : if ((error = em_allocate_transmit_structures(sc)) != 0)
2150 : goto fail;
2151 :
2152 0 : bzero((void *) sc->sc_tx_desc_ring,
2153 : (sizeof(struct em_tx_desc)) * sc->sc_tx_slots);
2154 :
2155 0 : for (i = 0; i < sc->sc_tx_slots; i++) {
2156 0 : pkt = &sc->sc_tx_pkts_ring[i];
2157 0 : error = bus_dmamap_create(sc->sc_dmat, MAX_JUMBO_FRAME_SIZE,
2158 : EM_MAX_SCATTER / (sc->pcix_82544 ? 2 : 1),
2159 : MAX_JUMBO_FRAME_SIZE, 0, BUS_DMA_NOWAIT, &pkt->pkt_map);
2160 0 : if (error != 0) {
2161 0 : printf("%s: Unable to create TX DMA map\n",
2162 0 : DEVNAME(sc));
2163 0 : goto fail;
2164 : }
2165 : }
2166 :
2167 0 : sc->sc_tx_desc_head = 0;
2168 0 : sc->sc_tx_desc_tail = 0;
2169 :
2170 : /* Set checksum context */
2171 0 : sc->active_checksum_context = OFFLOAD_NONE;
2172 :
2173 0 : return (0);
2174 :
2175 : fail:
2176 0 : em_free_transmit_structures(sc);
2177 0 : return (error);
2178 0 : }
2179 :
2180 : /*********************************************************************
2181 : *
2182 : * Enable transmit unit.
2183 : *
2184 : **********************************************************************/
/*
 * em_initialize_transmit_unit():
 *	Program the transmit side of the chip: descriptor ring base and
 *	length, head/tail pointers, inter-packet gap, interrupt delays
 *	and finally TCTL, whose write enables the transmitter.  Also
 *	applies the SPT (i218/i219) silicon errata workaround.
 */
void
em_initialize_transmit_unit(struct em_softc *sc)
{
	u_int32_t reg_tctl, reg_tipg = 0;
	u_int64_t bus_addr;

	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");

	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = sc->sc_tx_dma.dma_map->dm_segs[0].ds_addr;
	E1000_WRITE_REG(&sc->hw, TDLEN,
	    sc->sc_tx_slots *
	    sizeof(struct em_tx_desc));
	E1000_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&sc->hw, TDT, 0);
	E1000_WRITE_REG(&sc->hw, TDH, 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
	    E1000_READ_REG(&sc->hw, TDBAL),
	    E1000_READ_REG(&sc->hw, TDLEN));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (sc->hw.mac_type) {
	case em_82542_rev2_0:
	case em_82542_rev2_1:
		reg_tipg = DEFAULT_82542_TIPG_IPGT;
		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	case em_80003es2lan:
		reg_tipg = DEFAULT_82543_TIPG_IPGR1;
		reg_tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		/* Fiber/serdes media uses a different base IPG than copper. */
		if (sc->hw.media_type == em_media_type_fiber ||
		    sc->hw.media_type == em_media_type_internal_serdes)
			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}


	E1000_WRITE_REG(&sc->hw, TIPG, reg_tipg);
	E1000_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);
	/* Absolute TX interrupt delay only exists on 82540 and later. */
	if (sc->hw.mac_type >= em_82540)
		E1000_WRITE_REG(&sc->hw, TADV, sc->tx_abs_int_delay);

	/* Setup Transmit Descriptor Base Settings */
	sc->sc_txd_cmd = E1000_TXD_CMD_IFCS;

	if (sc->hw.mac_type == em_82575 || sc->hw.mac_type == em_82580 ||
	    sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350) {
		/* 82575/6 need to enable the TX queue and lack the IDE bit */
		reg_tctl = E1000_READ_REG(&sc->hw, TXDCTL);
		reg_tctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(&sc->hw, TXDCTL, reg_tctl);
	} else if (sc->tx_int_delay > 0)
		sc->sc_txd_cmd |= E1000_TXD_CMD_IDE;

	/* Program the Transmit Control Register */
	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	if (sc->hw.mac_type >= em_82571)
		reg_tctl |= E1000_TCTL_MULR;
	if (sc->link_duplex == FULL_DUPLEX)
		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	else
		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	/* This write will effectively turn on the transmit unit */
	E1000_WRITE_REG(&sc->hw, TCTL, reg_tctl);

	/* SPT Si errata workaround to avoid data corruption */

	if (sc->hw.mac_type == em_pch_spt) {
		uint32_t reg_val;

		reg_val = EM_READ_REG(&sc->hw, E1000_IOSFPC);
		reg_val |= E1000_RCTL_RDMTS_HEX;
		EM_WRITE_REG(&sc->hw, E1000_IOSFPC, reg_val);

		reg_val = E1000_READ_REG(&sc->hw, TARC0);
		/* i218-i219 Specification Update 1.5.4.5 */
		reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
		reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
		E1000_WRITE_REG(&sc->hw, TARC0, reg_val);
	}
}
2277 :
2278 : /*********************************************************************
2279 : *
2280 : * Free all transmit related data structures.
2281 : *
2282 : **********************************************************************/
/*
 * em_free_transmit_structures():
 *	Release all transmit-side software state: unload and free any
 *	in-flight mbufs, destroy the per-slot DMA maps, free the packet
 *	bookkeeping array and hand the descriptor ring memory back to
 *	the CPU.  Safe to call on a partially initialized state (used
 *	from the setup failure path).
 */
void
em_free_transmit_structures(struct em_softc *sc)
{
	struct em_packet *pkt;
	int i;

	INIT_DEBUGOUT("free_transmit_structures: begin");

	if (sc->sc_tx_pkts_ring != NULL) {
		for (i = 0; i < sc->sc_tx_slots; i++) {
			pkt = &sc->sc_tx_pkts_ring[i];

			if (pkt->pkt_m != NULL) {
				/* Finish the DMA before freeing the mbuf. */
				bus_dmamap_sync(sc->sc_dmat, pkt->pkt_map,
				    0, pkt->pkt_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->sc_dmat, pkt->pkt_map);

				m_freem(pkt->pkt_m);
				pkt->pkt_m = NULL;
			}

			/* Map may be NULL if setup failed partway through. */
			if (pkt->pkt_map != NULL) {
				bus_dmamap_destroy(sc->sc_dmat, pkt->pkt_map);
				pkt->pkt_map = NULL;
			}
		}

		free(sc->sc_tx_pkts_ring, M_DEVBUF,
		    sc->sc_tx_slots * sizeof(*sc->sc_tx_pkts_ring));
		sc->sc_tx_pkts_ring = NULL;
	}

	/* Reclaim the descriptor ring memory from the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dma.dma_map,
	    0, sc->sc_tx_dma.dma_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
}
2320 :
2321 : /*********************************************************************
2322 : *
2323 : * The offload context needs to be set when we transfer the first
2324 : * packet of a particular protocol (TCP/UDP). We change the
2325 : * context only if the protocol type changes.
2326 : *
2327 : **********************************************************************/
/*
 * em_transmit_checksum_setup():
 *	Pick the TX descriptor checksum-offload bits for `mp' and, when
 *	the offload protocol (TCP vs. UDP) differs from the currently
 *	programmed context, write a fresh context descriptor into ring
 *	slot `head'.
 *
 *	Returns the number of descriptors consumed: 1 if a context
 *	descriptor was written at `head', 0 otherwise.  txd_upper and
 *	txd_lower receive the bits to OR into the data descriptor.
 */
u_int
em_transmit_checksum_setup(struct em_softc *sc, struct mbuf *mp, u_int head,
    u_int32_t *txd_upper, u_int32_t *txd_lower)
{
	struct em_context_desc *TXD;

	if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) {
		*txd_upper = E1000_TXD_POPTS_TXSM << 8;
		*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		/* Context already programmed for TCP: nothing to emit. */
		if (sc->active_checksum_context == OFFLOAD_TCP_IP)
			return (0);
		else
			sc->active_checksum_context = OFFLOAD_TCP_IP;
	} else if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) {
		*txd_upper = E1000_TXD_POPTS_TXSM << 8;
		*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		/* Context already programmed for UDP: nothing to emit. */
		if (sc->active_checksum_context == OFFLOAD_UDP_IP)
			return (0);
		else
			sc->active_checksum_context = OFFLOAD_UDP_IP;
	} else {
		/* No offload requested for this packet. */
		*txd_upper = 0;
		*txd_lower = 0;
		return (0);
	}

	/* If we reach this point, the checksum offload context
	 * needs to be reset.
	 */
	TXD = (struct em_context_desc *)&sc->sc_tx_desc_ring[head];

	/* IP header checksum: start, checksum offset and end offsets. */
	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
	TXD->lower_setup.ip_fields.ipcso =
	    ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
	TXD->lower_setup.ip_fields.ipcse =
	    htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);

	/*
	 * Transport checksum: starts right after the IP header; an end
	 * of 0 means "to the end of the packet".  Note this assumes a
	 * plain IPv4 header with no options.
	 */
	TXD->upper_setup.tcp_fields.tucss =
	    ETHER_HDR_LEN + sizeof(struct ip);
	TXD->upper_setup.tcp_fields.tucse = htole16(0);

	if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
		    ETHER_HDR_LEN + sizeof(struct ip) +
		    offsetof(struct tcphdr, th_sum);
	} else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
		    ETHER_HDR_LEN + sizeof(struct ip) +
		    offsetof(struct udphdr, uh_sum);
	}

	/* Not doing TSO, so no segmentation setup. */
	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length = htole32(sc->sc_txd_cmd | E1000_TXD_CMD_DEXT);

	return (1);
}
2384 :
2385 : /**********************************************************************
2386 : *
2387 : * Examine each tx_buffer in the used queue. If the hardware is done
2388 : * processing the packet then free associated resources. The
2389 : * tx_buffer is put back on the free queue.
2390 : *
2391 : **********************************************************************/
/*
 * em_txeof():
 *	Reclaim completed transmit slots.  Walks the ring from the
 *	software tail toward the head; for each packet whose final
 *	(EOP) descriptor has the Descriptor Done bit set, the DMA map
 *	is unloaded and the mbuf freed.  Stops at the first packet the
 *	hardware has not finished.  Restarts a stalled send queue and
 *	clears the watchdog timer once the ring drains.
 */
void
em_txeof(struct em_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct em_packet *pkt;
	struct em_tx_desc *desc;
	u_int head, tail;
	u_int free = 0;

	head = sc->sc_tx_desc_head;
	tail = sc->sc_tx_desc_tail;

	/* Ring is empty — nothing to reclaim. */
	if (head == tail)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dma.dma_map,
	    0, sc->sc_tx_dma.dma_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	do {
		pkt = &sc->sc_tx_pkts_ring[tail];
		/* Completion is reported on the packet's last descriptor. */
		desc = &sc->sc_tx_desc_ring[pkt->pkt_eop];

		if (!ISSET(desc->upper.fields.status, E1000_TXD_STAT_DD))
			break;

		bus_dmamap_sync(sc->sc_dmat, pkt->pkt_map,
		    0, pkt->pkt_map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, pkt->pkt_map);

		KASSERT(pkt->pkt_m != NULL);

		m_freem(pkt->pkt_m);
		pkt->pkt_m = NULL;

		/* Skip over all descriptors belonging to this packet. */
		tail = pkt->pkt_eop;

		if (++tail == sc->sc_tx_slots)
			tail = 0;

		free++;
	} while (tail != head);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dma.dma_map,
	    0, sc->sc_tx_dma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	if (free == 0)
		return;

	sc->sc_tx_desc_tail = tail;

	/* Freed slots: wake a blocked queue, or quiet the watchdog. */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (tail == head)
		ifp->if_timer = 0;
}
2450 :
2451 : /*********************************************************************
2452 : *
2453 : * Get a buffer from system mbuf buffer pool.
2454 : *
2455 : **********************************************************************/
2456 : int
2457 0 : em_get_buf(struct em_softc *sc, int i)
2458 : {
2459 : struct mbuf *m;
2460 : struct em_packet *pkt;
2461 : struct em_rx_desc *desc;
2462 : int error;
2463 :
2464 0 : pkt = &sc->sc_rx_pkts_ring[i];
2465 0 : desc = &sc->sc_rx_desc_ring[i];
2466 :
2467 0 : KASSERT(pkt->pkt_m == NULL);
2468 :
2469 0 : m = MCLGETI(NULL, M_DONTWAIT, NULL, EM_MCLBYTES);
2470 0 : if (m == NULL) {
2471 0 : sc->mbuf_cluster_failed++;
2472 0 : return (ENOBUFS);
2473 : }
2474 0 : m->m_len = m->m_pkthdr.len = EM_MCLBYTES;
2475 0 : m_adj(m, ETHER_ALIGN);
2476 :
2477 0 : error = bus_dmamap_load_mbuf(sc->sc_dmat, pkt->pkt_map,
2478 : m, BUS_DMA_NOWAIT);
2479 0 : if (error) {
2480 0 : m_freem(m);
2481 0 : return (error);
2482 : }
2483 :
2484 0 : bus_dmamap_sync(sc->sc_dmat, pkt->pkt_map,
2485 : 0, pkt->pkt_map->dm_mapsize,
2486 : BUS_DMASYNC_PREREAD);
2487 0 : pkt->pkt_m = m;
2488 :
2489 0 : memset(desc, 0, sizeof(*desc));
2490 0 : htolem64(&desc->buffer_addr, pkt->pkt_map->dm_segs[0].ds_addr);
2491 :
2492 0 : return (0);
2493 0 : }
2494 :
2495 : /*********************************************************************
2496 : *
2497 : * Allocate memory for rx_buffer structures. Since we use one
2498 : * rx_buffer per received packet, the maximum number of rx_buffer's
2499 : * that we'll need is equal to the number of receive descriptors
2500 : * that we've allocated.
2501 : *
2502 : **********************************************************************/
2503 : int
2504 0 : em_allocate_receive_structures(struct em_softc *sc)
2505 : {
2506 : struct em_packet *pkt;
2507 : int i;
2508 : int error;
2509 :
2510 0 : sc->sc_rx_pkts_ring = mallocarray(sc->sc_rx_slots,
2511 : sizeof(*sc->sc_rx_pkts_ring), M_DEVBUF, M_NOWAIT | M_ZERO);
2512 0 : if (sc->sc_rx_pkts_ring == NULL) {
2513 0 : printf("%s: Unable to allocate rx_buffer memory\n",
2514 0 : DEVNAME(sc));
2515 0 : return (ENOMEM);
2516 : }
2517 :
2518 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dma.dma_map,
2519 : 0, sc->sc_rx_dma.dma_map->dm_mapsize,
2520 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2521 :
2522 0 : for (i = 0; i < sc->sc_rx_slots; i++) {
2523 0 : pkt = &sc->sc_rx_pkts_ring[i];
2524 :
2525 0 : error = bus_dmamap_create(sc->sc_dmat, EM_MCLBYTES, 1,
2526 : EM_MCLBYTES, 0, BUS_DMA_NOWAIT, &pkt->pkt_map);
2527 0 : if (error != 0) {
2528 0 : printf("%s: em_allocate_receive_structures: "
2529 : "bus_dmamap_create failed; error %u\n",
2530 0 : DEVNAME(sc), error);
2531 : goto fail;
2532 : }
2533 :
2534 0 : pkt->pkt_m = NULL;
2535 : }
2536 :
2537 0 : return (0);
2538 :
2539 : fail:
2540 0 : em_free_receive_structures(sc);
2541 0 : return (error);
2542 0 : }
2543 :
2544 : /*********************************************************************
2545 : *
2546 : * Allocate and initialize receive structures.
2547 : *
2548 : **********************************************************************/
2549 : int
2550 0 : em_setup_receive_structures(struct em_softc *sc)
2551 : {
2552 0 : struct ifnet *ifp = &sc->sc_ac.ac_if;
2553 : u_int lwm;
2554 :
2555 0 : memset(sc->sc_rx_desc_ring, 0,
2556 : sc->sc_rx_slots * sizeof(*sc->sc_rx_desc_ring));
2557 :
2558 0 : if (em_allocate_receive_structures(sc))
2559 0 : return (ENOMEM);
2560 :
2561 : /* Setup our descriptor pointers */
2562 0 : sc->sc_rx_desc_tail = 0;
2563 0 : sc->sc_rx_desc_head = sc->sc_rx_slots - 1;
2564 :
2565 0 : lwm = max(4, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1));
2566 0 : if_rxr_init(&sc->sc_rx_ring, lwm, sc->sc_rx_slots);
2567 :
2568 0 : if (em_rxfill(sc) == 0) {
2569 0 : printf("%s: unable to fill any rx descriptors\n",
2570 0 : DEVNAME(sc));
2571 0 : }
2572 :
2573 0 : return (0);
2574 0 : }
2575 :
2576 : /*********************************************************************
2577 : *
2578 : * Enable receive unit.
2579 : *
2580 : **********************************************************************/
/*
 * em_initialize_receive_unit():
 *	Program the receive side of the chip: interrupt delays and
 *	throttling, descriptor ring base/length, buffer sizing,
 *	checksum offload, per-family quirks, and finally RCTL and the
 *	head/tail pointers, which enable reception.
 */
void
em_initialize_receive_unit(struct em_softc *sc)
{
	u_int32_t reg_rctl;
	u_int32_t reg_rxcsum;
	u_int64_t bus_addr;

	INIT_DEBUGOUT("em_initialize_receive_unit: begin");

	/* Make sure receives are disabled while setting up the descriptor ring */
	E1000_WRITE_REG(&sc->hw, RCTL, 0);

	/* Set the Receive Delay Timer Register */
	E1000_WRITE_REG(&sc->hw, RDTR,
	    sc->rx_int_delay | E1000_RDT_FPDB);

	if (sc->hw.mac_type >= em_82540) {
		if (sc->rx_int_delay)
			E1000_WRITE_REG(&sc->hw, RADV, sc->rx_abs_int_delay);

		/* Set the interrupt throttling rate.  Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
		E1000_WRITE_REG(&sc->hw, ITR, DEFAULT_ITR);
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = sc->sc_rx_dma.dma_map->dm_segs[0].ds_addr;
	E1000_WRITE_REG(&sc->hw, RDLEN,
	    sc->sc_rx_slots * sizeof(*sc->sc_rx_desc_ring));
	E1000_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);

	/* Setup the Receive Control Register */
	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
	    E1000_RCTL_RDMTS_HALF |
	    (sc->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* TBI compatibility requires accepting bad packets (SBP). */
	if (sc->hw.tbi_compatibility_on == TRUE)
		reg_rctl |= E1000_RCTL_SBP;

	/*
	 * The i350 has a bug where it always strips the CRC whether
	 * asked to or not. So ask for stripped CRC here and
	 * cope in rxeof
	 */
	if (sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350)
		reg_rctl |= E1000_RCTL_SECRC;

	/* Translate the software buffer size into RCTL size bits. */
	switch (sc->sc_rx_buffer_len) {
	default:
	case EM_RXBUFFER_2048:
		reg_rctl |= E1000_RCTL_SZ_2048;
		break;
	case EM_RXBUFFER_4096:
		reg_rctl |= E1000_RCTL_SZ_4096|E1000_RCTL_BSEX|E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_8192:
		reg_rctl |= E1000_RCTL_SZ_8192|E1000_RCTL_BSEX|E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_16384:
		reg_rctl |= E1000_RCTL_SZ_16384|E1000_RCTL_BSEX|E1000_RCTL_LPE;
		break;
	}

	/* Accept oversized (long) packets when jumbo frames are in use. */
	if (sc->hw.max_frame_size != ETHER_MAX_LEN)
		reg_rctl |= E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (sc->hw.mac_type >= em_82543) {
		reg_rxcsum = E1000_READ_REG(&sc->hw, RXCSUM);
		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);
	}

	/*
	 * XXX TEMPORARY WORKAROUND: on some systems with 82573
	 * long latencies are observed, like Lenovo X60.
	 */
	if (sc->hw.mac_type == em_82573)
		E1000_WRITE_REG(&sc->hw, RDTR, 0x20);

	if (sc->hw.mac_type == em_82575 || sc->hw.mac_type == em_82580 ||
	    sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350) {
		/* 82575/6 need to enable the RX queue */
		uint32_t reg;
		reg = E1000_READ_REG(&sc->hw, RXDCTL);
		reg |= E1000_RXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(&sc->hw, RXDCTL, reg);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	E1000_WRITE_REG(&sc->hw, RDH, 0);
	E1000_WRITE_REG(&sc->hw, RDT, sc->sc_rx_desc_head);
}
2678 :
2679 : /*********************************************************************
2680 : *
2681 : * Free receive related data structures.
2682 : *
2683 : **********************************************************************/
/*
 * em_free_receive_structures():
 *	Release all receive-side software state: drain the rx ring
 *	accounting, free posted mbuf clusters and their DMA maps, free
 *	the bookkeeping array and any partially reassembled frame.
 *	Safe to call on a partially initialized state (used from the
 *	allocation failure path).
 */
void
em_free_receive_structures(struct em_softc *sc)
{
	struct em_packet *pkt;
	int i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	/* Return all slots to the ring accounting. */
	if_rxr_init(&sc->sc_rx_ring, 0, 0);

	/* Reclaim the descriptor ring memory from the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dma.dma_map,
	    0, sc->sc_rx_dma.dma_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (sc->sc_rx_pkts_ring != NULL) {
		for (i = 0; i < sc->sc_rx_slots; i++) {
			pkt = &sc->sc_rx_pkts_ring[i];
			if (pkt->pkt_m != NULL) {
				bus_dmamap_sync(sc->sc_dmat, pkt->pkt_map,
				    0, pkt->pkt_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->sc_dmat, pkt->pkt_map);
				m_freem(pkt->pkt_m);
				pkt->pkt_m = NULL;
			}
			bus_dmamap_destroy(sc->sc_dmat, pkt->pkt_map);
		}

		free(sc->sc_rx_pkts_ring, M_DEVBUF,
		    sc->sc_rx_slots * sizeof(*sc->sc_rx_pkts_ring));
		sc->sc_rx_pkts_ring = NULL;
	}

	/* Drop any partially reassembled multi-descriptor frame. */
	if (sc->fmp != NULL) {
		m_freem(sc->fmp);
		sc->fmp = NULL;
		sc->lmp = NULL;
	}
}
2723 :
2724 : int
2725 0 : em_rxfill(struct em_softc *sc)
2726 : {
2727 : u_int slots;
2728 : int post = 0;
2729 : int i;
2730 :
2731 0 : i = sc->sc_rx_desc_head;
2732 :
2733 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dma.dma_map,
2734 : 0, sc->sc_rx_dma.dma_map->dm_mapsize,
2735 : BUS_DMASYNC_POSTWRITE);
2736 :
2737 0 : for (slots = if_rxr_get(&sc->sc_rx_ring, sc->sc_rx_slots);
2738 0 : slots > 0; slots--) {
2739 0 : if (++i == sc->sc_rx_slots)
2740 : i = 0;
2741 :
2742 0 : if (em_get_buf(sc, i) != 0)
2743 : break;
2744 :
2745 0 : sc->sc_rx_desc_head = i;
2746 : post = 1;
2747 : }
2748 :
2749 0 : if_rxr_put(&sc->sc_rx_ring, slots);
2750 :
2751 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dma.dma_map,
2752 : 0, sc->sc_rx_dma.dma_map->dm_mapsize,
2753 : BUS_DMASYNC_PREWRITE);
2754 :
2755 0 : return (post);
2756 : }
2757 :
2758 : /*********************************************************************
2759 : *
2760 : * This routine executes in interrupt context. It replenishes
2761 : * the mbufs in the descriptor and sends data which has been
2762 : * dma'ed into host memory to upper layer.
2763 : *
2764 : *********************************************************************/
int
em_rxeof(struct em_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	u_int8_t accept_frame = 0;
	u_int8_t eop = 0;
	u_int16_t len, desc_len, prev_len_adj;
	int i, rv = 0;

	/* Pointer to the receive descriptor being examined. */
	struct em_rx_desc *desc;
	struct em_packet *pkt;
	u_int8_t status;

	/* Nothing posted to the hardware, so nothing can have completed. */
	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		return (0);

	i = sc->sc_rx_desc_tail;

	/* Pick up descriptor writes done by the hardware. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dma.dma_map,
	    0, sc->sc_rx_dma.dma_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	do {
		m = NULL;

		pkt = &sc->sc_rx_pkts_ring[i];
		desc = &sc->sc_rx_desc_ring[i];

		/* DD (descriptor done) clear means the hw has not
		 * finished this slot yet; stop here. */
		status = desc->status;
		if (!ISSET(status, E1000_RXD_STAT_DD))
			break;

		/* pull the mbuf off the ring */
		bus_dmamap_sync(sc->sc_dmat, pkt->pkt_map,
		    0, pkt->pkt_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, pkt->pkt_map);
		m = pkt->pkt_m;
		pkt->pkt_m = NULL;

		KASSERT(m != NULL);

		/* One descriptor consumed; em_rxfill() may refill it. */
		if_rxr_put(&sc->sc_rx_ring, 1);
		rv = 1;

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = letoh16(desc->length);

		if (status & E1000_RXD_STAT_EOP) {
			eop = 1;
			/*
			 * The CRC is normally stripped from the reported
			 * length; if the final fragment is shorter than
			 * the CRC, the overshoot must be trimmed from the
			 * previous fragment instead (prev_len_adj).
			 * i210/i350 already exclude the CRC in hardware.
			 */
			if (desc_len < ETHER_CRC_LEN) {
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			} else if (sc->hw.mac_type == em_i210 ||
			    sc->hw.mac_type == em_i350)
				len = desc_len;
			else
				len = desc_len - ETHER_CRC_LEN;
		} else {
			eop = 0;
			len = desc_len;
		}

		if (desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			u_int8_t last_byte;
			u_int32_t pkt_len = desc_len;

			if (sc->fmp != NULL)
				pkt_len += sc->fmp->m_pkthdr.len;

			/*
			 * TBI_ACCEPT() is the serdes/TBI workaround: a
			 * frame flagged with a carrier-extension error may
			 * still be acceptable; otherwise drop the frame.
			 */
			last_byte = *(mtod(m, caddr_t) + desc_len - 1);
			if (TBI_ACCEPT(&sc->hw, status, desc->errors,
			    pkt_len, last_byte)) {
#ifndef SMALL_KERNEL
				em_tbi_adjust_stats(&sc->hw, &sc->stats,
				    pkt_len, sc->hw.mac_addr);
#endif
				if (len > 0)
					len--;
			} else
				accept_frame = 0;
		}

		if (accept_frame) {
			/* Assign correct length to the current fragment */
			m->m_len = len;

			/*
			 * fmp/lmp track the first and last mbuf of a
			 * multi-descriptor frame across loop iterations.
			 */
			if (sc->fmp == NULL) {
				m->m_pkthdr.len = m->m_len;
				sc->fmp = m; /* Store the first mbuf */
				sc->lmp = m;
			} else {
				/* Chain mbuf's together */
				m->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if
				 * we received less than 4 bytes in the last
				 * descriptor.
				 */
				if (prev_len_adj > 0) {
					sc->lmp->m_len -= prev_len_adj;
					sc->fmp->m_pkthdr.len -= prev_len_adj;
				}
				sc->lmp->m_next = m;
				sc->lmp = m;
				sc->fmp->m_pkthdr.len += m->m_len;
			}

			if (eop) {
				/* Complete frame: hand it to the stack. */
				m = sc->fmp;

				em_receive_checksum(sc, desc, m);
#if NVLAN > 0
				if (desc->status & E1000_RXD_STAT_VP) {
					m->m_pkthdr.ether_vtag =
					    letoh16(desc->special);
					m->m_flags |= M_VLANTAG;
				}
#endif
				ml_enqueue(&ml, m);

				sc->fmp = NULL;
				sc->lmp = NULL;
			}
		} else {
			/* Drop the whole partially-assembled frame. */
			sc->dropped_pkts++;

			if (sc->fmp != NULL) {
				m_freem(sc->fmp);
				sc->fmp = NULL;
				sc->lmp = NULL;
			}

			m_freem(m);
		}

		/* Advance our pointers to the next descriptor. */
		if (++i == sc->sc_rx_slots)
			i = 0;
	} while (if_rxr_inuse(&sc->sc_rx_ring) > 0);

	/* Re-arm the ring mapping for the hardware's next writes. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dma.dma_map,
	    0, sc->sc_rx_dma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->sc_rx_desc_tail = i;

	if_input(ifp, &ml);

	return (rv);
}
2920 :
2921 : /*********************************************************************
2922 : *
2923 : * Verify that the hardware indicated that the checksum is valid.
2924 : * Inform the stack about the status of checksum so that stack
2925 : * doesn't spend time verifying the checksum.
2926 : *
2927 : *********************************************************************/
2928 : void
2929 0 : em_receive_checksum(struct em_softc *sc, struct em_rx_desc *rx_desc,
2930 : struct mbuf *mp)
2931 : {
2932 : /* 82543 or newer only */
2933 0 : if ((sc->hw.mac_type < em_82543) ||
2934 : /* Ignore Checksum bit is set */
2935 0 : (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2936 0 : mp->m_pkthdr.csum_flags = 0;
2937 0 : return;
2938 : }
2939 :
2940 0 : if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2941 : /* Did it pass? */
2942 0 : if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2943 : /* IP Checksum Good */
2944 0 : mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
2945 :
2946 0 : } else
2947 0 : mp->m_pkthdr.csum_flags = 0;
2948 : }
2949 :
2950 0 : if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2951 : /* Did it pass? */
2952 0 : if (!(rx_desc->errors & E1000_RXD_ERR_TCPE))
2953 0 : mp->m_pkthdr.csum_flags |=
2954 : M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
2955 : }
2956 0 : }
2957 :
2958 : /*
2959 : * This turns on the hardware offload of the VLAN
2960 : * tag insertion and strip
2961 : */
2962 : void
2963 0 : em_enable_hw_vlans(struct em_softc *sc)
2964 : {
2965 : uint32_t ctrl;
2966 :
2967 0 : ctrl = E1000_READ_REG(&sc->hw, CTRL);
2968 0 : ctrl |= E1000_CTRL_VME;
2969 0 : E1000_WRITE_REG(&sc->hw, CTRL, ctrl);
2970 0 : }
2971 :
2972 : void
2973 0 : em_enable_intr(struct em_softc *sc)
2974 : {
2975 0 : E1000_WRITE_REG(&sc->hw, IMS, (IMS_ENABLE_MASK));
2976 0 : }
2977 :
2978 : void
2979 0 : em_disable_intr(struct em_softc *sc)
2980 : {
2981 : /*
2982 : * The first version of 82542 had an errata where when link
2983 : * was forced it would stay up even if the cable was disconnected
2984 : * Sequence errors were used to detect the disconnect and then
2985 : * the driver would unforce the link. This code is in the ISR.
2986 : * For this to work correctly the Sequence error interrupt had
2987 : * to be enabled all the time.
2988 : */
2989 :
2990 0 : if (sc->hw.mac_type == em_82542_rev2_0)
2991 0 : E1000_WRITE_REG(&sc->hw, IMC, (0xffffffff & ~E1000_IMC_RXSEQ));
2992 : else
2993 0 : E1000_WRITE_REG(&sc->hw, IMC, 0xffffffff);
2994 0 : }
2995 :
2996 : void
2997 0 : em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2998 : {
2999 0 : struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3000 : pcireg_t val;
3001 :
3002 0 : val = pci_conf_read(pa->pa_pc, pa->pa_tag, reg & ~0x3);
3003 0 : if (reg & 0x2) {
3004 0 : val &= 0x0000ffff;
3005 0 : val |= (*value << 16);
3006 0 : } else {
3007 0 : val &= 0xffff0000;
3008 0 : val |= *value;
3009 : }
3010 0 : pci_conf_write(pa->pa_pc, pa->pa_tag, reg & ~0x3, val);
3011 0 : }
3012 :
3013 : void
3014 0 : em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3015 : {
3016 0 : struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3017 : pcireg_t val;
3018 :
3019 0 : val = pci_conf_read(pa->pa_pc, pa->pa_tag, reg & ~0x3);
3020 0 : if (reg & 0x2)
3021 0 : *value = (val >> 16) & 0xffff;
3022 : else
3023 0 : *value = val & 0xffff;
3024 0 : }
3025 :
3026 : void
3027 0 : em_pci_set_mwi(struct em_hw *hw)
3028 : {
3029 0 : struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3030 :
3031 0 : pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3032 0 : (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE));
3033 0 : }
3034 :
3035 : void
3036 0 : em_pci_clear_mwi(struct em_hw *hw)
3037 : {
3038 0 : struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3039 :
3040 0 : pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3041 0 : (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE));
3042 0 : }
3043 :
3044 : /*
3045 : * We may eventually really do this, but its unnecessary
3046 : * for now so we just return unsupported.
3047 : */
int32_t
em_read_pcie_cap_reg(struct em_hw *hw, uint32_t reg, uint16_t *value)
{
	/* Intentionally unimplemented (see comment above); *value is
	 * never written and callers always receive a failure code. */
	return -E1000_NOT_IMPLEMENTED;
}
3053 :
3054 : /*********************************************************************
3055 : * 82544 Coexistence issue workaround.
3056 : * There are 2 issues.
3057 : * 1. Transmit Hang issue.
3058 : * To detect this issue, following equation can be used...
3059 : * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3060 : * If SUM[3:0] is in between 1 to 4, we will have this issue.
3061 : *
3062 : * 2. DAC issue.
3063 : * To detect this issue, following equation can be used...
3064 : * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3065 : * If SUM[3:0] is in between 9 to c, we will have this issue.
3066 : *
3067 : *
3068 : * WORKAROUND:
3069 : * Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c (DAC)
3070 : *
3071 : *** *********************************************************************/
3072 : u_int32_t
3073 0 : em_fill_descriptors(u_int64_t address, u_int32_t length,
3074 : PDESC_ARRAY desc_array)
3075 : {
3076 : /* Since issue is sensitive to length and address.*/
3077 : /* Let us first check the address...*/
3078 : u_int32_t safe_terminator;
3079 0 : if (length <= 4) {
3080 0 : desc_array->descriptor[0].address = address;
3081 0 : desc_array->descriptor[0].length = length;
3082 0 : desc_array->elements = 1;
3083 0 : return desc_array->elements;
3084 : }
3085 0 : safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3086 : /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
3087 0 : if (safe_terminator == 0 ||
3088 0 : (safe_terminator > 4 &&
3089 0 : safe_terminator < 9) ||
3090 0 : (safe_terminator > 0xC &&
3091 : safe_terminator <= 0xF)) {
3092 0 : desc_array->descriptor[0].address = address;
3093 0 : desc_array->descriptor[0].length = length;
3094 0 : desc_array->elements = 1;
3095 0 : return desc_array->elements;
3096 : }
3097 :
3098 0 : desc_array->descriptor[0].address = address;
3099 0 : desc_array->descriptor[0].length = length - 4;
3100 0 : desc_array->descriptor[1].address = address + (length - 4);
3101 0 : desc_array->descriptor[1].length = 4;
3102 0 : desc_array->elements = 2;
3103 0 : return desc_array->elements;
3104 0 : }
3105 :
3106 : /*
3107 : * Disable the L0S and L1 LINK states.
3108 : */
3109 : void
3110 0 : em_disable_aspm(struct em_softc *sc)
3111 : {
3112 0 : int offset;
3113 : pcireg_t val;
3114 :
3115 0 : switch (sc->hw.mac_type) {
3116 : case em_82571:
3117 : case em_82572:
3118 : case em_82573:
3119 : case em_82574:
3120 : break;
3121 : default:
3122 0 : return;
3123 : }
3124 :
3125 0 : if (!pci_get_capability(sc->osdep.em_pa.pa_pc, sc->osdep.em_pa.pa_tag,
3126 : PCI_CAP_PCIEXPRESS, &offset, NULL))
3127 0 : return;
3128 :
3129 : /* Disable PCIe Active State Power Management (ASPM). */
3130 0 : val = pci_conf_read(sc->osdep.em_pa.pa_pc, sc->osdep.em_pa.pa_tag,
3131 0 : offset + PCI_PCIE_LCSR);
3132 :
3133 0 : switch (sc->hw.mac_type) {
3134 : case em_82571:
3135 : case em_82572:
3136 0 : val &= ~PCI_PCIE_LCSR_ASPM_L1;
3137 0 : break;
3138 : case em_82573:
3139 : case em_82574:
3140 0 : val &= ~(PCI_PCIE_LCSR_ASPM_L0S |
3141 : PCI_PCIE_LCSR_ASPM_L1);
3142 0 : break;
3143 : default:
3144 : break;
3145 : }
3146 :
3147 0 : pci_conf_write(sc->osdep.em_pa.pa_pc, sc->osdep.em_pa.pa_tag,
3148 0 : offset + PCI_PCIE_LCSR, val);
3149 0 : }
3150 :
3151 : /*
3152 : * em_flush_tx_ring - remove all descriptors from the tx_ring
3153 : *
3154 : * We want to clear all pending descriptors from the TX ring.
3155 : * zeroing happens when the HW reads the regs. We assign the ring itself as
3156 : * the data of the next descriptor. We don't care about the data we are about
3157 : * to reset the HW.
3158 : */
void
em_flush_tx_ring(struct em_softc *sc)
{
	uint32_t tctl, txd_lower = E1000_TXD_CMD_IFCS;
	uint16_t size = 512;
	struct em_tx_desc *txd;

	KASSERT(sc->sc_tx_desc_ring != NULL);

	/* Ensure the transmitter is enabled so it consumes the ring. */
	tctl = EM_READ_REG(&sc->hw, E1000_TCTL);
	EM_WRITE_REG(&sc->hw, E1000_TCTL, tctl | E1000_TCTL_EN);

	KASSERT(EM_READ_REG(&sc->hw, E1000_TDT) == sc->sc_tx_desc_head);

	/*
	 * Queue one dummy descriptor pointing at the ring itself; the
	 * data is irrelevant since the hardware is about to be reset.
	 */
	txd = &sc->sc_tx_desc_ring[sc->sc_tx_desc_head];
	txd->buffer_addr = sc->sc_tx_dma.dma_map->dm_segs[0].ds_addr;
	txd->lower.data = htole32(txd_lower | size);
	txd->upper.data = 0;

	/* flush descriptors to memory before notifying the HW */
	bus_space_barrier(sc->osdep.mem_bus_space_tag,
	    sc->osdep.mem_bus_space_handle, 0, 0, BUS_SPACE_BARRIER_WRITE);

	/* Advance the tail with wrap-around. */
	if (++sc->sc_tx_desc_head == sc->sc_tx_slots)
		sc->sc_tx_desc_head = 0;

	/* Tell the hardware about the new tail, then give it time to
	 * fetch and process the dummy descriptor. */
	EM_WRITE_REG(&sc->hw, E1000_TDT, sc->sc_tx_desc_head);
	bus_space_barrier(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
	    0, 0, BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);
	usec_delay(250);
}
3190 :
3191 : /*
3192 : * em_flush_rx_ring - remove all descriptors from the rx_ring
3193 : *
3194 : * Mark all descriptors in the RX ring as consumed and disable the rx ring
3195 : */
void
em_flush_rx_ring(struct em_softc *sc)
{
	uint32_t rctl, rxdctl;

	/* Stop the receiver and wait for it to quiesce. */
	rctl = EM_READ_REG(&sc->hw, E1000_RCTL);
	EM_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
	E1000_WRITE_FLUSH(&sc->hw);
	usec_delay(150);

	rxdctl = EM_READ_REG(&sc->hw, E1000_RXDCTL);
	/* zero the lower 14 bits (prefetch and host thresholds) */
	rxdctl &= 0xffffc000;
	/*
	 * update thresholds: prefetch threshold to 31, host threshold to 1
	 * and make sure the granularity is "descriptors" and not "cache lines"
	 */
	rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
	EM_WRITE_REG(&sc->hw, E1000_RXDCTL, rxdctl);

	/* momentarily enable the RX ring for the changes to take effect */
	EM_WRITE_REG(&sc->hw, E1000_RCTL, rctl | E1000_RCTL_EN);
	E1000_WRITE_FLUSH(&sc->hw);
	usec_delay(150);
	/* Leave the receiver disabled; the caller resets the hardware. */
	EM_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
}
3222 :
3223 : /*
3224 : * em_flush_desc_rings - remove all descriptors from the descriptor rings
3225 : *
3226 : * In i219, the descriptor rings must be emptied before resetting the HW
3227 : * or before changing the device state to D3 during runtime (runtime PM).
3228 : *
3229 : * Failure to do this will cause the HW to enter a unit hang state which can
3230 : * only be released by PCI reset on the device
3231 : *
3232 : */
void
em_flush_desc_rings(struct em_softc *sc)
{
	struct pci_attach_args *pa = &sc->osdep.em_pa;
	uint32_t fextnvm11, tdlen;
	uint16_t hang_state;

	/* First, disable MULR fix in FEXTNVM11 */
	fextnvm11 = EM_READ_REG(&sc->hw, E1000_FEXTNVM11);
	fextnvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
	EM_WRITE_REG(&sc->hw, E1000_FEXTNVM11, fextnvm11);

	/* do nothing if we're not in faulty state, or if the queue is empty */
	tdlen = EM_READ_REG(&sc->hw, E1000_TDLEN);
	hang_state = pci_conf_read(pa->pa_pc, pa->pa_tag, PCICFG_DESC_RING_STATUS);
	if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
		return;
	em_flush_tx_ring(sc);

	/* recheck, maybe the fault is caused by the rx ring */
	hang_state = pci_conf_read(pa->pa_pc, pa->pa_tag, PCICFG_DESC_RING_STATUS);
	if (hang_state & FLUSH_DESC_REQUIRED)
		em_flush_rx_ring(sc);
}
3257 :
3258 : #ifndef SMALL_KERNEL
3259 : /**********************************************************************
3260 : *
3261 : * Update the board statistics counters.
3262 : *
3263 : **********************************************************************/
void
em_update_stats_counters(struct em_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	/*
	 * Accumulate the hardware statistics registers (which clear on
	 * read) into the softc, then derive the ifnet error counters.
	 */
	sc->stats.crcerrs += E1000_READ_REG(&sc->hw, CRCERRS);
	sc->stats.mpc += E1000_READ_REG(&sc->hw, MPC);
	sc->stats.ecol += E1000_READ_REG(&sc->hw, ECOL);

	sc->stats.latecol += E1000_READ_REG(&sc->hw, LATECOL);
	sc->stats.colc += E1000_READ_REG(&sc->hw, COLC);

	sc->stats.ruc += E1000_READ_REG(&sc->hw, RUC);
	sc->stats.roc += E1000_READ_REG(&sc->hw, ROC);

	/* These counters only exist on 82543 and newer. */
	if (sc->hw.mac_type >= em_82543) {
		sc->stats.algnerrc +=
		    E1000_READ_REG(&sc->hw, ALGNERRC);
		sc->stats.rxerrc +=
		    E1000_READ_REG(&sc->hw, RXERRC);
		sc->stats.cexterr +=
		    E1000_READ_REG(&sc->hw, CEXTERR);
	}

#ifdef EM_DEBUG
	if (sc->hw.media_type == em_media_type_copper ||
	    (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU)) {
		sc->stats.symerrs += E1000_READ_REG(&sc->hw, SYMERRS);
		sc->stats.sec += E1000_READ_REG(&sc->hw, SEC);
	}
	sc->stats.scc += E1000_READ_REG(&sc->hw, SCC);

	sc->stats.mcc += E1000_READ_REG(&sc->hw, MCC);
	sc->stats.dc += E1000_READ_REG(&sc->hw, DC);
	sc->stats.rlec += E1000_READ_REG(&sc->hw, RLEC);
	sc->stats.xonrxc += E1000_READ_REG(&sc->hw, XONRXC);
	sc->stats.xontxc += E1000_READ_REG(&sc->hw, XONTXC);
	sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, XOFFRXC);
	sc->stats.xofftxc += E1000_READ_REG(&sc->hw, XOFFTXC);
	sc->stats.fcruc += E1000_READ_REG(&sc->hw, FCRUC);
	sc->stats.prc64 += E1000_READ_REG(&sc->hw, PRC64);
	sc->stats.prc127 += E1000_READ_REG(&sc->hw, PRC127);
	sc->stats.prc255 += E1000_READ_REG(&sc->hw, PRC255);
	sc->stats.prc511 += E1000_READ_REG(&sc->hw, PRC511);
	sc->stats.prc1023 += E1000_READ_REG(&sc->hw, PRC1023);
	sc->stats.prc1522 += E1000_READ_REG(&sc->hw, PRC1522);
	sc->stats.gprc += E1000_READ_REG(&sc->hw, GPRC);
	sc->stats.bprc += E1000_READ_REG(&sc->hw, BPRC);
	sc->stats.mprc += E1000_READ_REG(&sc->hw, MPRC);
	sc->stats.gptc += E1000_READ_REG(&sc->hw, GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	sc->stats.gorcl += E1000_READ_REG(&sc->hw, GORCL);
	sc->stats.gorch += E1000_READ_REG(&sc->hw, GORCH);
	sc->stats.gotcl += E1000_READ_REG(&sc->hw, GOTCL);
	sc->stats.gotch += E1000_READ_REG(&sc->hw, GOTCH);

	sc->stats.rnbc += E1000_READ_REG(&sc->hw, RNBC);
	sc->stats.rfc += E1000_READ_REG(&sc->hw, RFC);
	sc->stats.rjc += E1000_READ_REG(&sc->hw, RJC);

	sc->stats.torl += E1000_READ_REG(&sc->hw, TORL);
	sc->stats.torh += E1000_READ_REG(&sc->hw, TORH);
	sc->stats.totl += E1000_READ_REG(&sc->hw, TOTL);
	sc->stats.toth += E1000_READ_REG(&sc->hw, TOTH);

	sc->stats.tpr += E1000_READ_REG(&sc->hw, TPR);
	sc->stats.tpt += E1000_READ_REG(&sc->hw, TPT);
	sc->stats.ptc64 += E1000_READ_REG(&sc->hw, PTC64);
	sc->stats.ptc127 += E1000_READ_REG(&sc->hw, PTC127);
	sc->stats.ptc255 += E1000_READ_REG(&sc->hw, PTC255);
	sc->stats.ptc511 += E1000_READ_REG(&sc->hw, PTC511);
	sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, PTC1023);
	sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, PTC1522);
	sc->stats.mptc += E1000_READ_REG(&sc->hw, MPTC);
	sc->stats.bptc += E1000_READ_REG(&sc->hw, BPTC);

	if (sc->hw.mac_type >= em_82543) {
		sc->stats.tncrs +=
		    E1000_READ_REG(&sc->hw, TNCRS);
		sc->stats.tsctc +=
		    E1000_READ_REG(&sc->hw, TSCTC);
		sc->stats.tsctfc +=
		    E1000_READ_REG(&sc->hw, TSCTFC);
	}
#endif

	/* Fill out the OS statistics structure */
	ifp->if_collisions = sc->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors =
	    sc->dropped_pkts +
	    sc->stats.rxerrc +
	    sc->stats.crcerrs +
	    sc->stats.algnerrc +
	    sc->stats.ruc + sc->stats.roc +
	    sc->stats.mpc + sc->stats.cexterr +
	    sc->rx_overruns;

	/* Tx Errors */
	ifp->if_oerrors = sc->stats.ecol + sc->stats.latecol +
	    sc->watchdog_events;
}
3370 :
3371 : #ifdef EM_DEBUG
3372 : /**********************************************************************
3373 : *
3374 : * This routine is called only when IFF_DEBUG is enabled.
3375 : * This routine provides a way to take a look at important statistics
3376 : * maintained by the driver and hardware.
3377 : *
3378 : **********************************************************************/
void
em_print_hw_stats(struct em_softc *sc)
{
	const char * const unit = DEVNAME(sc);

	/*
	 * Dump the accumulated softc statistics (filled in by
	 * em_update_stats_counters()); 64-bit counters are cast to
	 * long long for the %lld format.
	 */
	printf("%s: Excessive collisions = %lld\n", unit,
		(long long)sc->stats.ecol);
	printf("%s: Symbol errors = %lld\n", unit,
		(long long)sc->stats.symerrs);
	printf("%s: Sequence errors = %lld\n", unit,
		(long long)sc->stats.sec);
	printf("%s: Defer count = %lld\n", unit,
		(long long)sc->stats.dc);

	printf("%s: Missed Packets = %lld\n", unit,
		(long long)sc->stats.mpc);
	printf("%s: Receive No Buffers = %lld\n", unit,
		(long long)sc->stats.rnbc);
	/* RLEC is inaccurate on some hardware, calculate our own */
	printf("%s: Receive Length Errors = %lld\n", unit,
		((long long)sc->stats.roc +
		(long long)sc->stats.ruc));
	printf("%s: Receive errors = %lld\n", unit,
		(long long)sc->stats.rxerrc);
	printf("%s: Crc errors = %lld\n", unit,
		(long long)sc->stats.crcerrs);
	printf("%s: Alignment errors = %lld\n", unit,
		(long long)sc->stats.algnerrc);
	printf("%s: Carrier extension errors = %lld\n", unit,
		(long long)sc->stats.cexterr);

	/* Driver-maintained (software) counters. */
	printf("%s: RX overruns = %ld\n", unit,
		sc->rx_overruns);
	printf("%s: watchdog timeouts = %ld\n", unit,
		sc->watchdog_events);

	printf("%s: XON Rcvd = %lld\n", unit,
		(long long)sc->stats.xonrxc);
	printf("%s: XON Xmtd = %lld\n", unit,
		(long long)sc->stats.xontxc);
	printf("%s: XOFF Rcvd = %lld\n", unit,
		(long long)sc->stats.xoffrxc);
	printf("%s: XOFF Xmtd = %lld\n", unit,
		(long long)sc->stats.xofftxc);

	printf("%s: Good Packets Rcvd = %lld\n", unit,
		(long long)sc->stats.gprc);
	printf("%s: Good Packets Xmtd = %lld\n", unit,
		(long long)sc->stats.gptc);
}
3429 : #endif
3430 : #endif /* !SMALL_KERNEL */
|