Line data Source code
1 : /* $OpenBSD: if_bnx.c,v 1.125 2018/03/10 10:51:46 sthen Exp $ */
2 :
3 : /*-
4 : * Copyright (c) 2006 Broadcom Corporation
5 : * David Christensen <davidch@broadcom.com>. All rights reserved.
6 : *
7 : * Redistribution and use in source and binary forms, with or without
8 : * modification, are permitted provided that the following conditions
9 : * are met:
10 : *
11 : * 1. Redistributions of source code must retain the above copyright
12 : * notice, this list of conditions and the following disclaimer.
13 : * 2. Redistributions in binary form must reproduce the above copyright
14 : * notice, this list of conditions and the following disclaimer in the
15 : * documentation and/or other materials provided with the distribution.
16 : * 3. Neither the name of Broadcom Corporation nor the name of its contributors
17 : * may be used to endorse or promote products derived from this software
18 : * without specific prior written consent.
19 : *
20 : * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 : * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 : * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 : * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
24 : * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 : * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 : * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 : * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 : * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 : * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 : * THE POSSIBILITY OF SUCH DAMAGE.
31 : */
32 :
33 : /*
34 : * The following controllers are supported by this driver:
35 : * BCM5706C A2, A3
36 : * BCM5706S A2, A3
37 : * BCM5708C B1, B2
38 : * BCM5708S B1, B2
39 : * BCM5709C A1, C0
40 : * BCM5709S A1, C0
41 : * BCM5716 C0
42 : *
43 : * The following controllers are not supported by this driver:
44 : * BCM5706C A0, A1
45 : * BCM5706S A0, A1
46 : * BCM5708C A0, B0
47 : * BCM5708S A0, B0
48 : * BCM5709C A0, B0, B1, B2 (pre-production)
49 : * BCM5709S A0, B0, B1, B2 (pre-production)
50 : */
51 :
52 : #include <dev/pci/if_bnxreg.h>
53 :
54 : struct bnx_firmware {
55 : char *filename;
56 : struct bnx_firmware_header *fw;
57 :
58 : u_int32_t *bnx_COM_FwText;
59 : u_int32_t *bnx_COM_FwData;
60 : u_int32_t *bnx_COM_FwRodata;
61 : u_int32_t *bnx_COM_FwBss;
62 : u_int32_t *bnx_COM_FwSbss;
63 :
64 : u_int32_t *bnx_RXP_FwText;
65 : u_int32_t *bnx_RXP_FwData;
66 : u_int32_t *bnx_RXP_FwRodata;
67 : u_int32_t *bnx_RXP_FwBss;
68 : u_int32_t *bnx_RXP_FwSbss;
69 :
70 : u_int32_t *bnx_TPAT_FwText;
71 : u_int32_t *bnx_TPAT_FwData;
72 : u_int32_t *bnx_TPAT_FwRodata;
73 : u_int32_t *bnx_TPAT_FwBss;
74 : u_int32_t *bnx_TPAT_FwSbss;
75 :
76 : u_int32_t *bnx_TXP_FwText;
77 : u_int32_t *bnx_TXP_FwData;
78 : u_int32_t *bnx_TXP_FwRodata;
79 : u_int32_t *bnx_TXP_FwBss;
80 : u_int32_t *bnx_TXP_FwSbss;
81 : };
82 :
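/* CPU firmware images, loaded on demand by bnx_read_firmware(). */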
83 : struct bnx_firmware bnx_firmwares[] = {
84 : { "bnx-b06", NULL },
85 : { "bnx-b09", NULL }
86 : };
87 : #define BNX_FW_B06 0
88 : #define BNX_FW_B09 1
89 :
90 : struct bnx_rv2p {
91 : char *filename;
92 : struct bnx_rv2p_header *fw;
93 :
94 : u_int32_t *bnx_rv2p_proc1;
95 : u_int32_t *bnx_rv2p_proc2;
96 : };
97 :
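/* RV2P processor images, loaded on demand by bnx_read_rv2p(). */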
98 : struct bnx_rv2p bnx_rv2ps[] = {
99 : { "bnx-rv2p", NULL },
100 : { "bnx-xi-rv2p", NULL },
101 : { "bnx-xi90-rv2p", NULL }
102 : };
103 : #define BNX_RV2P 0
104 : #define BNX_XI_RV2P 1
105 : #define BNX_XI90_RV2P 2
106 :
107 : void nswaph(u_int32_t *p, int wcount);
108 :
109 : /****************************************************************************/
110 : /* BNX Driver Version */
111 : /****************************************************************************/
112 :
113 : #define BNX_DRIVER_VERSION "v0.9.6"
114 :
115 : /****************************************************************************/
116 : /* BNX Debug Options */
117 : /****************************************************************************/
118 : #ifdef BNX_DEBUG
119 : u_int32_t bnx_debug = BNX_WARN;
120 :
121 : /* 0 = Never */
122 : /* 1 = 1 in 2,147,483,648 */
123 : /* 256 = 1 in 8,388,608 */
124 : /* 2048 = 1 in 1,048,576 */
125 : /* 65536 = 1 in 32,768 */
126 : /* 1048576 = 1 in 2,048 */
127 : /* 268435456 = 1 in 8 */
128 : /* 536870912 = 1 in 4 */
129 : /* 1073741824 = 1 in 2 */
130 :
131 : /* Controls how often the l2_fhdr frame error check will fail. */
132 : int bnx_debug_l2fhdr_status_check = 0;
133 :
134 : /* Controls how often the unexpected attention check will fail. */
135 : int bnx_debug_unexpected_attention = 0;
136 :
137 : /* Controls how often to simulate an mbuf allocation failure. */
138 : int bnx_debug_mbuf_allocation_failure = 0;
139 :
140 : /* Controls how often to simulate a DMA mapping failure. */
141 : int bnx_debug_dma_map_addr_failure = 0;
142 :
143 : /* Controls how often to simulate a bootcode failure. */
144 : int bnx_debug_bootcode_running_failure = 0;
145 : #endif
146 :
147 : /****************************************************************************/
148 : /* PCI Device ID Table */
149 : /* */
150 : /* Used by bnx_probe() to identify the devices supported by this driver. */
151 : /****************************************************************************/
152 : const struct pci_matchid bnx_devices[] = {
153 : { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706 },
154 : { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S },
155 : { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708 },
156 : { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S },
157 : { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709 },
158 : { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S },
159 : { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716 },
160 : { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716S }
161 : };
162 :
163 : /****************************************************************************/
164 : /* Supported Flash NVRAM device data. */
165 : /****************************************************************************/
166 : static struct flash_spec flash_table[] =
167 : {
168 : #define BUFFERED_FLAGS (BNX_NV_BUFFERED | BNX_NV_TRANSLATE)
169 : #define NONBUFFERED_FLAGS (BNX_NV_WREN)
170 :
171 : /* Slow EEPROM */
172 : {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
173 : BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
174 : SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
175 : "EEPROM - slow"},
176 : /* Expansion entry 0001 */
177 : {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
178 : NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 : SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
180 : "Entry 0001"},
181 : /* Saifun SA25F010 (non-buffered flash) */
182 : /* strap, cfg1, & write1 need updates */
183 : {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
184 : 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
185 : SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
186 : "Non-buffered flash (128kB)"},
187 : /* Saifun SA25F020 (non-buffered flash) */
188 : /* strap, cfg1, & write1 need updates */
189 : {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
190 : NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
191 : SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
192 : "Non-buffered flash (256kB)"},
193 : /* Expansion entry 0100 */
194 : {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
195 : NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
196 : SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
197 : "Entry 0100"},
198 : /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
199 : {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
200 : NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
201 : ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
202 : "Entry 0101: ST M45PE10 (128kB non-buffered)"},
203 : /* Entry 0110: ST M45PE20 (non-buffered flash)*/
204 : {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
205 : NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
206 : ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
207 : "Entry 0110: ST M45PE20 (256kB non-buffered)"},
208 : /* Saifun SA25F005 (non-buffered flash) */
209 : /* strap, cfg1, & write1 need updates */
210 : {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
211 : NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
212 : SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
213 : "Non-buffered flash (64kB)"},
214 : /* Fast EEPROM */
215 : {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
216 : BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
217 : SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
218 : "EEPROM - fast"},
219 : /* Expansion entry 1001 */
220 : {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
221 : NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
222 : SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
223 : "Entry 1001"},
224 : /* Expansion entry 1010 */
225 : {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
226 : NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
227 : SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
228 : "Entry 1010"},
229 : /* ATMEL AT45DB011B (buffered flash) */
230 : {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
231 : BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
232 : BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
233 : "Buffered flash (128kB)"},
234 : /* Expansion entry 1100 */
235 : {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
236 : NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
237 : SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
238 : "Entry 1100"},
239 : /* Expansion entry 1101 */
240 : {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
241 : NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
242 : SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
243 : "Entry 1101"},
244 : /* Atmel Expansion entry 1110 */
245 : {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
246 : BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
247 : BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
248 : "Entry 1110 (Atmel)"},
249 : /* ATMEL AT45DB021B (buffered flash) */
250 : {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
251 : BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
252 : BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
253 : "Buffered flash (256kB)"},
254 : };
255 :
256 : /*
257 : * The BCM5709 controllers transparently handle the
258 : * differences between Atmel 264 byte pages and all
259 : * flash devices which use 256 byte pages, so no
260 : * logical-to-physical mapping is required in the
261 : * driver.
262 : */
263 : static struct flash_spec flash_5709 = {
264 : .flags = BNX_NV_BUFFERED,
265 : .page_bits = BCM5709_FLASH_PAGE_BITS,
266 : .page_size = BCM5709_FLASH_PAGE_SIZE,
267 : .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
268 : .total_size = BUFFERED_FLASH_TOTAL_SIZE * 2,
269 : .name = "5709 buffered flash (256kB)",
270 : };
271 :
272 : /****************************************************************************/
273 : /* OpenBSD device entry points. */
274 : /****************************************************************************/
275 : int bnx_probe(struct device *, void *, void *);
276 : void bnx_attach(struct device *, struct device *, void *);
277 : void bnx_attachhook(struct device *);
278 : int bnx_read_firmware(struct bnx_softc *sc, int);
279 : int bnx_read_rv2p(struct bnx_softc *sc, int);
280 : #if 0
281 : void bnx_detach(void *);
282 : #endif
283 :
284 : /****************************************************************************/
285 : /* BNX Debug Data Structure Dump Routines */
286 : /****************************************************************************/
287 : #ifdef BNX_DEBUG
288 : void bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
289 : void bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
290 : void bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
291 : void bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
292 : void bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
293 : void bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
294 : void bnx_dump_tx_chain(struct bnx_softc *, int, int);
295 : void bnx_dump_rx_chain(struct bnx_softc *, int, int);
296 : void bnx_dump_status_block(struct bnx_softc *);
297 : void bnx_dump_stats_block(struct bnx_softc *);
298 : void bnx_dump_driver_state(struct bnx_softc *);
299 : void bnx_dump_hw_state(struct bnx_softc *);
300 : void bnx_breakpoint(struct bnx_softc *);
301 : #endif
302 :
303 : /****************************************************************************/
304 : /* BNX Register/Memory Access Routines */
305 : /****************************************************************************/
306 : u_int32_t bnx_reg_rd_ind(struct bnx_softc *, u_int32_t);
307 : void bnx_reg_wr_ind(struct bnx_softc *, u_int32_t, u_int32_t);
308 : void bnx_ctx_wr(struct bnx_softc *, u_int32_t, u_int32_t, u_int32_t);
309 : int bnx_miibus_read_reg(struct device *, int, int);
310 : void bnx_miibus_write_reg(struct device *, int, int, int);
311 : void bnx_miibus_statchg(struct device *);
312 :
313 : /****************************************************************************/
314 : /* BNX NVRAM Access Routines */
315 : /****************************************************************************/
316 : int bnx_acquire_nvram_lock(struct bnx_softc *);
317 : int bnx_release_nvram_lock(struct bnx_softc *);
318 : void bnx_enable_nvram_access(struct bnx_softc *);
319 : void bnx_disable_nvram_access(struct bnx_softc *);
320 : int bnx_nvram_read_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
321 : u_int32_t);
322 : int bnx_init_nvram(struct bnx_softc *);
323 : int bnx_nvram_read(struct bnx_softc *, u_int32_t, u_int8_t *, int);
324 : int bnx_nvram_test(struct bnx_softc *);
325 : #ifdef BNX_NVRAM_WRITE_SUPPORT
326 : int bnx_enable_nvram_write(struct bnx_softc *);
327 : void bnx_disable_nvram_write(struct bnx_softc *);
328 : int bnx_nvram_erase_page(struct bnx_softc *, u_int32_t);
329 : int bnx_nvram_write_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
330 : u_int32_t);
331 : int bnx_nvram_write(struct bnx_softc *, u_int32_t, u_int8_t *, int);
332 : #endif
333 :
334 : /****************************************************************************/
335 : /* Media, DMA allocation and resource management routines. */
336 : /****************************************************************************/
337 : void bnx_get_media(struct bnx_softc *);
338 : void bnx_init_media(struct bnx_softc *);
339 : int bnx_dma_alloc(struct bnx_softc *);
340 : void bnx_dma_free(struct bnx_softc *);
341 : void bnx_release_resources(struct bnx_softc *);
342 :
343 : /****************************************************************************/
344 : /* BNX Firmware Synchronization and Load */
345 : /****************************************************************************/
346 : int bnx_fw_sync(struct bnx_softc *, u_int32_t);
347 : void bnx_load_rv2p_fw(struct bnx_softc *, u_int32_t *, u_int32_t,
348 : u_int32_t);
349 : void bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
350 : struct fw_info *);
351 : void bnx_init_cpus(struct bnx_softc *);
352 :
353 : void bnx_stop(struct bnx_softc *);
354 : int bnx_reset(struct bnx_softc *, u_int32_t);
355 : int bnx_chipinit(struct bnx_softc *);
356 : int bnx_blockinit(struct bnx_softc *);
357 : int bnx_get_buf(struct bnx_softc *, u_int16_t *, u_int16_t *, u_int32_t *);
358 :
359 : int bnx_init_tx_chain(struct bnx_softc *);
360 : void bnx_init_tx_context(struct bnx_softc *);
361 : int bnx_fill_rx_chain(struct bnx_softc *);
362 : void bnx_init_rx_context(struct bnx_softc *);
363 : int bnx_init_rx_chain(struct bnx_softc *);
364 : void bnx_free_rx_chain(struct bnx_softc *);
365 : void bnx_free_tx_chain(struct bnx_softc *);
366 : void bnx_rxrefill(void *);
367 :
368 : int bnx_tx_encap(struct bnx_softc *, struct mbuf *, int *);
369 : void bnx_start(struct ifqueue *);
370 : int bnx_ioctl(struct ifnet *, u_long, caddr_t);
371 : void bnx_watchdog(struct ifnet *);
372 : int bnx_ifmedia_upd(struct ifnet *);
373 : void bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
374 : void bnx_init(void *);
375 : void bnx_mgmt_init(struct bnx_softc *sc);
376 :
377 : void bnx_init_context(struct bnx_softc *);
378 : void bnx_get_mac_addr(struct bnx_softc *);
379 : void bnx_set_mac_addr(struct bnx_softc *);
380 : void bnx_phy_intr(struct bnx_softc *);
381 : void bnx_rx_intr(struct bnx_softc *);
382 : void bnx_tx_intr(struct bnx_softc *);
383 : void bnx_disable_intr(struct bnx_softc *);
384 : void bnx_enable_intr(struct bnx_softc *);
385 :
386 : int bnx_intr(void *);
387 : void bnx_iff(struct bnx_softc *);
388 : void bnx_stats_update(struct bnx_softc *);
389 : void bnx_tick(void *);
390 :
391 : /****************************************************************************/
392 : /* OpenBSD device dispatch table. */
393 : /****************************************************************************/
394 : struct cfattach bnx_ca = {
395 : sizeof(struct bnx_softc), bnx_probe, bnx_attach
396 : };
397 :
398 : struct cfdriver bnx_cd = {
399 : NULL, "bnx", DV_IFNET
400 : };
401 :
402 : /****************************************************************************/
403 : /* Device probe function. */
404 : /* */
405 : /* Compares the device to the driver's list of supported devices and */
406 : /* reports back to the OS whether this is the right driver for the device. */
407 : /* */
408 : /* Returns: */
409 : /* 1 if the device is supported by the driver, 0 otherwise. */
410 : /****************************************************************************/
411 : int
412 0 : bnx_probe(struct device *parent, void *match, void *aux)
413 : {
414 0 : return (pci_matchbyid((struct pci_attach_args *)aux, bnx_devices,
415 : nitems(bnx_devices)));
416 : }
417 :
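/****************************************************************************/
/* Byte swap a firmware image section. */
/* */
/* Converts an array of 32 bit words from network to host byte order in */
/* place; wcount is the length of the section in bytes. */
/****************************************************************************/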
418 : void
419 0 : nswaph(u_int32_t *p, int wcount)
420 : {
421 0 : for (; wcount; wcount -= 4) {
422 0 : *p = ntohl(*p);
423 0 : p++;
424 : }
425 0 : }
426 :
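/****************************************************************************/
/* Load a CPU firmware image. */
/* */
/* Fetches the image with loadfirmware(9), converts the header fields and */
/* section data to host byte order, and caches the result in bnx_firmwares[] */
/* so the image is only read from disk once. */
/* */
/* Returns: */
/* 0 on success, EINVAL on a malformed image, or the error returned by */
/* loadfirmware(9). */
/****************************************************************************/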
427 : int
428 0 : bnx_read_firmware(struct bnx_softc *sc, int idx)
429 : {
430 0 : struct bnx_firmware *bfw = &bnx_firmwares[idx];
431 0 : struct bnx_firmware_header *hdr = bfw->fw;
432 0 : u_char *p, *q;
433 0 : size_t size;
434 : int error;
435 :
436 0 : if (hdr != NULL)
437 0 : return (0);
438 :
439 0 : if ((error = loadfirmware(bfw->filename, &p, &size)) != 0)
440 0 : return (error);
441 :
442 0 : if (size < sizeof(struct bnx_firmware_header)) {
443 0 : free(p, M_DEVBUF, size);
444 0 : return (EINVAL);
445 : }
446 :
447 0 : hdr = (struct bnx_firmware_header *)p;
448 :
449 0 : hdr->bnx_COM_FwReleaseMajor = ntohl(hdr->bnx_COM_FwReleaseMajor);
450 0 : hdr->bnx_COM_FwReleaseMinor = ntohl(hdr->bnx_COM_FwReleaseMinor);
451 0 : hdr->bnx_COM_FwReleaseFix = ntohl(hdr->bnx_COM_FwReleaseFix);
452 0 : hdr->bnx_COM_FwStartAddr = ntohl(hdr->bnx_COM_FwStartAddr);
453 0 : hdr->bnx_COM_FwTextAddr = ntohl(hdr->bnx_COM_FwTextAddr);
454 0 : hdr->bnx_COM_FwTextLen = ntohl(hdr->bnx_COM_FwTextLen);
455 0 : hdr->bnx_COM_FwDataAddr = ntohl(hdr->bnx_COM_FwDataAddr);
456 0 : hdr->bnx_COM_FwDataLen = ntohl(hdr->bnx_COM_FwDataLen);
457 0 : hdr->bnx_COM_FwRodataAddr = ntohl(hdr->bnx_COM_FwRodataAddr);
458 0 : hdr->bnx_COM_FwRodataLen = ntohl(hdr->bnx_COM_FwRodataLen);
459 0 : hdr->bnx_COM_FwBssAddr = ntohl(hdr->bnx_COM_FwBssAddr);
460 0 : hdr->bnx_COM_FwBssLen = ntohl(hdr->bnx_COM_FwBssLen);
461 0 : hdr->bnx_COM_FwSbssAddr = ntohl(hdr->bnx_COM_FwSbssAddr);
462 0 : hdr->bnx_COM_FwSbssLen = ntohl(hdr->bnx_COM_FwSbssLen);
463 :
464 0 : hdr->bnx_RXP_FwReleaseMajor = ntohl(hdr->bnx_RXP_FwReleaseMajor);
465 0 : hdr->bnx_RXP_FwReleaseMinor = ntohl(hdr->bnx_RXP_FwReleaseMinor);
466 0 : hdr->bnx_RXP_FwReleaseFix = ntohl(hdr->bnx_RXP_FwReleaseFix);
467 0 : hdr->bnx_RXP_FwStartAddr = ntohl(hdr->bnx_RXP_FwStartAddr);
468 0 : hdr->bnx_RXP_FwTextAddr = ntohl(hdr->bnx_RXP_FwTextAddr);
469 0 : hdr->bnx_RXP_FwTextLen = ntohl(hdr->bnx_RXP_FwTextLen);
470 0 : hdr->bnx_RXP_FwDataAddr = ntohl(hdr->bnx_RXP_FwDataAddr);
471 0 : hdr->bnx_RXP_FwDataLen = ntohl(hdr->bnx_RXP_FwDataLen);
472 0 : hdr->bnx_RXP_FwRodataAddr = ntohl(hdr->bnx_RXP_FwRodataAddr);
473 0 : hdr->bnx_RXP_FwRodataLen = ntohl(hdr->bnx_RXP_FwRodataLen);
474 0 : hdr->bnx_RXP_FwBssAddr = ntohl(hdr->bnx_RXP_FwBssAddr);
475 0 : hdr->bnx_RXP_FwBssLen = ntohl(hdr->bnx_RXP_FwBssLen);
476 0 : hdr->bnx_RXP_FwSbssAddr = ntohl(hdr->bnx_RXP_FwSbssAddr);
477 0 : hdr->bnx_RXP_FwSbssLen = ntohl(hdr->bnx_RXP_FwSbssLen);
478 :
479 0 : hdr->bnx_TPAT_FwReleaseMajor = ntohl(hdr->bnx_TPAT_FwReleaseMajor);
480 0 : hdr->bnx_TPAT_FwReleaseMinor = ntohl(hdr->bnx_TPAT_FwReleaseMinor);
481 0 : hdr->bnx_TPAT_FwReleaseFix = ntohl(hdr->bnx_TPAT_FwReleaseFix);
482 0 : hdr->bnx_TPAT_FwStartAddr = ntohl(hdr->bnx_TPAT_FwStartAddr);
483 0 : hdr->bnx_TPAT_FwTextAddr = ntohl(hdr->bnx_TPAT_FwTextAddr);
484 0 : hdr->bnx_TPAT_FwTextLen = ntohl(hdr->bnx_TPAT_FwTextLen);
485 0 : hdr->bnx_TPAT_FwDataAddr = ntohl(hdr->bnx_TPAT_FwDataAddr);
486 0 : hdr->bnx_TPAT_FwDataLen = ntohl(hdr->bnx_TPAT_FwDataLen);
487 0 : hdr->bnx_TPAT_FwRodataAddr = ntohl(hdr->bnx_TPAT_FwRodataAddr);
488 0 : hdr->bnx_TPAT_FwRodataLen = ntohl(hdr->bnx_TPAT_FwRodataLen);
489 0 : hdr->bnx_TPAT_FwBssAddr = ntohl(hdr->bnx_TPAT_FwBssAddr);
490 0 : hdr->bnx_TPAT_FwBssLen = ntohl(hdr->bnx_TPAT_FwBssLen);
491 0 : hdr->bnx_TPAT_FwSbssAddr = ntohl(hdr->bnx_TPAT_FwSbssAddr);
492 0 : hdr->bnx_TPAT_FwSbssLen = ntohl(hdr->bnx_TPAT_FwSbssLen);
493 :
494 0 : hdr->bnx_TXP_FwReleaseMajor = ntohl(hdr->bnx_TXP_FwReleaseMajor);
495 0 : hdr->bnx_TXP_FwReleaseMinor = ntohl(hdr->bnx_TXP_FwReleaseMinor);
496 0 : hdr->bnx_TXP_FwReleaseFix = ntohl(hdr->bnx_TXP_FwReleaseFix);
497 0 : hdr->bnx_TXP_FwStartAddr = ntohl(hdr->bnx_TXP_FwStartAddr);
498 0 : hdr->bnx_TXP_FwTextAddr = ntohl(hdr->bnx_TXP_FwTextAddr);
499 0 : hdr->bnx_TXP_FwTextLen = ntohl(hdr->bnx_TXP_FwTextLen);
500 0 : hdr->bnx_TXP_FwDataAddr = ntohl(hdr->bnx_TXP_FwDataAddr);
501 0 : hdr->bnx_TXP_FwDataLen = ntohl(hdr->bnx_TXP_FwDataLen);
502 0 : hdr->bnx_TXP_FwRodataAddr = ntohl(hdr->bnx_TXP_FwRodataAddr);
503 0 : hdr->bnx_TXP_FwRodataLen = ntohl(hdr->bnx_TXP_FwRodataLen);
504 0 : hdr->bnx_TXP_FwBssAddr = ntohl(hdr->bnx_TXP_FwBssAddr);
505 0 : hdr->bnx_TXP_FwBssLen = ntohl(hdr->bnx_TXP_FwBssLen);
506 0 : hdr->bnx_TXP_FwSbssAddr = ntohl(hdr->bnx_TXP_FwSbssAddr);
507 0 : hdr->bnx_TXP_FwSbssLen = ntohl(hdr->bnx_TXP_FwSbssLen);
508 :
509 0 : q = p + sizeof(*hdr);
510 :
511 0 : bfw->bnx_COM_FwText = (u_int32_t *)q;
512 0 : q += hdr->bnx_COM_FwTextLen;
513 0 : nswaph(bfw->bnx_COM_FwText, hdr->bnx_COM_FwTextLen);
514 0 : bfw->bnx_COM_FwData = (u_int32_t *)q;
515 0 : q += hdr->bnx_COM_FwDataLen;
516 0 : nswaph(bfw->bnx_COM_FwData, hdr->bnx_COM_FwDataLen);
517 0 : bfw->bnx_COM_FwRodata = (u_int32_t *)q;
518 0 : q += hdr->bnx_COM_FwRodataLen;
519 0 : nswaph(bfw->bnx_COM_FwRodata, hdr->bnx_COM_FwRodataLen);
520 0 : bfw->bnx_COM_FwBss = (u_int32_t *)q;
521 0 : q += hdr->bnx_COM_FwBssLen;
522 0 : nswaph(bfw->bnx_COM_FwBss, hdr->bnx_COM_FwBssLen);
523 0 : bfw->bnx_COM_FwSbss = (u_int32_t *)q;
524 0 : q += hdr->bnx_COM_FwSbssLen;
525 0 : nswaph(bfw->bnx_COM_FwSbss, hdr->bnx_COM_FwSbssLen);
526 :
527 0 : bfw->bnx_RXP_FwText = (u_int32_t *)q;
528 0 : q += hdr->bnx_RXP_FwTextLen;
529 0 : nswaph(bfw->bnx_RXP_FwText, hdr->bnx_RXP_FwTextLen);
530 0 : bfw->bnx_RXP_FwData = (u_int32_t *)q;
531 0 : q += hdr->bnx_RXP_FwDataLen;
532 0 : nswaph(bfw->bnx_RXP_FwData, hdr->bnx_RXP_FwDataLen);
533 0 : bfw->bnx_RXP_FwRodata = (u_int32_t *)q;
534 0 : q += hdr->bnx_RXP_FwRodataLen;
535 0 : nswaph(bfw->bnx_RXP_FwRodata, hdr->bnx_RXP_FwRodataLen);
536 0 : bfw->bnx_RXP_FwBss = (u_int32_t *)q;
537 0 : q += hdr->bnx_RXP_FwBssLen;
538 0 : nswaph(bfw->bnx_RXP_FwBss, hdr->bnx_RXP_FwBssLen);
539 0 : bfw->bnx_RXP_FwSbss = (u_int32_t *)q;
540 0 : q += hdr->bnx_RXP_FwSbssLen;
541 0 : nswaph(bfw->bnx_RXP_FwSbss, hdr->bnx_RXP_FwSbssLen);
542 :
543 0 : bfw->bnx_TPAT_FwText = (u_int32_t *)q;
544 0 : q += hdr->bnx_TPAT_FwTextLen;
545 0 : nswaph(bfw->bnx_TPAT_FwText, hdr->bnx_TPAT_FwTextLen);
546 0 : bfw->bnx_TPAT_FwData = (u_int32_t *)q;
547 0 : q += hdr->bnx_TPAT_FwDataLen;
548 0 : nswaph(bfw->bnx_TPAT_FwData, hdr->bnx_TPAT_FwDataLen);
549 0 : bfw->bnx_TPAT_FwRodata = (u_int32_t *)q;
550 0 : q += hdr->bnx_TPAT_FwRodataLen;
551 0 : nswaph(bfw->bnx_TPAT_FwRodata, hdr->bnx_TPAT_FwRodataLen);
552 0 : bfw->bnx_TPAT_FwBss = (u_int32_t *)q;
553 0 : q += hdr->bnx_TPAT_FwBssLen;
554 0 : nswaph(bfw->bnx_TPAT_FwBss, hdr->bnx_TPAT_FwBssLen);
555 0 : bfw->bnx_TPAT_FwSbss = (u_int32_t *)q;
556 0 : q += hdr->bnx_TPAT_FwSbssLen;
557 0 : nswaph(bfw->bnx_TPAT_FwSbss, hdr->bnx_TPAT_FwSbssLen);
558 :
559 0 : bfw->bnx_TXP_FwText = (u_int32_t *)q;
560 0 : q += hdr->bnx_TXP_FwTextLen;
561 0 : nswaph(bfw->bnx_TXP_FwText, hdr->bnx_TXP_FwTextLen);
562 0 : bfw->bnx_TXP_FwData = (u_int32_t *)q;
563 0 : q += hdr->bnx_TXP_FwDataLen;
564 0 : nswaph(bfw->bnx_TXP_FwData, hdr->bnx_TXP_FwDataLen);
565 0 : bfw->bnx_TXP_FwRodata = (u_int32_t *)q;
566 0 : q += hdr->bnx_TXP_FwRodataLen;
567 0 : nswaph(bfw->bnx_TXP_FwRodata, hdr->bnx_TXP_FwRodataLen);
568 0 : bfw->bnx_TXP_FwBss = (u_int32_t *)q;
569 0 : q += hdr->bnx_TXP_FwBssLen;
570 0 : nswaph(bfw->bnx_TXP_FwBss, hdr->bnx_TXP_FwBssLen);
571 0 : bfw->bnx_TXP_FwSbss = (u_int32_t *)q;
572 0 : q += hdr->bnx_TXP_FwSbssLen;
573 0 : nswaph(bfw->bnx_TXP_FwSbss, hdr->bnx_TXP_FwSbssLen);
574 :
575 0 : if (q - p != size) {
576 0 : free(p, M_DEVBUF, size);
577 : hdr = NULL;
578 0 : return EINVAL;
579 : }
580 :
581 0 : bfw->fw = hdr;
582 :
583 0 : return (0);
584 0 : }
585 :
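/****************************************************************************/
/* Load an RV2P processor firmware image. */
/* */
/* Same scheme as bnx_read_firmware(): the image is fetched with */
/* loadfirmware(9), byte swapped, and cached in bnx_rv2ps[]. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/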
586 : int
587 0 : bnx_read_rv2p(struct bnx_softc *sc, int idx)
588 : {
589 0 : struct bnx_rv2p *rv2p = &bnx_rv2ps[idx];
590 0 : struct bnx_rv2p_header *hdr = rv2p->fw;
591 0 : u_char *p, *q;
592 0 : size_t size;
593 : int error;
594 :
595 0 : if (hdr != NULL)
596 0 : return (0);
597 :
598 0 : if ((error = loadfirmware(rv2p->filename, &p, &size)) != 0)
599 0 : return (error);
600 :
601 0 : if (size < sizeof(struct bnx_rv2p_header)) {
602 0 : free(p, M_DEVBUF, size);
603 0 : return (EINVAL);
604 : }
605 :
606 0 : hdr = (struct bnx_rv2p_header *)p;
607 :
608 0 : hdr->bnx_rv2p_proc1len = ntohl(hdr->bnx_rv2p_proc1len);
609 0 : hdr->bnx_rv2p_proc2len = ntohl(hdr->bnx_rv2p_proc2len);
610 :
611 0 : q = p + sizeof(*hdr);
612 :
613 0 : rv2p->bnx_rv2p_proc1 = (u_int32_t *)q;
614 0 : q += hdr->bnx_rv2p_proc1len;
615 0 : nswaph(rv2p->bnx_rv2p_proc1, hdr->bnx_rv2p_proc1len);
616 0 : rv2p->bnx_rv2p_proc2 = (u_int32_t *)q;
617 0 : q += hdr->bnx_rv2p_proc2len;
618 0 : nswaph(rv2p->bnx_rv2p_proc2, hdr->bnx_rv2p_proc2len);
619 :
620 0 : if (q - p != size) {
621 0 : free(p, M_DEVBUF, size);
622 0 : return EINVAL;
623 : }
624 :
625 0 : rv2p->fw = hdr;
626 :
627 0 : return (0);
628 0 : }
629 :
630 :
631 : /****************************************************************************/
632 : /* Device attach function. */
633 : /* */
634 : /* Allocates device resources, performs secondary chip identification, */
635 : /* resets and initializes the hardware, and initializes driver instance */
636 : /* variables. */
637 : /* */
638 : /* Returns: */
639 : /* Nothing. */
640 : /****************************************************************************/
641 : void
642 0 : bnx_attach(struct device *parent, struct device *self, void *aux)
643 : {
644 0 : struct bnx_softc *sc = (struct bnx_softc *)self;
645 0 : struct pci_attach_args *pa = aux;
646 0 : pci_chipset_tag_t pc = pa->pa_pc;
647 : u_int32_t val;
648 : pcireg_t memtype;
649 : const char *intrstr = NULL;
650 :
651 0 : sc->bnx_pa = *pa;
652 :
653 : /*
654 : * Map control/status registers.
655 : */
656 0 : memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
657 0 : if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag,
658 0 : &sc->bnx_bhandle, NULL, &sc->bnx_size, 0)) {
659 0 : printf(": can't find mem space\n");
660 0 : return;
661 : }
662 :
663 0 : if (pci_intr_map(pa, &sc->bnx_ih)) {
664 0 : printf(": couldn't map interrupt\n");
665 0 : goto bnx_attach_fail;
666 : }
667 0 : intrstr = pci_intr_string(pc, sc->bnx_ih);
668 :
669 : /*
670 : * Configure byte swap and enable indirect register access.
671 : * Rely on CPU to do target byte swapping on big endian systems.
672 : * Access to registers outside of PCI configuration space is not
673 : * valid until this is done.
674 : */
675 0 : pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
676 : BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
677 : BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
678 :
679 : /* Save ASIC revision info. */
680 0 : sc->bnx_chipid = REG_RD(sc, BNX_MISC_ID);
681 :
682 : /*
683 : * Find the base address for shared memory access.
684 : * Newer versions of bootcode use a signature and offset
685 : * while older versions use a fixed address.
686 : */
687 0 : val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
688 0 : if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
689 0 : sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0 +
690 : (sc->bnx_pa.pa_function << 2));
691 : else
692 0 : sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;
693 :
694 : DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);
695 :
696 : /* Set initial device and PHY flags */
697 0 : sc->bnx_flags = 0;
698 0 : sc->bnx_phy_flags = 0;
699 :
700 : /* Get PCI bus information (speed and type). */
701 0 : val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
702 0 : if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
703 : u_int32_t clkreg;
704 :
705 0 : sc->bnx_flags |= BNX_PCIX_FLAG;
706 :
707 0 : clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);
708 :
709 0 : clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
710 0 : switch (clkreg) {
711 : case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
712 0 : sc->bus_speed_mhz = 133;
713 0 : break;
714 :
715 : case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
716 0 : sc->bus_speed_mhz = 100;
717 0 : break;
718 :
719 : case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
720 : case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
721 0 : sc->bus_speed_mhz = 66;
722 0 : break;
723 :
724 : case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
725 : case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
726 0 : sc->bus_speed_mhz = 50;
727 0 : break;
728 :
729 : case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
730 : case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
731 : case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
732 0 : sc->bus_speed_mhz = 33;
733 0 : break;
734 : }
735 0 : } else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
736 0 : sc->bus_speed_mhz = 66;
737 : else
738 0 : sc->bus_speed_mhz = 33;
739 :
740 0 : if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
741 0 : sc->bnx_flags |= BNX_PCI_32BIT_FLAG;
742 :
743 : /* Hookup IRQ last. */
744 0 : sc->bnx_intrhand = pci_intr_establish(pc, sc->bnx_ih,
745 0 : IPL_NET | IPL_MPSAFE, bnx_intr, sc, sc->bnx_dev.dv_xname);
746 0 : if (sc->bnx_intrhand == NULL) {
747 0 : printf(": couldn't establish interrupt");
748 0 : if (intrstr != NULL)
749 0 : printf(" at %s", intrstr);
750 0 : printf("\n");
751 0 : goto bnx_attach_fail;
752 : }
753 :
754 0 : printf(": %s\n", intrstr);
755 :
756 0 : config_mountroot(self, bnx_attachhook);
757 0 : return;
758 :
759 : bnx_attach_fail:
760 0 : bnx_release_resources(sc);
761 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
762 0 : }
763 :
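/****************************************************************************/
/* Second stage of device attach. */
/* */
/* Deferred via config_mountroot() so that the firmware images can be */
/* loaded once the root file system is available, then resets and */
/* initializes the controller and attaches the network interface. */
/****************************************************************************/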
764 : void
765 0 : bnx_attachhook(struct device *self)
766 : {
767 0 : struct bnx_softc *sc = (struct bnx_softc *)self;
768 0 : struct pci_attach_args *pa = &sc->bnx_pa;
769 : struct ifnet *ifp;
770 : int error, mii_flags = 0;
771 : int fw = BNX_FW_B06;
772 : int rv2p = BNX_RV2P;
773 :
774 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
775 : fw = BNX_FW_B09;
776 0 : if ((BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax))
777 0 : rv2p = BNX_XI90_RV2P;
778 : else
779 : rv2p = BNX_XI_RV2P;
780 : }
781 :
782 0 : if ((error = bnx_read_firmware(sc, fw)) != 0) {
783 0 : printf("%s: error %d, could not read firmware\n",
784 0 : sc->bnx_dev.dv_xname, error);
785 0 : return;
786 : }
787 :
788 0 : if ((error = bnx_read_rv2p(sc, rv2p)) != 0) {
789 0 : printf("%s: error %d, could not read rv2p\n",
790 0 : sc->bnx_dev.dv_xname, error);
791 0 : return;
792 : }
793 :
794 : /* Reset the controller. */
795 0 : if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
796 : goto bnx_attach_fail;
797 :
798 : /* Initialize the controller. */
799 0 : if (bnx_chipinit(sc)) {
800 0 : printf("%s: Controller initialization failed!\n",
801 0 : sc->bnx_dev.dv_xname);
802 0 : goto bnx_attach_fail;
803 : }
804 :
805 : /* Perform NVRAM test. */
806 0 : if (bnx_nvram_test(sc)) {
807 0 : printf("%s: NVRAM test failed!\n",
808 0 : sc->bnx_dev.dv_xname);
809 0 : goto bnx_attach_fail;
810 : }
811 :
812 : /* Fetch the permanent Ethernet MAC address. */
813 0 : bnx_get_mac_addr(sc);
814 :
815 : /*
816 : * Trip points control how many BDs
817 : * should be ready before generating an
818 : * interrupt while ticks control how long
819 : * a BD can sit in the chain before
820 : * generating an interrupt. Set the default
821 : * values for the RX and TX rings.
822 : */
823 :
824 : #ifdef BNX_DEBUG
825 : /* Force more frequent interrupts. */
826 : sc->bnx_tx_quick_cons_trip_int = 1;
827 : sc->bnx_tx_quick_cons_trip = 1;
828 : sc->bnx_tx_ticks_int = 0;
829 : sc->bnx_tx_ticks = 0;
830 :
831 : sc->bnx_rx_quick_cons_trip_int = 1;
832 : sc->bnx_rx_quick_cons_trip = 1;
833 : sc->bnx_rx_ticks_int = 0;
834 : sc->bnx_rx_ticks = 0;
835 : #else
836 0 : sc->bnx_tx_quick_cons_trip_int = 20;
837 0 : sc->bnx_tx_quick_cons_trip = 20;
838 0 : sc->bnx_tx_ticks_int = 80;
839 0 : sc->bnx_tx_ticks = 80;
840 :
841 0 : sc->bnx_rx_quick_cons_trip_int = 6;
842 0 : sc->bnx_rx_quick_cons_trip = 6;
843 0 : sc->bnx_rx_ticks_int = 18;
844 0 : sc->bnx_rx_ticks = 18;
845 : #endif
846 :
847 : /* Update statistics once every second. */
848 0 : sc->bnx_stats_ticks = 1000000 & 0xffff00;
849 :
850 : /* Find the media type for the adapter. */
851 0 : bnx_get_media(sc);
852 :
853 : /*
854 : * Store config data needed by the PHY driver for
855 : * backplane applications
856 : */
857 0 : sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
858 : BNX_SHARED_HW_CFG_CONFIG);
859 0 : sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
860 : BNX_PORT_HW_CFG_CONFIG);
861 :
862 : /* Allocate DMA memory resources. */
863 0 : sc->bnx_dmatag = pa->pa_dmat;
864 0 : if (bnx_dma_alloc(sc)) {
865 0 : printf("%s: DMA resource allocation failed!\n",
866 0 : sc->bnx_dev.dv_xname);
867 0 : goto bnx_attach_fail;
868 : }
869 :
870 : /* Initialize the ifnet interface. */
871 0 : ifp = &sc->arpcom.ac_if;
872 0 : ifp->if_softc = sc;
873 0 : ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
874 0 : ifp->if_xflags = IFXF_MPSAFE;
875 0 : ifp->if_ioctl = bnx_ioctl;
876 0 : ifp->if_qstart = bnx_start;
877 0 : ifp->if_watchdog = bnx_watchdog;
878 0 : IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
879 0 : bcopy(sc->eaddr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
880 0 : bcopy(sc->bnx_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
881 :
882 0 : ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 |
883 : IFCAP_CSUM_UDPv4;
884 :
885 : #if NVLAN > 0
886 0 : ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
887 : #endif
888 :
889 0 : sc->mbuf_alloc_size = BNX_MAX_MRU;
890 :
891 0 : printf("%s: address %s\n", sc->bnx_dev.dv_xname,
892 0 : ether_sprintf(sc->arpcom.ac_enaddr));
893 :
894 0 : sc->bnx_mii.mii_ifp = ifp;
895 0 : sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
896 0 : sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
897 0 : sc->bnx_mii.mii_statchg = bnx_miibus_statchg;
898 :
899 : /* Handle any special PHY initialization for SerDes PHYs. */
900 0 : bnx_init_media(sc);
901 :
902 : /* Look for our PHY. */
903 0 : ifmedia_init(&sc->bnx_mii.mii_media, 0, bnx_ifmedia_upd,
904 : bnx_ifmedia_sts);
905 : mii_flags |= MIIF_DOPAUSE;
906 0 : if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
907 0 : mii_flags |= MIIF_HAVEFIBER;
908 0 : mii_attach(&sc->bnx_dev, &sc->bnx_mii, 0xffffffff,
909 0 : sc->bnx_phy_addr, MII_OFFSET_ANY, mii_flags);
910 :
911 0 : if (LIST_FIRST(&sc->bnx_mii.mii_phys) == NULL) {
912 0 : printf("%s: no PHY found!\n", sc->bnx_dev.dv_xname);
913 0 : ifmedia_add(&sc->bnx_mii.mii_media,
914 : IFM_ETHER|IFM_MANUAL, 0, NULL);
915 0 : ifmedia_set(&sc->bnx_mii.mii_media,
916 : IFM_ETHER|IFM_MANUAL);
917 0 : } else {
918 0 : ifmedia_set(&sc->bnx_mii.mii_media,
919 : IFM_ETHER|IFM_AUTO);
920 : }
921 :
922 : /* Attach to the Ethernet interface list. */
923 0 : if_attach(ifp);
924 0 : ether_ifattach(ifp);
925 :
926 0 : timeout_set(&sc->bnx_timeout, bnx_tick, sc);
927 0 : timeout_set(&sc->bnx_rxrefill, bnx_rxrefill, sc);
928 :
929 : /* Print some important debugging info. */
930 : DBRUN(BNX_INFO, bnx_dump_driver_state(sc));
931 :
932 : /* Get the firmware running so ASF still works. */
933 0 : bnx_mgmt_init(sc);
934 :
935 : /* Handle interrupts */
936 0 : sc->bnx_flags |= BNX_ACTIVE_FLAG;
937 :
938 0 : goto bnx_attach_exit;
939 :
940 : bnx_attach_fail:
941 0 : bnx_release_resources(sc);
942 :
943 : bnx_attach_exit:
944 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
945 0 : }
946 :
947 : /****************************************************************************/
948 : /* Device detach function. */
949 : /* */
950 : /* Stops the controller, resets the controller, and releases resources. */
951 : /* */
952 : /* Returns: */
953 : /* 0 on success, positive value on failure. */
954 : /****************************************************************************/
955 : #if 0
956 : void
957 : bnx_detach(void *xsc)
958 : {
959 : struct bnx_softc *sc;
960 : struct ifnet *ifp = &sc->arpcom.ac_if;
961 :
962 : sc = device_get_softc(dev);
963 :
964 : DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
965 :
966 : /* Stop and reset the controller. */
967 : bnx_stop(sc);
968 : bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
969 :
970 : ether_ifdetach(ifp);
971 :
972 : /* If we have a child device on the MII bus remove it too. */
973 : bus_generic_detach(dev);
974 : device_delete_child(dev, sc->bnx_mii);
975 :
976 : /* Release all remaining resources. */
977 : bnx_release_resources(sc);
978 :
979 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
980 :
981 : return(0);
982 : }
983 : #endif
984 :
985 : /****************************************************************************/
986 : /* Indirect register read. */
987 : /* */
988 : /* Reads NetXtreme II registers using an index/data register pair in PCI */
989 : /* configuration space. Using this mechanism avoids issues with posted */
990 : /* reads but is much slower than memory-mapped I/O. */
991 : /* */
992 : /* Returns: */
993 : /* The value of the register. */
994 : /****************************************************************************/
995 : u_int32_t
996 0 : bnx_reg_rd_ind(struct bnx_softc *sc, u_int32_t offset)
997 : {
998 0 : struct pci_attach_args *pa = &(sc->bnx_pa);
999 :
1000 0 : pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
1001 : offset);
1002 : #ifdef BNX_DEBUG
1003 : {
1004 : u_int32_t val;
1005 : val = pci_conf_read(pa->pa_pc, pa->pa_tag,
1006 : BNX_PCICFG_REG_WINDOW);
1007 : DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
1008 : "val = 0x%08X\n", __FUNCTION__, offset, val);
1009 : return (val);
1010 : }
1011 : #else
1012 0 : return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
1013 : #endif
1014 : }
1015 :
1016 : /****************************************************************************/
1017 : /* Indirect register write. */
1018 : /* */
1019 : /* Writes NetXtreme II registers using an index/data register pair in PCI */
1020 : /* configuration space. Using this mechanism avoids issues with posted */
1021 : /* writes but is much slower than memory-mapped I/O. */
1022 : /* */
1023 : /* Returns: */
1024 : /* Nothing. */
1025 : /****************************************************************************/
1026 : void
1027 0 : bnx_reg_wr_ind(struct bnx_softc *sc, u_int32_t offset, u_int32_t val)
1028 : {
1029 0 : struct pci_attach_args *pa = &(sc->bnx_pa);
1030 :
1031 : DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
1032 : __FUNCTION__, offset, val);
1033 :
1034 0 : pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
1035 : offset);
1036 0 : pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
1037 0 : }
1038 :
1039 : /****************************************************************************/
1040 : /* Context memory write. */
1041 : /* */
1042 : /* The NetXtreme II controller uses context memory to track connection */
1043 : /* information for L2 and higher network protocols. */
1044 : /* */
1045 : /* Returns: */
1046 : /* Nothing. */
1047 : /****************************************************************************/
1048 : void
1049 0 : bnx_ctx_wr(struct bnx_softc *sc, u_int32_t cid_addr, u_int32_t ctx_offset,
1050 : u_int32_t ctx_val)
1051 : {
1052 0 : u_int32_t idx, offset = ctx_offset + cid_addr;
1053 : u_int32_t val, retry_cnt = 5;
1054 :
1055 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
1056 0 : REG_WR(sc, BNX_CTX_CTX_DATA, ctx_val);
1057 0 : REG_WR(sc, BNX_CTX_CTX_CTRL,
1058 : (offset | BNX_CTX_CTX_CTRL_WRITE_REQ));
1059 :
1060 0 : for (idx = 0; idx < retry_cnt; idx++) {
1061 0 : val = REG_RD(sc, BNX_CTX_CTX_CTRL);
1062 0 : if ((val & BNX_CTX_CTX_CTRL_WRITE_REQ) == 0)
1063 : break;
1064 0 : DELAY(5);
1065 : }
1066 :
1067 : #if 0
1068 : if (val & BNX_CTX_CTX_CTRL_WRITE_REQ)
1069 : BNX_PRINTF("%s(%d); Unable to write CTX memory: "
1070 : "cid_addr = 0x%08X, offset = 0x%08X!\n",
1071 : __FILE__, __LINE__, cid_addr, ctx_offset);
1072 : #endif
1073 :
1074 : } else {
1075 0 : REG_WR(sc, BNX_CTX_DATA_ADR, offset);
1076 0 : REG_WR(sc, BNX_CTX_DATA, ctx_val);
1077 : }
1078 0 : }
1079 :
1080 : /****************************************************************************/
1081 : /* PHY register read. */
1082 : /* */
1083 : /* Implements register reads on the MII bus. */
1084 : /* */
1085 : /* Returns: */
1086 : /* The value of the register. */
1087 : /****************************************************************************/
1088 : int
1089 0 : bnx_miibus_read_reg(struct device *dev, int phy, int reg)
1090 : {
1091 0 : struct bnx_softc *sc = (struct bnx_softc *)dev;
1092 : u_int32_t val;
1093 : int i;
1094 :
1095 : /*
1096 : * The BCM5709S PHY is an IEEE Clause 45 PHY
1097 : * with special mappings to work with IEEE
1098 : * Clause 22 register accesses.
1099 : */
1100 0 : if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
1101 0 : if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
1102 0 : reg += 0x10;
1103 : }
1104 :
1105 0 : if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1106 0 : val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
1107 0 : val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;
1108 :
1109 0 : REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
1110 0 : REG_RD(sc, BNX_EMAC_MDIO_MODE);
1111 :
1112 0 : DELAY(40);
1113 0 : }
1114 :
1115 0 : val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
1116 0 : BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
1117 : BNX_EMAC_MDIO_COMM_START_BUSY;
1118 0 : REG_WR(sc, BNX_EMAC_MDIO_COMM, val);
1119 :
1120 0 : for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
1121 0 : DELAY(10);
1122 :
1123 0 : val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
1124 0 : if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
1125 0 : DELAY(5);
1126 :
1127 0 : val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
1128 0 : val &= BNX_EMAC_MDIO_COMM_DATA;
1129 :
1130 0 : break;
1131 : }
1132 : }
1133 :
1134 0 : if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
1135 0 : BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
1136 : "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
1137 : val = 0x0;
1138 0 : } else
1139 0 : val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
1140 :
1141 : DBPRINT(sc, BNX_EXCESSIVE,
1142 : "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __FUNCTION__, phy,
1143 : (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);
1144 :
1145 0 : if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1146 0 : val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
1147 0 : val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;
1148 :
1149 0 : REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
1150 0 : REG_RD(sc, BNX_EMAC_MDIO_MODE);
1151 :
1152 0 : DELAY(40);
1153 0 : }
1154 :
1155 0 : return (val & 0xffff);
1156 : }
1157 :
1158 : /****************************************************************************/
1159 : /* PHY register write. */
1160 : /* */
1161 : /* Implements register writes on the MII bus. */
1162 : /* */
1163 : /* Returns: */
1164 : /* Nothing. */
1165 : /****************************************************************************/
1166 : void
1167 0 : bnx_miibus_write_reg(struct device *dev, int phy, int reg, int val)
1168 : {
1169 0 : struct bnx_softc *sc = (struct bnx_softc *)dev;
1170 : u_int32_t val1;
1171 : int i;
1172 :
1173 : DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
1174 : "val = 0x%04X\n", __FUNCTION__,
1175 : phy, (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);
1176 :
1177 : /*
1178 : * The BCM5709S PHY is an IEEE Clause 45 PHY
1179 : * with special mappings to work with IEEE
1180 : * Clause 22 register accesses.
1181 : */
1182 0 : if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
1183 0 : if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
1184 0 : reg += 0x10;
1185 : }
1186 :
1187 0 : if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1188 0 : val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
1189 0 : val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;
1190 :
1191 0 : REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
1192 0 : REG_RD(sc, BNX_EMAC_MDIO_MODE);
1193 :
1194 0 : DELAY(40);
1195 0 : }
1196 :
1197 0 : val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
1198 0 : BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
1199 0 : BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
1200 0 : REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);
1201 :
1202 0 : for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
1203 0 : DELAY(10);
1204 :
1205 0 : val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
1206 0 : if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
1207 0 : DELAY(5);
1208 0 : break;
1209 : }
1210 : }
1211 :
1212 0 : if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
1213 0 : BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
1214 : __LINE__);
1215 0 : }
1216 :
1217 0 : if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1218 0 : val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
1219 0 : val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;
1220 :
1221 0 : REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
1222 0 : REG_RD(sc, BNX_EMAC_MDIO_MODE);
1223 :
1224 0 : DELAY(40);
1225 0 : }
1226 0 : }
1227 :
1228 : /****************************************************************************/
1229 : /* MII bus status change. */
1230 : /* */
1231 : /* Called by the MII bus driver when the PHY establishes link to set the */
1232 : /* MAC interface registers. */
1233 : /* */
1234 : /* Returns: */
1235 : /* Nothing. */
1236 : /****************************************************************************/
1237 : void
1238 0 : bnx_miibus_statchg(struct device *dev)
1239 : {
1240 0 : struct bnx_softc *sc = (struct bnx_softc *)dev;
1241 0 : struct mii_data *mii = &sc->bnx_mii;
1242 0 : u_int32_t rx_mode = sc->rx_mode;
1243 : int val;
1244 :
1245 0 : val = REG_RD(sc, BNX_EMAC_MODE);
1246 0 : val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
1247 : BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
1248 : BNX_EMAC_MODE_25G);
1249 :
1250 : /*
1251 : * Get flow control negotiation result.
1252 : */
1253 0 : if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
1254 0 : (mii->mii_media_active & IFM_ETH_FMASK) != sc->bnx_flowflags) {
1255 0 : sc->bnx_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
1256 0 : mii->mii_media_active &= ~IFM_ETH_FMASK;
1257 0 : }
1258 :
1259 : /* Set MII or GMII interface based on the speed
1260 : * negotiated by the PHY.
1261 : */
1262 0 : switch (IFM_SUBTYPE(mii->mii_media_active)) {
1263 : case IFM_10_T:
1264 0 : if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
1265 : DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
1266 0 : val |= BNX_EMAC_MODE_PORT_MII_10;
1267 0 : break;
1268 : }
1269 : /* FALLTHROUGH */
1270 : case IFM_100_TX:
1271 : DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
1272 0 : val |= BNX_EMAC_MODE_PORT_MII;
1273 0 : break;
1274 : case IFM_2500_SX:
1275 : DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
1276 0 : val |= BNX_EMAC_MODE_25G;
1277 : /* FALLTHROUGH */
1278 : case IFM_1000_T:
1279 : case IFM_1000_SX:
1280 : DBPRINT(sc, BNX_INFO, "Enabling GMII interface.\n");
1281 0 : val |= BNX_EMAC_MODE_PORT_GMII;
1282 0 : break;
1283 : default:
1284 0 : val |= BNX_EMAC_MODE_PORT_GMII;
1285 0 : break;
1286 : }
1287 :
1288 : /* Set half or full duplex based on the duplex mode
1289 : * negotiated by the PHY.
1290 : */
1291 0 : if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1292 : DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
1293 0 : val |= BNX_EMAC_MODE_HALF_DUPLEX;
1294 0 : } else
1295 : DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
1296 :
1297 0 : REG_WR(sc, BNX_EMAC_MODE, val);
1298 :
1299 : /*
1300 : * 802.3x flow control
1301 : */
1302 0 : if (sc->bnx_flowflags & IFM_ETH_RXPAUSE) {
1303 : DBPRINT(sc, BNX_INFO, "Enabling RX mode flow control.\n");
1304 0 : rx_mode |= BNX_EMAC_RX_MODE_FLOW_EN;
1305 0 : } else {
1306 : DBPRINT(sc, BNX_INFO, "Disabling RX mode flow control.\n");
1307 0 : rx_mode &= ~BNX_EMAC_RX_MODE_FLOW_EN;
1308 : }
1309 :
1310 0 : if (sc->bnx_flowflags & IFM_ETH_TXPAUSE) {
1311 : DBPRINT(sc, BNX_INFO, "Enabling TX mode flow control.\n");
1312 0 : BNX_SETBIT(sc, BNX_EMAC_TX_MODE, BNX_EMAC_TX_MODE_FLOW_EN);
1313 0 : } else {
1314 : DBPRINT(sc, BNX_INFO, "Disabling TX mode flow control.\n");
1315 0 : BNX_CLRBIT(sc, BNX_EMAC_TX_MODE, BNX_EMAC_TX_MODE_FLOW_EN);
1316 : }
1317 :
1318 : /* Only make changes if the receive mode has actually changed. */
1319 0 : if (rx_mode != sc->rx_mode) {
1320 : DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
1321 : rx_mode);
1322 :
1323 0 : sc->rx_mode = rx_mode;
1324 0 : REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
1325 0 : }
1326 0 : }
1327 :
1328 : /****************************************************************************/
1329 : /* Acquire NVRAM lock. */
1330 : /* */
1331 : /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
1332 : /* Locks 0 and 1 are reserved (lock 1 is used by the firmware); lock 2 is */
1333 : /* for use by the driver. */
1334 : /* */
1335 : /* Returns: */
1336 : /* 0 on success, positive value on failure. */
1337 : /****************************************************************************/
1338 : int
1339 0 : bnx_acquire_nvram_lock(struct bnx_softc *sc)
1340 : {
1341 : u_int32_t val;
1342 : int j;
1343 :
1344 : DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");
1345 :
1346 : /* Request access to the flash interface. */
1347 0 : REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
1348 0 : for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1349 0 : val = REG_RD(sc, BNX_NVM_SW_ARB);
1350 0 : if (val & BNX_NVM_SW_ARB_ARB_ARB2)
1351 : break;
1352 :
1353 0 : DELAY(5);
1354 : }
1355 :
1356 0 : if (j >= NVRAM_TIMEOUT_COUNT) {
1357 : DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
1358 0 : return (EBUSY);
1359 : }
1360 :
1361 0 : return (0);
1362 0 : }
1363 :
1364 : /****************************************************************************/
1365 : /* Release NVRAM lock. */
1366 : /* */
1367 : /* When the caller is finished accessing NVRAM the lock must be released. */
1368 : /* Locks 0 and 1 are reserved (lock 1 is used by the firmware); lock 2 is */
1369 : /* for use by the driver. */
1370 : /* */
1371 : /* Returns: */
1372 : /* 0 on success, positive value on failure. */
1373 : /****************************************************************************/
1374 : int
1375 0 : bnx_release_nvram_lock(struct bnx_softc *sc)
1376 : {
1377 : int j;
1378 : u_int32_t val;
1379 :
1380 : DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");
1381 :
1382 : /* Relinquish nvram interface. */
1383 0 : REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);
1384 :
1385 0 : for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1386 0 : val = REG_RD(sc, BNX_NVM_SW_ARB);
1387 0 : if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
1388 : break;
1389 :
1390 0 : DELAY(5);
1391 : }
1392 :
1393 0 : if (j >= NVRAM_TIMEOUT_COUNT) {
1394 : DBPRINT(sc, BNX_WARN, "Timeout releasing NVRAM lock!\n");
1395 0 : return (EBUSY);
1396 : }
1397 :
1398 0 : return (0);
1399 0 : }
1400 :
1401 : #ifdef BNX_NVRAM_WRITE_SUPPORT
1402 : /****************************************************************************/
1403 : /* Enable NVRAM write access. */
1404 : /* */
1405 : /* Before writing to NVRAM the caller must enable NVRAM writes. */
1406 : /* */
1407 : /* Returns: */
1408 : /* 0 on success, positive value on failure. */
1409 : /****************************************************************************/
1410 : int
1411 : bnx_enable_nvram_write(struct bnx_softc *sc)
1412 : {
1413 : u_int32_t val;
1414 :
1415 : DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");
1416 :
1417 : val = REG_RD(sc, BNX_MISC_CFG);
1418 : REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);
1419 :
1420 : if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1421 : int j;
1422 :
1423 : REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1424 : REG_WR(sc, BNX_NVM_COMMAND,
1425 : BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);
1426 :
1427 : for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1428 : DELAY(5);
1429 :
1430 : val = REG_RD(sc, BNX_NVM_COMMAND);
1431 : if (val & BNX_NVM_COMMAND_DONE)
1432 : break;
1433 : }
1434 :
1435 : if (j >= NVRAM_TIMEOUT_COUNT) {
1436 : DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
1437 : return (EBUSY);
1438 : }
1439 : }
1440 :
1441 : return (0);
1442 : }
1443 :
1444 : /****************************************************************************/
1445 : /* Disable NVRAM write access. */
1446 : /* */
1447 : /* When the caller is finished writing to NVRAM write access must be */
1448 : /* disabled. */
1449 : /* */
1450 : /* Returns: */
1451 : /* Nothing. */
1452 : /****************************************************************************/
1453 : void
1454 : bnx_disable_nvram_write(struct bnx_softc *sc)
1455 : {
1456 : u_int32_t val;
1457 :
1458 : DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM write.\n");
1459 :
1460 : val = REG_RD(sc, BNX_MISC_CFG);
1461 : REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
1462 : }
1463 : #endif
1464 :
1465 : /****************************************************************************/
1466 : /* Enable NVRAM access. */
1467 : /* */
1468 : /* Before accessing NVRAM for read or write operations the caller must */
1469 : /* enable NVRAM access. */
1470 : /* */
1471 : /* Returns: */
1472 : /* Nothing. */
1473 : /****************************************************************************/
1474 : void
1475 0 : bnx_enable_nvram_access(struct bnx_softc *sc)
1476 : {
1477 : u_int32_t val;
1478 :
1479 : DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");
1480 :
1481 0 : val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1482 : /* Enable both bits, even on read. */
1483 0 : REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1484 : val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
1485 0 : }
1486 :
1487 : /****************************************************************************/
1488 : /* Disable NVRAM access. */
1489 : /* */
1490 : /* When the caller is finished accessing NVRAM access must be disabled. */
1491 : /* */
1492 : /* Returns: */
1493 : /* Nothing. */
1494 : /****************************************************************************/
1495 : void
1496 0 : bnx_disable_nvram_access(struct bnx_softc *sc)
1497 : {
1498 : u_int32_t val;
1499 :
1500 : DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");
1501 :
1502 0 : val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1503 :
1504 : /* Disable both bits, even after read. */
1505 0 : REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1506 : val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
1507 0 : }
1508 :
1509 : #ifdef BNX_NVRAM_WRITE_SUPPORT
1510 : /****************************************************************************/
1511 : /* Erase NVRAM page before writing. */
1512 : /* */
1513 : /* Non-buffered flash parts require that a page be erased before it is */
1514 : /* written. */
1515 : /* */
1516 : /* Returns: */
1517 : /* 0 on success, positive value on failure. */
1518 : /****************************************************************************/
1519 : int
1520 : bnx_nvram_erase_page(struct bnx_softc *sc, u_int32_t offset)
1521 : {
1522 : u_int32_t cmd;
1523 : int j;
1524 :
1525 : /* Buffered flash doesn't require an erase. */
1526 : if (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED))
1527 : return (0);
1528 :
1529 : DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");
1530 :
1531 : /* Build an erase command. */
1532 : cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
1533 : BNX_NVM_COMMAND_DOIT;
1534 :
1535 : /*
1536 : * Clear the DONE bit separately, set the NVRAM address to erase,
1537 : * and issue the erase command.
1538 : */
1539 : REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1540 : REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1541 : REG_WR(sc, BNX_NVM_COMMAND, cmd);
1542 :
1543 : /* Wait for completion. */
1544 : for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1545 : u_int32_t val;
1546 :
1547 : DELAY(5);
1548 :
1549 : val = REG_RD(sc, BNX_NVM_COMMAND);
1550 : if (val & BNX_NVM_COMMAND_DONE)
1551 : break;
1552 : }
1553 :
1554 : if (j >= NVRAM_TIMEOUT_COUNT) {
1555 : DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
1556 : return (EBUSY);
1557 : }
1558 :
1559 : return (0);
1560 : }
1561 : #endif /* BNX_NVRAM_WRITE_SUPPORT */
1562 :
1563 : /****************************************************************************/
1564 : /* Read a dword (32 bits) from NVRAM. */
1565 : /* */
1566 : /* Read a 32 bit word from NVRAM. The caller is assumed to have already */
1567 : /* obtained the NVRAM lock and enabled the controller for NVRAM access. */
1568 : /* */
1569 : /* Returns: */
1570 : /* 0 on success and the 32 bit value read, positive value on failure. */
1571 : /****************************************************************************/
1572 : int
1573 0 : bnx_nvram_read_dword(struct bnx_softc *sc, u_int32_t offset,
1574 : u_int8_t *ret_val, u_int32_t cmd_flags)
1575 : {
1576 : u_int32_t cmd;
1577 : int i, rc = 0;
1578 :
1579 : /* Build the command word. */
1580 0 : cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;
1581 :
1582 : /* Calculate the offset for buffered flash if translation is used. */
1583 0 : if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
1584 0 : offset = ((offset / sc->bnx_flash_info->page_size) <<
1585 0 : sc->bnx_flash_info->page_bits) +
1586 0 : (offset % sc->bnx_flash_info->page_size);
1587 0 : }
1588 :
1589 : /*
1590 : * Clear the DONE bit separately, set the address to read,
1591 : * and issue the read.
1592 : */
1593 0 : REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1594 0 : REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1595 0 : REG_WR(sc, BNX_NVM_COMMAND, cmd);
1596 :
1597 : /* Wait for completion. */
1598 0 : for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1599 : u_int32_t val;
1600 :
1601 0 : DELAY(5);
1602 :
1603 0 : val = REG_RD(sc, BNX_NVM_COMMAND);
1604 0 : if (val & BNX_NVM_COMMAND_DONE) {
1605 0 : val = REG_RD(sc, BNX_NVM_READ);
1606 :
1607 0 : val = bnx_be32toh(val);
1608 0 : memcpy(ret_val, &val, 4);
1609 0 : break;
1610 : }
1611 0 : }
1612 :
1613 : /* Check for errors. */
1614 0 : if (i >= NVRAM_TIMEOUT_COUNT) {
1615 0 : BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
1616 : "offset 0x%08X!\n", __FILE__, __LINE__, offset);
1617 : rc = EBUSY;
1618 0 : }
1619 :
1620 0 : return(rc);
1621 : }
1622 :
1623 : #ifdef BNX_NVRAM_WRITE_SUPPORT
1624 : /****************************************************************************/
1625 : /* Write a dword (32 bits) to NVRAM. */
1626 : /* */
1627 : /* Write a 32 bit word to NVRAM. The caller is assumed to have already */
1628 : /* obtained the NVRAM lock, enabled the controller for NVRAM access, and */
1629 : /* enabled NVRAM write access. */
1630 : /* */
1631 : /* Returns: */
1632 : /* 0 on success, positive value on failure. */
1633 : /****************************************************************************/
1634 : int
1635 : bnx_nvram_write_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *val,
1636 : u_int32_t cmd_flags)
1637 : {
1638 : u_int32_t cmd, val32;
1639 : int j;
1640 :
1641 : /* Build the command word. */
1642 : cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;
1643 :
1644 : /* Calculate the offset for buffered flash if translation is used. */
1645 : if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
1646 : offset = ((offset / sc->bnx_flash_info->page_size) <<
1647 : sc->bnx_flash_info->page_bits) +
1648 : (offset % sc->bnx_flash_info->page_size);
1649 : }
1650 :
1651 : /*
1652 : * Clear the DONE bit separately, convert NVRAM data to big-endian,
1653 : * set the NVRAM address to write, and issue the write command
1654 : */
1655 : REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1656 : memcpy(&val32, val, 4);
1657 : val32 = htobe32(val32);
1658 : REG_WR(sc, BNX_NVM_WRITE, val32);
1659 : REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1660 : REG_WR(sc, BNX_NVM_COMMAND, cmd);
1661 :
1662 : /* Wait for completion. */
1663 : for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1664 : DELAY(5);
1665 :
1666 : if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
1667 : break;
1668 : }
1669 : if (j >= NVRAM_TIMEOUT_COUNT) {
1670 : BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
1671 : "offset 0x%08X\n", __FILE__, __LINE__, offset);
1672 : return (EBUSY);
1673 : }
1674 :
1675 : return (0);
1676 : }
1677 : #endif /* BNX_NVRAM_WRITE_SUPPORT */
1678 :
1679 : /****************************************************************************/
1680 : /* Initialize NVRAM access. */
1681 : /* */
1682 : /* Identify the NVRAM device in use and prepare the NVRAM interface to */
1683 : /* access that device. */
1684 : /* */
1685 : /* Returns: */
1686 : /* 0 on success, positive value on failure. */
1687 : /****************************************************************************/
1688 : int
1689 0 : bnx_init_nvram(struct bnx_softc *sc)
1690 : {
1691 : u_int32_t val;
1692 : int j, entry_count, rc = 0;
1693 : struct flash_spec *flash;
1694 :
1695 : DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1696 :
1697 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
1698 0 : sc->bnx_flash_info = &flash_5709;
1699 0 : goto bnx_init_nvram_get_flash_size;
1700 : }
1701 :
1702 : /* Determine the selected interface. */
1703 0 : val = REG_RD(sc, BNX_NVM_CFG1);
1704 :
1705 : entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1706 :
1707 : /*
1708 : * Flash reconfiguration is required to support additional
1709 : * NVRAM devices not directly supported in hardware.
1710 : * Check if the flash interface was reconfigured
1711 : * by the bootcode.
1712 : */
1713 :
1714 0 : if (val & 0x40000000) {
1715 : /* Flash interface reconfigured by bootcode. */
1716 :
1717 : DBPRINT(sc,BNX_INFO_LOAD,
1718 : "bnx_init_nvram(): Flash WAS reconfigured.\n");
1719 :
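 :
 : 		/* Match the reconfigured interface against the known
 : 		 * flash configurations. */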
1720 0 : for (j = 0, flash = &flash_table[0]; j < entry_count;
1721 0 : j++, flash++) {
1722 0 : if ((val & FLASH_BACKUP_STRAP_MASK) ==
1723 0 : (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1724 0 : sc->bnx_flash_info = flash;
1725 0 : break;
1726 : }
1727 : }
1728 : } else {
1729 : /* Flash interface not yet reconfigured. */
1730 : u_int32_t mask;
1731 :
1732 : DBPRINT(sc,BNX_INFO_LOAD,
1733 : "bnx_init_nvram(): Flash was NOT reconfigured.\n");
1734 :
1735 0 : if (val & (1 << 23))
1736 0 : mask = FLASH_BACKUP_STRAP_MASK;
1737 : else
1738 : mask = FLASH_STRAP_MASK;
1739 :
1740 : /* Look for the matching NVRAM device configuration data. */
1741 0 : for (j = 0, flash = &flash_table[0]; j < entry_count;
1742 0 : j++, flash++) {
1743 : /* Check if the dev matches any of the known devices. */
1744 0 : if ((val & mask) == (flash->strapping & mask)) {
1745 : /* Found a device match. */
1746 0 : sc->bnx_flash_info = flash;
1747 :
1748 : /* Request access to the flash interface. */
1749 0 : if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1750 0 : return (rc);
1751 :
1752 : /* Reconfigure the flash interface. */
1753 0 : bnx_enable_nvram_access(sc);
1754 0 : REG_WR(sc, BNX_NVM_CFG1, flash->config1);
1755 0 : REG_WR(sc, BNX_NVM_CFG2, flash->config2);
1756 0 : REG_WR(sc, BNX_NVM_CFG3, flash->config3);
1757 0 : REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
1758 0 : bnx_disable_nvram_access(sc);
1759 0 : bnx_release_nvram_lock(sc);
1760 :
1761 0 : break;
1762 : }
1763 : }
1764 0 : }
1765 :
1766 : /* Check if a matching device was found. */
1767 0 : if (j == entry_count) {
1768 0 : sc->bnx_flash_info = NULL;
1769 0 : BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1770 : __FILE__, __LINE__);
1771 : rc = ENODEV;
1772 0 : }
1773 :
1774 : bnx_init_nvram_get_flash_size:
1775 : /* Write the flash config data to the shared memory interface. */
1776 0 : val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
1777 0 : val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
1778 0 : if (val)
1779 0 : sc->bnx_flash_size = val;
1780 : else
1781 0 : sc->bnx_flash_size = sc->bnx_flash_info->total_size;
1782 :
1783 : DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
1784 : "0x%08X\n", sc->bnx_flash_info->total_size);
1785 :
1786 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
1787 :
1788 0 : return (rc);
1789 0 : }
1790 :
1791 : /****************************************************************************/
1792 : /* Read an arbitrary range of data from NVRAM. */
1793 : /* */
1794 : /* Prepares the NVRAM interface for access and reads the requested data */
1795 : /* into the supplied buffer. */
1796 : /* */
1797 : /* Returns: */
1798 : /* 0 on success and the data read, positive value on failure. */
1799 : /****************************************************************************/
1800 : int
1801 0 : bnx_nvram_read(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_buf,
1802 : int buf_size)
1803 : {
1804 : int rc = 0;
1805 : u_int32_t cmd_flags, offset32, len32, extra;
1806 :
1807 0 : if (buf_size == 0)
1808 0 : return (0);
1809 :
1810 : /* Request access to the flash interface. */
1811 0 : if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1812 0 : return (rc);
1813 :
1814 : /* Enable access to flash interface */
1815 0 : bnx_enable_nvram_access(sc);
1816 :
1817 : len32 = buf_size;
1818 : offset32 = offset;
1819 : extra = 0;
1820 :
1821 : cmd_flags = 0;
1822 :
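 : 	/*
 : 	 * NVRAM is accessed a dword at a time, so a request that does not
 : 	 * start or end on a 4-byte boundary is bounced through a small
 : 	 * stack buffer and only the requested bytes are copied out.
 : 	 */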
1823 0 : if (offset32 & 3) {
1824 0 : u_int8_t buf[4];
1825 : u_int32_t pre_len;
1826 :
1827 0 : offset32 &= ~3;
1828 0 : pre_len = 4 - (offset & 3);
1829 :
1830 0 : if (pre_len >= len32) {
1831 : pre_len = len32;
1832 : cmd_flags =
1833 : BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1834 0 : } else
1835 : cmd_flags = BNX_NVM_COMMAND_FIRST;
1836 :
1837 0 : rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1838 :
1839 0 : if (rc)
1840 0 : return (rc);
1841 :
1842 0 : memcpy(ret_buf, buf + (offset & 3), pre_len);
1843 :
1844 0 : offset32 += 4;
1845 0 : ret_buf += pre_len;
1846 0 : len32 -= pre_len;
1847 0 : }
1848 :
1849 0 : if (len32 & 3) {
1850 0 : extra = 4 - (len32 & 3);
1851 0 : len32 = (len32 + 4) & ~3;
1852 0 : }
1853 :
1854 0 : if (len32 == 4) {
1855 0 : u_int8_t buf[4];
1856 :
1857 0 : if (cmd_flags)
1858 0 : cmd_flags = BNX_NVM_COMMAND_LAST;
1859 : else
1860 : cmd_flags =
1861 : BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1862 :
1863 0 : rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1864 :
1865 0 : memcpy(ret_buf, buf, 4 - extra);
1866 0 : } else if (len32 > 0) {
1867 0 : u_int8_t buf[4];
1868 :
1869 : /* Read the first word. */
1870 0 : if (cmd_flags)
1871 0 : cmd_flags = 0;
1872 : else
1873 : cmd_flags = BNX_NVM_COMMAND_FIRST;
1874 :
1875 0 : rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1876 :
1877 : /* Advance to the next dword. */
1878 0 : offset32 += 4;
1879 0 : ret_buf += 4;
1880 0 : len32 -= 4;
1881 :
1882 0 : while (len32 > 4 && rc == 0) {
1883 0 : rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);
1884 :
1885 : /* Advance to the next dword. */
1886 0 : offset32 += 4;
1887 0 : ret_buf += 4;
1888 0 : len32 -= 4;
1889 : }
1890 :
1891 0 : if (rc)
1892 0 : return (rc);
1893 :
1894 : cmd_flags = BNX_NVM_COMMAND_LAST;
1895 0 : rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1896 :
1897 0 : memcpy(ret_buf, buf, 4 - extra);
1898 0 : }
1899 :
1900 : /* Disable access to flash interface and release the lock. */
1901 0 : bnx_disable_nvram_access(sc);
1902 0 : bnx_release_nvram_lock(sc);
1903 :
1904 0 : return (rc);
1905 0 : }
1906 :
1907 : #ifdef BNX_NVRAM_WRITE_SUPPORT
1908 : /****************************************************************************/
1909 : /* Write an arbitrary range of data to NVRAM. */
1910 : /* */
1911 : /* Prepares the NVRAM interface for write access and writes the requested */
1912 : /* data from the supplied buffer. The caller is responsible for */
1913 : /* calculating any appropriate CRCs. */
1914 : /* */
1915 : /* Returns: */
1916 : /* 0 on success, positive value on failure. */
1917 : /****************************************************************************/
1918 : int
1919 : bnx_nvram_write(struct bnx_softc *sc, u_int32_t offset, u_int8_t *data_buf,
1920 : int buf_size)
1921 : {
1922 : u_int32_t written, offset32, len32;
1923 : u_int8_t *buf, start[4], end[4];
1924 : int rc = 0;
1925 : int align_start, align_end;
1926 :
1927 : buf = data_buf;
1928 : offset32 = offset;
1929 : len32 = buf_size;
1930 : align_start = align_end = 0;
1931 :
1932 : if ((align_start = (offset32 & 3))) {
1933 : offset32 &= ~3;
1934 : len32 += align_start;
1935 : if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
1936 : return (rc);
1937 : }
1938 :
1939 : if (len32 & 3) {
1940 : if ((len32 > 4) || !align_start) {
1941 : align_end = 4 - (len32 & 3);
1942 : len32 += align_end;
1943 : if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
1944 : end, 4))) {
1945 : return (rc);
1946 : }
1947 : }
1948 : }
1949 :
1950 : if (align_start || align_end) {
1951 : buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1952 : 		if (buf == NULL)
1953 : return (ENOMEM);
1954 :
1955 : if (align_start)
1956 : memcpy(buf, start, 4);
1957 :
1958 : if (align_end)
1959 : memcpy(buf + len32 - 4, end, 4);
1960 :
1961 : memcpy(buf + align_start, data_buf, buf_size);
1962 : }
1963 :
1964 : written = 0;
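 : 	/*
 : 	 * Write one flash page per iteration.  Non-buffered parts need a
 : 	 * read-modify-write: the page is read back, erased, and rewritten
 : 	 * with the original contents merged with the new data.
 : 	 */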
1965 : while ((written < len32) && (rc == 0)) {
1966 : u_int32_t page_start, page_end, data_start, data_end;
1967 : u_int32_t addr, cmd_flags;
1968 : int i;
1969 : u_int8_t flash_buffer[264];
1970 :
1971 : /* Find the page_start addr */
1972 : page_start = offset32 + written;
1973 : page_start -= (page_start % sc->bnx_flash_info->page_size);
1974 : /* Find the page_end addr */
1975 : page_end = page_start + sc->bnx_flash_info->page_size;
1976 : /* Find the data_start addr */
1977 : data_start = (written == 0) ? offset32 : page_start;
1978 : /* Find the data_end addr */
1979 : data_end = (page_end > offset32 + len32) ?
1980 : (offset32 + len32) : page_end;
1981 :
1982 : /* Request access to the flash interface. */
1983 : if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1984 : goto nvram_write_end;
1985 :
1986 : /* Enable access to flash interface */
1987 : bnx_enable_nvram_access(sc);
1988 :
1989 : cmd_flags = BNX_NVM_COMMAND_FIRST;
1990 : if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1991 : int j;
1992 :
1993 : /* Read the whole page into the buffer
1994 : * (non-buffer flash only) */
1995 : for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
1996 : if (j == (sc->bnx_flash_info->page_size - 4))
1997 : cmd_flags |= BNX_NVM_COMMAND_LAST;
1998 :
1999 : rc = bnx_nvram_read_dword(sc,
2000 : page_start + j,
2001 : &flash_buffer[j],
2002 : cmd_flags);
2003 :
2004 : if (rc)
2005 : goto nvram_write_end;
2006 :
2007 : cmd_flags = 0;
2008 : }
2009 : }
2010 :
2011 : /* Enable writes to flash interface (unlock write-protect) */
2012 : if ((rc = bnx_enable_nvram_write(sc)) != 0)
2013 : goto nvram_write_end;
2014 :
2015 : /* Erase the page */
2016 : if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
2017 : goto nvram_write_end;
2018 :
2019 : /* Re-enable the write again for the actual write */
2020 : bnx_enable_nvram_write(sc);
2021 :
2022 : /* Loop to write back the buffer data from page_start to
2023 : * data_start */
2024 : i = 0;
2025 : if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
2026 : for (addr = page_start; addr < data_start;
2027 : addr += 4, i += 4) {
2028 :
2029 : rc = bnx_nvram_write_dword(sc, addr,
2030 : &flash_buffer[i], cmd_flags);
2031 :
2032 : if (rc != 0)
2033 : goto nvram_write_end;
2034 :
2035 : cmd_flags = 0;
2036 : }
2037 : }
2038 :
2039 : /* Loop to write the new data from data_start to data_end */
2040 : for (addr = data_start; addr < data_end; addr += 4, i++) {
2041 : if ((addr == page_end - 4) ||
2042 : (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)
2043 : && (addr == data_end - 4))) {
2044 :
2045 : cmd_flags |= BNX_NVM_COMMAND_LAST;
2046 : }
2047 :
2048 : rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);
2049 :
2050 : if (rc != 0)
2051 : goto nvram_write_end;
2052 :
2053 : cmd_flags = 0;
2054 : buf += 4;
2055 : }
2056 :
2057 : /* Loop to write back the buffer data from data_end
2058 : * to page_end */
2059 : if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
2060 : for (addr = data_end; addr < page_end;
2061 : addr += 4, i += 4) {
2062 :
2063 : if (addr == page_end-4)
2064 : cmd_flags = BNX_NVM_COMMAND_LAST;
2065 :
2066 : rc = bnx_nvram_write_dword(sc, addr,
2067 : &flash_buffer[i], cmd_flags);
2068 :
2069 : if (rc != 0)
2070 : goto nvram_write_end;
2071 :
2072 : cmd_flags = 0;
2073 : }
2074 : }
2075 :
2076 : /* Disable writes to flash interface (lock write-protect) */
2077 : bnx_disable_nvram_write(sc);
2078 :
2079 : /* Disable access to flash interface */
2080 : bnx_disable_nvram_access(sc);
2081 : bnx_release_nvram_lock(sc);
2082 :
2083 : /* Increment written */
2084 : written += data_end - data_start;
2085 : }
2086 :
2087 : nvram_write_end:
2088 : if (align_start || align_end)
2089 : free(buf, M_DEVBUF, len32);
2090 :
2091 : return (rc);
2092 : }
2093 : #endif /* BNX_NVRAM_WRITE_SUPPORT */
2094 :
2095 : /****************************************************************************/
2096 : /* Verifies that NVRAM is accessible and contains valid data. */
2097 : /* */
2098 : /* Reads the configuration data from NVRAM and verifies that the CRC is */
2099 : /* correct. */
2100 : /* */
2101 : /* Returns: */
2102 : /* 0 on success, positive value on failure. */
2103 : /****************************************************************************/
2104 : int
2105 0 : bnx_nvram_test(struct bnx_softc *sc)
2106 : {
2107 0 : u_int32_t buf[BNX_NVRAM_SIZE / 4];
2108 0 : u_int8_t *data = (u_int8_t *) buf;
2109 : int rc = 0;
2110 : u_int32_t magic, csum;
2111 :
2112 : /*
2113 : * Check that the device NVRAM is valid by reading
2114 : * the magic value at offset 0.
2115 : */
2116 0 : if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
2117 : goto bnx_nvram_test_done;
2118 :
2119 0 : magic = bnx_be32toh(buf[0]);
2120 0 : if (magic != BNX_NVRAM_MAGIC) {
2121 : rc = ENODEV;
2122 0 : BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
2123 : "Expected: 0x%08X, Found: 0x%08X\n",
2124 : __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
2125 0 : goto bnx_nvram_test_done;
2126 : }
2127 :
2128 : /*
2129 : * Verify that the device NVRAM includes valid
2130 : * configuration data.
2131 : */
2132 0 : if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
2133 : goto bnx_nvram_test_done;
2134 :
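 : 	/*
 : 	 * Each 0x100-byte configuration region is protected by a CRC-32;
 : 	 * a CRC computed over an intact region yields the well-known
 : 	 * residual value.
 : 	 */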
2135 0 : csum = ether_crc32_le(data, 0x100);
2136 0 : if (csum != BNX_CRC32_RESIDUAL) {
2137 : rc = ENODEV;
2138 0 : BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
2139 : "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2140 : __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2141 0 : goto bnx_nvram_test_done;
2142 : }
2143 :
2144 0 : csum = ether_crc32_le(data + 0x100, 0x100);
2145 0 : if (csum != BNX_CRC32_RESIDUAL) {
2146 0 : BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
2147 : 		    "Information NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2148 : __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2149 : rc = ENODEV;
2150 0 : }
2151 :
2152 : bnx_nvram_test_done:
2153 0 : return (rc);
2154 0 : }
2155 :
2156 : /****************************************************************************/
2157 : /* Identifies the current media type of the controller and sets the PHY */
2158 : /* address. */
2159 : /* */
2160 : /* Returns: */
2161 : /* Nothing. */
2162 : /****************************************************************************/
2163 : void
2164 0 : bnx_get_media(struct bnx_softc *sc)
2165 : {
2166 : u_int32_t val;
2167 :
2168 0 : sc->bnx_phy_addr = 1;
2169 :
2170 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2171 0 : u_int32_t val = REG_RD(sc, BNX_MISC_DUAL_MEDIA_CTRL);
2172 0 : u_int32_t bond_id = val & BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID;
2173 : u_int32_t strap;
2174 :
2175 : /*
2176 : * The BCM5709S is software configurable
2177 : * for Copper or SerDes operation.
2178 : */
2179 0 : if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
2180 : DBPRINT(sc, BNX_INFO_LOAD,
2181 : "5709 bonded for copper.\n");
2182 0 : goto bnx_get_media_exit;
2183 0 : } else if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
2184 : DBPRINT(sc, BNX_INFO_LOAD,
2185 : "5709 bonded for dual media.\n");
2186 0 : sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2187 0 : goto bnx_get_media_exit;
2188 : }
2189 :
2190 0 : if (val & BNX_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
2191 0 : strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
2192 : else {
2193 0 : strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP)
2194 0 : >> 8;
2195 : }
2196 :
2197 0 : if (sc->bnx_pa.pa_function == 0) {
2198 0 : switch (strap) {
2199 : case 0x4:
2200 : case 0x5:
2201 : case 0x6:
2202 : DBPRINT(sc, BNX_INFO_LOAD,
2203 : "BCM5709 s/w configured for SerDes.\n");
2204 0 : sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2205 0 : break;
2206 : default:
2207 : DBPRINT(sc, BNX_INFO_LOAD,
2208 : "BCM5709 s/w configured for Copper.\n");
2209 : }
2210 : } else {
2211 0 : switch (strap) {
2212 : case 0x1:
2213 : case 0x2:
2214 : case 0x4:
2215 : DBPRINT(sc, BNX_INFO_LOAD,
2216 : "BCM5709 s/w configured for SerDes.\n");
2217 0 : sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2218 0 : break;
2219 : default:
2220 : DBPRINT(sc, BNX_INFO_LOAD,
2221 : "BCM5709 s/w configured for Copper.\n");
2222 : }
2223 : }
2224 :
2225 0 : } else if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT)
2226 0 : sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2227 :
2228 0 : if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
2229 0 : sc->bnx_flags |= BNX_NO_WOL_FLAG;
2230 :
2231 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709)
2232 0 : sc->bnx_phy_flags |= BNX_PHY_IEEE_CLAUSE_45_FLAG;
2233 :
2234 : /*
2235 : * The BCM5708S, BCM5709S, and BCM5716S controllers use a
2236 : * separate PHY for SerDes.
2237 : */
2238 0 : if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
2239 0 : sc->bnx_phy_addr = 2;
2240 0 : val = REG_RD_IND(sc, sc->bnx_shmem_base +
2241 : BNX_SHARED_HW_CFG_CONFIG);
2242 0 : if (val & BNX_SHARED_HW_CFG_PHY_2_5G) {
2243 0 : sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
2244 : DBPRINT(sc, BNX_INFO_LOAD,
2245 : "Found 2.5Gb capable adapter\n");
2246 0 : }
2247 : }
2248 0 : } else if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) ||
2249 0 : (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708))
2250 0 : sc->bnx_phy_flags |= BNX_PHY_CRC_FIX_FLAG;
2251 :
2252 : bnx_get_media_exit:
2253 : DBPRINT(sc, (BNX_INFO_LOAD | BNX_INFO_PHY),
2254 : "Using PHY address %d.\n", sc->bnx_phy_addr);
2255 0 : }
2256 :
2257 : /****************************************************************************/
2258 : /* Performs PHY initialization required before MII drivers access the */
2259 : /* device. */
2260 : /* */
2261 : /* Returns: */
2262 : /* Nothing. */
2263 : /****************************************************************************/
2264 : void
2265 0 : bnx_init_media(struct bnx_softc *sc)
2266 : {
2267 0 : if (sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) {
2268 : /*
2269 : * Configure the BCM5709S / BCM5716S PHYs to use traditional
2270 : * IEEE Clause 22 method. Otherwise we have no way to attach
2271 : * the PHY to the mii(4) layer. PHY specific configuration
2272 : * is done by the mii(4) layer.
2273 : */
2274 :
2275 : /* Select auto-negotiation MMD of the PHY. */
2276 0 : bnx_miibus_write_reg(&sc->bnx_dev, sc->bnx_phy_addr,
2277 : BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);
2278 :
2279 0 : bnx_miibus_write_reg(&sc->bnx_dev, sc->bnx_phy_addr,
2280 : BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);
2281 :
2282 0 : bnx_miibus_write_reg(&sc->bnx_dev, sc->bnx_phy_addr,
2283 : BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
2284 0 : }
2285 0 : }
2286 :
2287 : /****************************************************************************/
2288 : /* Free any DMA memory owned by the driver. */
2289 : /* */
2290 : /* Scans through each data structure that requires DMA memory and frees */
2291 : /* the memory if allocated. */
2292 : /* */
2293 : /* Returns: */
2294 : /* Nothing. */
2295 : /****************************************************************************/
2296 : void
2297 0 : bnx_dma_free(struct bnx_softc *sc)
2298 : {
2299 : int i;
2300 :
2301 : DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2302 :
2303 : /* Destroy the status block. */
2304 0 : if (sc->status_block != NULL && sc->status_map != NULL) {
2305 0 : bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
2306 : sc->status_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2307 0 : bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
2308 0 : bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->status_block,
2309 : BNX_STATUS_BLK_SZ);
2310 0 : bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
2311 : sc->status_rseg);
2312 0 : bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
2313 0 : sc->status_block = NULL;
2314 0 : sc->status_map = NULL;
2315 0 : }
2316 :
2317 : /* Destroy the statistics block. */
2318 0 : if (sc->stats_block != NULL && sc->stats_map != NULL) {
2319 0 : bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
2320 0 : bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->stats_block,
2321 : BNX_STATS_BLK_SZ);
2322 0 : bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
2323 : sc->stats_rseg);
2324 0 : bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
2325 0 : sc->stats_block = NULL;
2326 0 : sc->stats_map = NULL;
2327 0 : }
2328 :
2329 : /* Free, unmap and destroy all context memory pages. */
2330 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2331 0 : for (i = 0; i < sc->ctx_pages; i++) {
2332 0 : if (sc->ctx_block[i] != NULL) {
2333 0 : bus_dmamap_unload(sc->bnx_dmatag,
2334 : sc->ctx_map[i]);
2335 0 : bus_dmamem_unmap(sc->bnx_dmatag,
2336 : (caddr_t)sc->ctx_block[i],
2337 : BCM_PAGE_SIZE);
2338 0 : bus_dmamem_free(sc->bnx_dmatag,
2339 : &sc->ctx_segs[i], sc->ctx_rsegs[i]);
2340 0 : bus_dmamap_destroy(sc->bnx_dmatag,
2341 : sc->ctx_map[i]);
2342 0 : sc->ctx_block[i] = NULL;
2343 0 : }
2344 : }
2345 : }
2346 :
2347 : /* Free, unmap and destroy all TX buffer descriptor chain pages. */
2348 0 : for (i = 0; i < TX_PAGES; i++ ) {
2349 0 : if (sc->tx_bd_chain[i] != NULL &&
2350 0 : sc->tx_bd_chain_map[i] != NULL) {
2351 0 : bus_dmamap_unload(sc->bnx_dmatag,
2352 : sc->tx_bd_chain_map[i]);
2353 0 : bus_dmamem_unmap(sc->bnx_dmatag,
2354 : (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
2355 0 : bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2356 : sc->tx_bd_chain_rseg[i]);
2357 0 : bus_dmamap_destroy(sc->bnx_dmatag,
2358 : sc->tx_bd_chain_map[i]);
2359 0 : sc->tx_bd_chain[i] = NULL;
2360 0 : sc->tx_bd_chain_map[i] = NULL;
2361 0 : }
2362 : }
2363 :
2364 : /* Unload and destroy the TX mbuf maps. */
2365 0 : for (i = 0; i < TOTAL_TX_BD; i++) {
2366 0 : bus_dmamap_unload(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
2367 0 : bus_dmamap_destroy(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
2368 : }
2369 :
2370 : /* Free, unmap and destroy all RX buffer descriptor chain pages. */
2371 0 : for (i = 0; i < RX_PAGES; i++ ) {
2372 0 : if (sc->rx_bd_chain[i] != NULL &&
2373 0 : sc->rx_bd_chain_map[i] != NULL) {
2374 0 : bus_dmamap_unload(sc->bnx_dmatag,
2375 : sc->rx_bd_chain_map[i]);
2376 0 : bus_dmamem_unmap(sc->bnx_dmatag,
2377 : (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
2378 0 : bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2379 : sc->rx_bd_chain_rseg[i]);
2380 :
2381 0 : bus_dmamap_destroy(sc->bnx_dmatag,
2382 : sc->rx_bd_chain_map[i]);
2383 0 : sc->rx_bd_chain[i] = NULL;
2384 0 : sc->rx_bd_chain_map[i] = NULL;
2385 0 : }
2386 : }
2387 :
2388 : /* Unload and destroy the RX mbuf maps. */
2389 0 : for (i = 0; i < TOTAL_RX_BD; i++) {
2390 0 : if (sc->rx_mbuf_map[i] != NULL) {
2391 0 : bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2392 0 : bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2393 0 : }
2394 : }
2395 :
2396 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2397 0 : }
2398 :
2399 : /****************************************************************************/
2400 : /* Allocate any DMA memory needed by the driver. */
2401 : /* */
2402 : /* Allocates DMA memory needed for the various global structures needed by */
2403 : /* hardware. */
2404 : /* */
2405 : /* Returns: */
2406 : /* 0 for success, positive value for failure. */
2407 : /****************************************************************************/
2408 : int
2409 0 : bnx_dma_alloc(struct bnx_softc *sc)
2410 : {
2411 : int i, rc = 0;
2412 :
2413 : DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2414 :
2415 : /*
2416 : * Create DMA maps for the TX buffer mbufs.
2417 : */
2418 0 : for (i = 0; i < TOTAL_TX_BD; i++) {
2419 0 : if (bus_dmamap_create(sc->bnx_dmatag,
2420 : MCLBYTES * BNX_MAX_SEGMENTS, BNX_MAX_SEGMENTS,
2421 : MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->tx_mbuf_map[i])) {
2422 0 : 			printf(": Could not create Tx mbuf %d DMA map!\n", i);
2423 : rc = ENOMEM;
2424 0 : goto bnx_dma_alloc_exit;
2425 : }
2426 : }
2427 :
2428 : /*
2429 : * Allocate DMA memory for the status block, map the memory into DMA
2430 : * space, and fetch the physical address of the block.
2431 : */
2432 0 : if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
2433 : BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
2434 0 : printf(": Could not create status block DMA map!\n");
2435 : rc = ENOMEM;
2436 0 : goto bnx_dma_alloc_exit;
2437 : }
2438 :
2439 0 : if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
2440 : BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
2441 : &sc->status_rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
2442 0 : printf(": Could not allocate status block DMA memory!\n");
2443 : rc = ENOMEM;
2444 0 : goto bnx_dma_alloc_exit;
2445 : }
2446 :
2447 0 : if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
2448 : BNX_STATUS_BLK_SZ, (caddr_t *)&sc->status_block, BUS_DMA_NOWAIT)) {
2449 0 : printf(": Could not map status block DMA memory!\n");
2450 : rc = ENOMEM;
2451 0 : goto bnx_dma_alloc_exit;
2452 : }
2453 :
2454 0 : if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
2455 : sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2456 0 : printf(": Could not load status block DMA memory!\n");
2457 : rc = ENOMEM;
2458 0 : goto bnx_dma_alloc_exit;
2459 : }
2460 :
2461 0 : bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
2462 : sc->status_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2463 :
2464 0 : sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
2465 :
2466 : /* DRC - Fix for 64 bit addresses. */
2467 : DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
2468 : (u_int32_t) sc->status_block_paddr);
2469 :
2470 : /* BCM5709 uses host memory as cache for context memory. */
2471 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2472 0 : sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
2473 0 : if (sc->ctx_pages == 0)
2474 0 : sc->ctx_pages = 1;
2475 0 : if (sc->ctx_pages > 4) /* XXX */
2476 0 : sc->ctx_pages = 4;
2477 :
2478 : DBRUNIF((sc->ctx_pages > 512),
2479 : 		    BNX_PRINTF(sc, "%s(%d): Too many CTX pages! %d > 512\n",
2480 : __FILE__, __LINE__, sc->ctx_pages));
2481 :
2482 :
2483 0 : for (i = 0; i < sc->ctx_pages; i++) {
2484 0 : if (bus_dmamap_create(sc->bnx_dmatag, BCM_PAGE_SIZE,
2485 : 1, BCM_PAGE_SIZE, BNX_DMA_BOUNDARY,
2486 : BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2487 0 : &sc->ctx_map[i]) != 0) {
2488 : rc = ENOMEM;
2489 0 : goto bnx_dma_alloc_exit;
2490 : }
2491 :
2492 0 : if (bus_dmamem_alloc(sc->bnx_dmatag, BCM_PAGE_SIZE,
2493 : BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->ctx_segs[i],
2494 0 : 1, &sc->ctx_rsegs[i], BUS_DMA_NOWAIT) != 0) {
2495 : rc = ENOMEM;
2496 0 : goto bnx_dma_alloc_exit;
2497 : }
2498 :
2499 0 : if (bus_dmamem_map(sc->bnx_dmatag, &sc->ctx_segs[i],
2500 : sc->ctx_rsegs[i], BCM_PAGE_SIZE,
2501 : (caddr_t *)&sc->ctx_block[i],
2502 0 : BUS_DMA_NOWAIT) != 0) {
2503 : rc = ENOMEM;
2504 0 : goto bnx_dma_alloc_exit;
2505 : }
2506 :
2507 0 : if (bus_dmamap_load(sc->bnx_dmatag, sc->ctx_map[i],
2508 : sc->ctx_block[i], BCM_PAGE_SIZE, NULL,
2509 0 : BUS_DMA_NOWAIT) != 0) {
2510 : rc = ENOMEM;
2511 0 : goto bnx_dma_alloc_exit;
2512 : }
2513 :
2514 0 : bzero(sc->ctx_block[i], BCM_PAGE_SIZE);
2515 : }
2516 : }
2517 :
2518 : /*
2519 : * Allocate DMA memory for the statistics block, map the memory into
2520 : * DMA space, and fetch the physical address of the block.
2521 : */
2522 0 : if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2523 : BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2524 0 : printf(": Could not create stats block DMA map!\n");
2525 : rc = ENOMEM;
2526 0 : goto bnx_dma_alloc_exit;
2527 : }
2528 :
2529 0 : if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2530 : BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2531 : &sc->stats_rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
2532 0 : printf(": Could not allocate stats block DMA memory!\n");
2533 : rc = ENOMEM;
2534 0 : goto bnx_dma_alloc_exit;
2535 : }
2536 :
2537 0 : if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2538 : BNX_STATS_BLK_SZ, (caddr_t *)&sc->stats_block, BUS_DMA_NOWAIT)) {
2539 0 : printf(": Could not map stats block DMA memory!\n");
2540 : rc = ENOMEM;
2541 0 : goto bnx_dma_alloc_exit;
2542 : }
2543 :
2544 0 : if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2545 : sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2546 0 : 		printf(": Could not load stats block DMA memory!\n");
2547 : rc = ENOMEM;
2548 0 : goto bnx_dma_alloc_exit;
2549 : }
2550 :
2551 0 : sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2552 :
2553 : /* DRC - Fix for 64 bit address. */
2554 : DBPRINT(sc,BNX_INFO, "stats_block_paddr = 0x%08X\n",
2555 : (u_int32_t) sc->stats_block_paddr);
2556 :
2557 : /*
2558 : * Allocate DMA memory for the TX buffer descriptor chain,
2559 : * and fetch the physical address of the block.
2560 : */
2561 0 : for (i = 0; i < TX_PAGES; i++) {
2562 0 : if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1,
2563 : BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2564 : &sc->tx_bd_chain_map[i])) {
2565 0 : printf(": Could not create Tx desc %d DMA map!\n", i);
2566 : rc = ENOMEM;
2567 0 : goto bnx_dma_alloc_exit;
2568 : }
2569 :
2570 0 : if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ,
2571 : BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1,
2572 : &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2573 0 : printf(": Could not allocate TX desc %d DMA memory!\n",
2574 : i);
2575 : rc = ENOMEM;
2576 0 : goto bnx_dma_alloc_exit;
2577 : }
2578 :
2579 0 : if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2580 : sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ,
2581 : (caddr_t *)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) {
2582 0 : printf(": Could not map TX desc %d DMA memory!\n", i);
2583 : rc = ENOMEM;
2584 0 : goto bnx_dma_alloc_exit;
2585 : }
2586 :
2587 0 : if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i],
2588 : (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL,
2589 : BUS_DMA_NOWAIT)) {
2590 0 : printf(": Could not load TX desc %d DMA memory!\n", i);
2591 : rc = ENOMEM;
2592 0 : goto bnx_dma_alloc_exit;
2593 : }
2594 :
2595 0 : sc->tx_bd_chain_paddr[i] =
2596 0 : sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr;
2597 :
2598 : /* DRC - Fix for 64 bit systems. */
2599 : DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2600 : i, (u_int32_t) sc->tx_bd_chain_paddr[i]);
2601 : }
2602 :
2603 : /*
2604 : * Allocate DMA memory for the Rx buffer descriptor chain,
2605 : * and fetch the physical address of the block.
2606 : */
2607 0 : for (i = 0; i < RX_PAGES; i++) {
2608 0 : if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1,
2609 : BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2610 : &sc->rx_bd_chain_map[i])) {
2611 0 : printf(": Could not create Rx desc %d DMA map!\n", i);
2612 : rc = ENOMEM;
2613 0 : goto bnx_dma_alloc_exit;
2614 : }
2615 :
2616 0 : if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ,
2617 : BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1,
2618 : &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
2619 0 : printf(": Could not allocate Rx desc %d DMA memory!\n",
2620 : i);
2621 : rc = ENOMEM;
2622 0 : goto bnx_dma_alloc_exit;
2623 : }
2624 :
2625 0 : if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2626 : sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ,
2627 : (caddr_t *)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) {
2628 0 : printf(": Could not map Rx desc %d DMA memory!\n", i);
2629 : rc = ENOMEM;
2630 0 : goto bnx_dma_alloc_exit;
2631 : }
2632 :
2633 0 : if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2634 : (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL,
2635 : BUS_DMA_NOWAIT)) {
2636 0 : printf(": Could not load Rx desc %d DMA memory!\n", i);
2637 : rc = ENOMEM;
2638 0 : goto bnx_dma_alloc_exit;
2639 : }
2640 :
2641 0 : sc->rx_bd_chain_paddr[i] =
2642 0 : sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr;
2643 :
2644 : /* DRC - Fix for 64 bit systems. */
2645 : DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2646 : i, (u_int32_t) sc->rx_bd_chain_paddr[i]);
2647 : }
2648 :
2649 : /*
2650 : * Create DMA maps for the Rx buffer mbufs.
2651 : */
2652 0 : for (i = 0; i < TOTAL_RX_BD; i++) {
2653 0 : if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_MRU,
2654 : BNX_MAX_SEGMENTS, BNX_MAX_MRU, 0, BUS_DMA_NOWAIT,
2655 : &sc->rx_mbuf_map[i])) {
2656 0 : printf(": Could not create Rx mbuf %d DMA map!\n", i);
2657 : rc = ENOMEM;
2658 0 : goto bnx_dma_alloc_exit;
2659 : }
2660 : }
2661 :
2662 : bnx_dma_alloc_exit:
2663 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2664 :
2665 0 : return(rc);
2666 : }
2667 :
2668 : /****************************************************************************/
2669 : /* Release all resources used by the driver. */
2670 : /* */
2671 : /* Releases all resources acquired by the driver including interrupts, */
2672 : /* interrupt handler, interfaces, mutexes, and DMA memory. */
2673 : /* */
2674 : /* Returns: */
2675 : /* Nothing. */
2676 : /****************************************************************************/
2677 : void
2678 0 : bnx_release_resources(struct bnx_softc *sc)
2679 : {
2680 0 : struct pci_attach_args *pa = &(sc->bnx_pa);
2681 :
2682 : DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2683 :
2684 0 : bnx_dma_free(sc);
2685 :
2686 0 : if (sc->bnx_intrhand != NULL)
2687 0 : pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);
2688 :
2689 0 : if (sc->bnx_size)
2690 0 : bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);
2691 :
2692 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2693 0 : }
2694 :
2695 : /****************************************************************************/
2696 : /* Firmware synchronization. */
2697 : /* */
2698 : /* Before performing certain events such as a chip reset, synchronize with */
2699 : /* the firmware first. */
2700 : /* */
2701 : /* Returns: */
2702 : /* 0 for success, positive value for failure. */
2703 : /****************************************************************************/
2704 : int
2705 0 : bnx_fw_sync(struct bnx_softc *sc, u_int32_t msg_data)
2706 : {
2707 : int i, rc = 0;
2708 : u_int32_t val;
2709 :
2710 : /* Don't waste any time if we've timed out before. */
2711 0 : if (sc->bnx_fw_timed_out) {
2712 : rc = EBUSY;
2713 0 : goto bnx_fw_sync_exit;
2714 : }
2715 :
2716 : /* Increment the message sequence number. */
2717 0 : sc->bnx_fw_wr_seq++;
2718 0 : msg_data |= sc->bnx_fw_wr_seq;
2719 :
2720 : DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
2721 : msg_data);
2722 :
2723 : /* Send the message to the bootcode driver mailbox. */
2724 0 : REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2725 :
2726 : /* Wait for the bootcode to acknowledge the message. */
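 : 	/* The bootcode acks by echoing the sequence number in BNX_FW_MB. */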
2727 0 : for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2728 : /* Check for a response in the bootcode firmware mailbox. */
2729 0 : val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
2730 0 : if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
2731 : break;
2732 0 : DELAY(1000);
2733 : }
2734 :
2735 : /* If we've timed out, tell the bootcode that we've stopped waiting. */
2736 0 : if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
2737 0 : ((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
2738 0 : BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2739 : "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);
2740 :
2741 0 : msg_data &= ~BNX_DRV_MSG_CODE;
2742 0 : msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;
2743 :
2744 0 : REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2745 :
2746 0 : sc->bnx_fw_timed_out = 1;
2747 : rc = EBUSY;
2748 0 : }
2749 :
2750 : bnx_fw_sync_exit:
2751 0 : return (rc);
2752 : }
2753 :
2754 : /****************************************************************************/
2755 : /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
2756 : /* */
2757 : /* Returns: */
2758 : /* Nothing. */
2759 : /****************************************************************************/
2760 : void
2761 0 : bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code,
2762 : u_int32_t rv2p_code_len, u_int32_t rv2p_proc)
2763 : {
2764 : int i;
2765 : u_int32_t val;
2766 :
2767 : /* Set the page size used by RV2P. */
2768 0 : if (rv2p_proc == RV2P_PROC2) {
2769 0 : BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code,
2770 : USABLE_RX_BD_PER_PAGE);
2771 0 : }
2772 :
2773 0 : for (i = 0; i < rv2p_code_len; i += 8) {
2774 0 : REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
2775 0 : rv2p_code++;
2776 0 : REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
2777 0 : rv2p_code++;
2778 :
2779 0 : if (rv2p_proc == RV2P_PROC1) {
2780 : val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
2781 0 : REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
2782 0 : } else {
2783 : val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
2784 0 : REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
2785 : }
2786 : }
2787 :
2788 : /* Reset the processor, un-stall is done later. */
2789 0 : if (rv2p_proc == RV2P_PROC1)
2790 0 : REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
2791 : else
2792 0 : REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
2793 0 : }
2794 :
2795 : /****************************************************************************/
2796 : /* Load RISC processor firmware. */
2797 : /* */
2798 : /* Loads firmware into the scratchpad memory */
2799 : /* associated with a particular processor. */
2800 : /* */
2801 : /* Returns: */
2802 : /* Nothing. */
2803 : /****************************************************************************/
2804 : void
2805 0 : bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
2806 : struct fw_info *fw)
2807 : {
2808 : u_int32_t offset;
2809 : u_int32_t val;
2810 :
2811 : /* Halt the CPU. */
2812 0 : val = REG_RD_IND(sc, cpu_reg->mode);
2813 0 : val |= cpu_reg->mode_value_halt;
2814 0 : REG_WR_IND(sc, cpu_reg->mode, val);
2815 0 : REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2816 :
2817 : /* Load the Text area. */
2818 0 : offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2819 0 : if (fw->text) {
2820 : int j;
2821 :
2822 0 : for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2823 0 : REG_WR_IND(sc, offset, fw->text[j]);
2824 0 : }
2825 :
2826 : /* Load the Data area. */
2827 0 : offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2828 0 : if (fw->data) {
2829 : int j;
2830 :
2831 0 : for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2832 0 : REG_WR_IND(sc, offset, fw->data[j]);
2833 0 : }
2834 :
2835 : /* Load the SBSS area. */
2836 0 : offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2837 0 : if (fw->sbss) {
2838 : int j;
2839 :
2840 0 : for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2841 0 : REG_WR_IND(sc, offset, fw->sbss[j]);
2842 0 : }
2843 :
2844 : /* Load the BSS area. */
2845 0 : offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2846 0 : if (fw->bss) {
2847 : int j;
2848 :
2849 0 : for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2850 0 : REG_WR_IND(sc, offset, fw->bss[j]);
2851 0 : }
2852 :
2853 : /* Load the Read-Only area. */
2854 0 : offset = cpu_reg->spad_base +
2855 0 : (fw->rodata_addr - cpu_reg->mips_view_base);
2856 0 : if (fw->rodata) {
2857 : int j;
2858 :
2859 0 : for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2860 0 : REG_WR_IND(sc, offset, fw->rodata[j]);
2861 0 : }
2862 :
2863 : /* Clear the pre-fetch instruction. */
2864 0 : REG_WR_IND(sc, cpu_reg->inst, 0);
2865 0 : REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2866 :
2867 : /* Start the CPU. */
2868 0 : val = REG_RD_IND(sc, cpu_reg->mode);
2869 0 : val &= ~cpu_reg->mode_value_halt;
2870 0 : REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2871 0 : REG_WR_IND(sc, cpu_reg->mode, val);
2872 0 : }
2873 :
2874 : /****************************************************************************/
2875 : /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */
2876 : /* */
2877 : /* Loads the firmware for each CPU and starts the CPU. */
2878 : /* */
2879 : /* Returns: */
2880 : /* Nothing. */
2881 : /****************************************************************************/
2882 : void
2883 0 : bnx_init_cpus(struct bnx_softc *sc)
2884 : {
2885 : struct bnx_firmware *bfw = &bnx_firmwares[BNX_FW_B06];
2886 : struct bnx_rv2p *rv2p = &bnx_rv2ps[BNX_RV2P];
2887 0 : struct cpu_reg cpu_reg;
2888 0 : struct fw_info fw;
2889 :
2890 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2891 : bfw = &bnx_firmwares[BNX_FW_B09];
2892 0 : if ((BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax))
2893 0 : rv2p = &bnx_rv2ps[BNX_XI90_RV2P];
2894 : else
2895 : rv2p = &bnx_rv2ps[BNX_XI_RV2P];
2896 : }
2897 :
2898 : /* Initialize the RV2P processor. */
2899 0 : bnx_load_rv2p_fw(sc, rv2p->bnx_rv2p_proc1,
2900 0 : rv2p->fw->bnx_rv2p_proc1len, RV2P_PROC1);
2901 0 : bnx_load_rv2p_fw(sc, rv2p->bnx_rv2p_proc2,
2902 0 : rv2p->fw->bnx_rv2p_proc2len, RV2P_PROC2);
2903 :
2904 : /* Initialize the RX Processor. */
2905 0 : cpu_reg.mode = BNX_RXP_CPU_MODE;
2906 0 : cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
2907 0 : cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
2908 0 : cpu_reg.state = BNX_RXP_CPU_STATE;
2909 0 : cpu_reg.state_value_clear = 0xffffff;
2910 0 : cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
2911 0 : cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
2912 0 : cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
2913 0 : cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
2914 0 : cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
2915 0 : cpu_reg.spad_base = BNX_RXP_SCRATCH;
2916 0 : cpu_reg.mips_view_base = 0x8000000;
2917 :
2918 0 : fw.ver_major = bfw->fw->bnx_RXP_FwReleaseMajor;
2919 0 : fw.ver_minor = bfw->fw->bnx_RXP_FwReleaseMinor;
2920 0 : fw.ver_fix = bfw->fw->bnx_RXP_FwReleaseFix;
2921 0 : fw.start_addr = bfw->fw->bnx_RXP_FwStartAddr;
2922 :
2923 0 : fw.text_addr = bfw->fw->bnx_RXP_FwTextAddr;
2924 0 : fw.text_len = bfw->fw->bnx_RXP_FwTextLen;
2925 0 : fw.text_index = 0;
2926 0 : fw.text = bfw->bnx_RXP_FwText;
2927 :
2928 0 : fw.data_addr = bfw->fw->bnx_RXP_FwDataAddr;
2929 0 : fw.data_len = bfw->fw->bnx_RXP_FwDataLen;
2930 0 : fw.data_index = 0;
2931 0 : fw.data = bfw->bnx_RXP_FwData;
2932 :
2933 0 : fw.sbss_addr = bfw->fw->bnx_RXP_FwSbssAddr;
2934 0 : fw.sbss_len = bfw->fw->bnx_RXP_FwSbssLen;
2935 0 : fw.sbss_index = 0;
2936 0 : fw.sbss = bfw->bnx_RXP_FwSbss;
2937 :
2938 0 : fw.bss_addr = bfw->fw->bnx_RXP_FwBssAddr;
2939 0 : fw.bss_len = bfw->fw->bnx_RXP_FwBssLen;
2940 0 : fw.bss_index = 0;
2941 0 : fw.bss = bfw->bnx_RXP_FwBss;
2942 :
2943 0 : fw.rodata_addr = bfw->fw->bnx_RXP_FwRodataAddr;
2944 0 : fw.rodata_len = bfw->fw->bnx_RXP_FwRodataLen;
2945 0 : fw.rodata_index = 0;
2946 0 : fw.rodata = bfw->bnx_RXP_FwRodata;
2947 :
2948 : DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
2949 0 : bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2950 :
2951 : /* Initialize the TX Processor. */
2952 0 : cpu_reg.mode = BNX_TXP_CPU_MODE;
2953 0 : cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
2954 0 : cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
2955 0 : cpu_reg.state = BNX_TXP_CPU_STATE;
2956 0 : cpu_reg.state_value_clear = 0xffffff;
2957 0 : cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
2958 0 : cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
2959 0 : cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
2960 0 : cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
2961 0 : cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
2962 0 : cpu_reg.spad_base = BNX_TXP_SCRATCH;
2963 0 : cpu_reg.mips_view_base = 0x8000000;
2964 :
2965 0 : fw.ver_major = bfw->fw->bnx_TXP_FwReleaseMajor;
2966 0 : fw.ver_minor = bfw->fw->bnx_TXP_FwReleaseMinor;
2967 0 : fw.ver_fix = bfw->fw->bnx_TXP_FwReleaseFix;
2968 0 : fw.start_addr = bfw->fw->bnx_TXP_FwStartAddr;
2969 :
2970 0 : fw.text_addr = bfw->fw->bnx_TXP_FwTextAddr;
2971 0 : fw.text_len = bfw->fw->bnx_TXP_FwTextLen;
2972 0 : fw.text_index = 0;
2973 0 : fw.text = bfw->bnx_TXP_FwText;
2974 :
2975 0 : fw.data_addr = bfw->fw->bnx_TXP_FwDataAddr;
2976 0 : fw.data_len = bfw->fw->bnx_TXP_FwDataLen;
2977 0 : fw.data_index = 0;
2978 0 : fw.data = bfw->bnx_TXP_FwData;
2979 :
2980 0 : fw.sbss_addr = bfw->fw->bnx_TXP_FwSbssAddr;
2981 0 : fw.sbss_len = bfw->fw->bnx_TXP_FwSbssLen;
2982 0 : fw.sbss_index = 0;
2983 0 : fw.sbss = bfw->bnx_TXP_FwSbss;
2984 :
2985 0 : fw.bss_addr = bfw->fw->bnx_TXP_FwBssAddr;
2986 0 : fw.bss_len = bfw->fw->bnx_TXP_FwBssLen;
2987 0 : fw.bss_index = 0;
2988 0 : fw.bss = bfw->bnx_TXP_FwBss;
2989 :
2990 0 : fw.rodata_addr = bfw->fw->bnx_TXP_FwRodataAddr;
2991 0 : fw.rodata_len = bfw->fw->bnx_TXP_FwRodataLen;
2992 0 : fw.rodata_index = 0;
2993 0 : fw.rodata = bfw->bnx_TXP_FwRodata;
2994 :
2995 : DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
2996 0 : bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2997 :
2998 : /* Initialize the TX Patch-up Processor. */
2999 0 : cpu_reg.mode = BNX_TPAT_CPU_MODE;
3000 0 : cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
3001 0 : cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
3002 0 : cpu_reg.state = BNX_TPAT_CPU_STATE;
3003 0 : cpu_reg.state_value_clear = 0xffffff;
3004 0 : cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
3005 0 : cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
3006 0 : cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
3007 0 : cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
3008 0 : cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
3009 0 : cpu_reg.spad_base = BNX_TPAT_SCRATCH;
3010 0 : cpu_reg.mips_view_base = 0x8000000;
3011 :
3012 0 : fw.ver_major = bfw->fw->bnx_TPAT_FwReleaseMajor;
3013 0 : fw.ver_minor = bfw->fw->bnx_TPAT_FwReleaseMinor;
3014 0 : fw.ver_fix = bfw->fw->bnx_TPAT_FwReleaseFix;
3015 0 : fw.start_addr = bfw->fw->bnx_TPAT_FwStartAddr;
3016 :
3017 0 : fw.text_addr = bfw->fw->bnx_TPAT_FwTextAddr;
3018 0 : fw.text_len = bfw->fw->bnx_TPAT_FwTextLen;
3019 0 : fw.text_index = 0;
3020 0 : fw.text = bfw->bnx_TPAT_FwText;
3021 :
3022 0 : fw.data_addr = bfw->fw->bnx_TPAT_FwDataAddr;
3023 0 : fw.data_len = bfw->fw->bnx_TPAT_FwDataLen;
3024 0 : fw.data_index = 0;
3025 0 : fw.data = bfw->bnx_TPAT_FwData;
3026 :
3027 0 : fw.sbss_addr = bfw->fw->bnx_TPAT_FwSbssAddr;
3028 0 : fw.sbss_len = bfw->fw->bnx_TPAT_FwSbssLen;
3029 0 : fw.sbss_index = 0;
3030 0 : fw.sbss = bfw->bnx_TPAT_FwSbss;
3031 :
3032 0 : fw.bss_addr = bfw->fw->bnx_TPAT_FwBssAddr;
3033 0 : fw.bss_len = bfw->fw->bnx_TPAT_FwBssLen;
3034 0 : fw.bss_index = 0;
3035 0 : fw.bss = bfw->bnx_TPAT_FwBss;
3036 :
3037 0 : fw.rodata_addr = bfw->fw->bnx_TPAT_FwRodataAddr;
3038 0 : fw.rodata_len = bfw->fw->bnx_TPAT_FwRodataLen;
3039 0 : fw.rodata_index = 0;
3040 0 : fw.rodata = bfw->bnx_TPAT_FwRodata;
3041 :
3042 : DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
3043 0 : bnx_load_cpu_fw(sc, &cpu_reg, &fw);
3044 :
3045 : /* Initialize the Completion Processor. */
3046 0 : cpu_reg.mode = BNX_COM_CPU_MODE;
3047 0 : cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
3048 0 : cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
3049 0 : cpu_reg.state = BNX_COM_CPU_STATE;
3050 0 : cpu_reg.state_value_clear = 0xffffff;
3051 0 : cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
3052 0 : cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
3053 0 : cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
3054 0 : cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
3055 0 : cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
3056 0 : cpu_reg.spad_base = BNX_COM_SCRATCH;
3057 0 : cpu_reg.mips_view_base = 0x8000000;
3058 :
3059 0 : fw.ver_major = bfw->fw->bnx_COM_FwReleaseMajor;
3060 0 : fw.ver_minor = bfw->fw->bnx_COM_FwReleaseMinor;
3061 0 : fw.ver_fix = bfw->fw->bnx_COM_FwReleaseFix;
3062 0 : fw.start_addr = bfw->fw->bnx_COM_FwStartAddr;
3063 :
3064 0 : fw.text_addr = bfw->fw->bnx_COM_FwTextAddr;
3065 0 : fw.text_len = bfw->fw->bnx_COM_FwTextLen;
3066 0 : fw.text_index = 0;
3067 0 : fw.text = bfw->bnx_COM_FwText;
3068 :
3069 0 : fw.data_addr = bfw->fw->bnx_COM_FwDataAddr;
3070 0 : fw.data_len = bfw->fw->bnx_COM_FwDataLen;
3071 0 : fw.data_index = 0;
3072 0 : fw.data = bfw->bnx_COM_FwData;
3073 :
3074 0 : fw.sbss_addr = bfw->fw->bnx_COM_FwSbssAddr;
3075 0 : fw.sbss_len = bfw->fw->bnx_COM_FwSbssLen;
3076 0 : fw.sbss_index = 0;
3077 0 : fw.sbss = bfw->bnx_COM_FwSbss;
3078 :
3079 0 : fw.bss_addr = bfw->fw->bnx_COM_FwBssAddr;
3080 0 : fw.bss_len = bfw->fw->bnx_COM_FwBssLen;
3081 0 : fw.bss_index = 0;
3082 0 : fw.bss = bfw->bnx_COM_FwBss;
3083 :
3084 0 : fw.rodata_addr = bfw->fw->bnx_COM_FwRodataAddr;
3085 0 : fw.rodata_len = bfw->fw->bnx_COM_FwRodataLen;
3086 0 : fw.rodata_index = 0;
3087 0 : fw.rodata = bfw->bnx_COM_FwRodata;
3088 :
3089 : DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
3090 0 : bnx_load_cpu_fw(sc, &cpu_reg, &fw);
3091 0 : }
3092 :
3093 : /****************************************************************************/
3094 : /* Initialize context memory. */
3095 : /* */
3096 : /* Clears the memory associated with each Context ID (CID). */
3097 : /* */
3098 : /* Returns: */
3099 : /* Nothing. */
3100 : /****************************************************************************/
3101 : void
3102 0 : bnx_init_context(struct bnx_softc *sc)
3103 : {
3104 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3105 : /* DRC: Replace this constant value with a #define. */
3106 : int i, retry_cnt = 10;
3107 : u_int32_t val;
3108 :
3109 : /*
3110 : * BCM5709 context memory may be cached
3111 : * in host memory so prepare the host memory
3112 : * for access.
3113 : */
3114 : val = BNX_CTX_COMMAND_ENABLED | BNX_CTX_COMMAND_MEM_INIT
3115 : | (1 << 12);
3116 : val |= (BCM_PAGE_BITS - 8) << 16;
3117 0 : REG_WR(sc, BNX_CTX_COMMAND, val);
3118 :
3119 : /* Wait for mem init command to complete. */
3120 0 : for (i = 0; i < retry_cnt; i++) {
3121 0 : val = REG_RD(sc, BNX_CTX_COMMAND);
3122 0 : if (!(val & BNX_CTX_COMMAND_MEM_INIT))
3123 : break;
3124 0 : DELAY(2);
3125 : }
3126 :
3127 : /* ToDo: Consider returning an error here. */
3128 :
3129 0 : for (i = 0; i < sc->ctx_pages; i++) {
3130 : int j;
3131 :
3132 : /* Set the physaddr of the context memory cache. */
3133 0 : val = (u_int32_t)(sc->ctx_segs[i].ds_addr);
3134 0 : REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA0, val |
3135 : BNX_CTX_HOST_PAGE_TBL_DATA0_VALID);
3136 0 : val = (u_int32_t)
3137 0 : ((u_int64_t)sc->ctx_segs[i].ds_addr >> 32);
3138 0 : REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA1, val);
3139 0 : REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_CTRL, i |
3140 : BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3141 :
3142 : /* Verify that the context memory write was successful. */
3143 0 : for (j = 0; j < retry_cnt; j++) {
3144 0 : val = REG_RD(sc, BNX_CTX_HOST_PAGE_TBL_CTRL);
3145 0 : if ((val & BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
3146 : break;
3147 0 : DELAY(5);
3148 : }
3149 :
3150 : /* ToDo: Consider returning an error here. */
3151 : }
3152 0 : } else {
3153 : u_int32_t vcid_addr, offset;
3154 :
3155 : /*
3156 : * For the 5706/5708, context memory is local to
3157 : * the controller, so initialize the controller
3158 : * context memory.
3159 : */
3160 :
3161 : vcid_addr = GET_CID_ADDR(96);
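 : 		/* Walk backwards from CID 96, zeroing each context entry
 : 		 * through the controller's context paging registers. */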
3162 0 : while (vcid_addr) {
3163 :
3164 0 : vcid_addr -= PHY_CTX_SIZE;
3165 :
3166 0 : REG_WR(sc, BNX_CTX_VIRT_ADDR, 0);
3167 0 : REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);
3168 :
3169 0 : for(offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
3170 0 : CTX_WR(sc, 0x00, offset, 0);
3171 : }
3172 :
3173 0 : REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
3174 0 : REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);
3175 : }
3176 : }
3177 0 : }
3178 :
3179 : /****************************************************************************/
3180 : /* Fetch the permanent MAC address of the controller. */
3181 : /* */
3182 : /* Returns: */
3183 : /* Nothing. */
3184 : /****************************************************************************/
3185 : void
3186 0 : bnx_get_mac_addr(struct bnx_softc *sc)
3187 : {
3188 : u_int32_t mac_lo = 0, mac_hi = 0;
3189 :
3190 : /*
3191 : * The NetXtreme II bootcode populates various NIC
3192 : * power-on and runtime configuration items in a
3193 : * shared memory area. The factory configured MAC
3194 : * address is available from both NVRAM and the
3195 : * shared memory area so we'll read the value from
3196 : * shared memory for speed.
3197 : */
3198 :
3199 0 : mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER);
3200 0 : mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER);
3201 :
3202 0 : if ((mac_lo == 0) && (mac_hi == 0)) {
3203 0 : BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
3204 : __FILE__, __LINE__);
3205 0 : } else {
3206 0 : sc->eaddr[0] = (u_char)(mac_hi >> 8);
3207 0 : sc->eaddr[1] = (u_char)(mac_hi >> 0);
3208 0 : sc->eaddr[2] = (u_char)(mac_lo >> 24);
3209 0 : sc->eaddr[3] = (u_char)(mac_lo >> 16);
3210 0 : sc->eaddr[4] = (u_char)(mac_lo >> 8);
3211 0 : sc->eaddr[5] = (u_char)(mac_lo >> 0);
3212 : }
3213 :
3214 : DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = "
3215 : "%6D\n", sc->eaddr, ":");
3216 0 : }
3217 :
3218 : /****************************************************************************/
3219 : /* Program the MAC address. */
3220 : /* */
3221 : /* Returns: */
3222 : /* Nothing. */
3223 : /****************************************************************************/
3224 : void
3225 0 : bnx_set_mac_addr(struct bnx_softc *sc)
3226 : {
3227 : u_int32_t val;
3228 0 : u_int8_t *mac_addr = sc->eaddr;
3229 :
3230 : DBPRINT(sc, BNX_INFO, "Setting Ethernet address = "
3231 : "%6D\n", sc->eaddr, ":");
3232 :
3233 0 : val = (mac_addr[0] << 8) | mac_addr[1];
3234 :
3235 0 : REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);
3236 :
3237 0 : val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3238 0 : (mac_addr[4] << 8) | mac_addr[5];
3239 :
3240 0 : REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
3241 0 : }
3242 :
3243 : /****************************************************************************/
3244 : /* Stop the controller. */
3245 : /* */
3246 : /* Returns: */
3247 : /* Nothing. */
3248 : /****************************************************************************/
3249 : void
3250 0 : bnx_stop(struct bnx_softc *sc)
3251 : {
3252 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
3253 : struct ifmedia_entry *ifm;
3254 : struct mii_data *mii;
3255 : int mtmp, itmp;
3256 :
3257 : DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3258 :
3259 0 : timeout_del(&sc->bnx_timeout);
3260 0 : timeout_del(&sc->bnx_rxrefill);
3261 :
3262 0 : ifp->if_flags &= ~IFF_RUNNING;
3263 0 : ifq_clr_oactive(&ifp->if_snd);
3264 :
3265 : /* Disable the transmit/receive blocks. */
3266 0 : REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
3267 0 : REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
3268 0 : DELAY(20);
3269 :
3270 0 : bnx_disable_intr(sc);
3271 :
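            : 	/* Wait for any currently running interrupt handler to finish. */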
3272 0 : intr_barrier(sc->bnx_intrhand);
3273 0 : KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
3274 :
3275 : /* Tell firmware that the driver is going away. */
3276 0 : bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);
3277 :
3278 : /* Free RX buffers. */
3279 0 : bnx_free_rx_chain(sc);
3280 :
3281 : /* Free TX buffers. */
3282 0 : bnx_free_tx_chain(sc);
3283 :
3284 : /*
3285 : * Isolate/power down the PHY, but leave the media selection
3286 : * unchanged so that things will be put back to normal when
3287 : * we bring the interface back up.
3288 : */
3289 0 : mii = &sc->bnx_mii;
3290 0 : itmp = ifp->if_flags;
3291 0 : ifp->if_flags |= IFF_UP;
3292 0 : ifm = mii->mii_media.ifm_cur;
3293 0 : mtmp = ifm->ifm_media;
3294 0 : ifm->ifm_media = IFM_ETHER|IFM_NONE;
3295 0 : mii_mediachg(mii);
3296 0 : ifm->ifm_media = mtmp;
3297 0 : ifp->if_flags = itmp;
3298 :
3299 0 : ifp->if_timer = 0;
3300 :
3301 0 : sc->bnx_link = 0;
3302 :
3303 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3304 :
3305 0 : bnx_mgmt_init(sc);
3306 0 : }
3307 :
3308 : int
3309 0 : bnx_reset(struct bnx_softc *sc, u_int32_t reset_code)
3310 : {
3311 0 : struct pci_attach_args *pa = &(sc->bnx_pa);
3312 : u_int32_t val;
3313 : int i, rc = 0;
3314 :
3315 : DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3316 :
3317 : /* Wait for pending PCI transactions to complete. */
3318 0 : REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
3319 : BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3320 : BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3321 : BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3322 : BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3323 0 : val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
3324 0 : DELAY(5);
3325 :
3326 : /* Disable DMA */
3327 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3328 0 : val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3329 0 : val &= ~BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3330 0 : REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3331 0 : }
3332 :
3333 : /* Assume bootcode is running. */
3334 0 : sc->bnx_fw_timed_out = 0;
3335 :
3336 : /* Give the firmware a chance to prepare for the reset. */
3337 0 : rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
3338 0 : if (rc)
3339 : goto bnx_reset_exit;
3340 :
3341 : /* Set a firmware reminder that this is a soft reset. */
3342 0 : REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
3343 : BNX_DRV_RESET_SIGNATURE_MAGIC);
3344 :
3345 : /* Dummy read to force the chip to complete all current transactions. */
3346 0 : val = REG_RD(sc, BNX_MISC_ID);
3347 :
3348 : /* Chip reset. */
3349 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3350 0 : REG_WR(sc, BNX_MISC_COMMAND, BNX_MISC_COMMAND_SW_RESET);
3351 0 : REG_RD(sc, BNX_MISC_COMMAND);
3352 0 : DELAY(5);
3353 :
3354 : val = BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3355 : BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3356 :
3357 0 : pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
3358 : val);
3359 0 : } else {
3360 : val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3361 : BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3362 : BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3363 0 : REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);
3364 :
3365 : 		/* Poll for the core reset to complete (up to ~100us). */
3366 0 : for (i = 0; i < 10; i++) {
3367 0 : val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
3368 0 : if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3369 0 : BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3370 : break;
3371 : }
3372 0 : DELAY(10);
3373 : }
3374 :
3375 : /* Check that reset completed successfully. */
3376 0 : if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3377 : BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3378 0 : BNX_PRINTF(sc, "%s(%d): Reset failed!\n",
3379 : __FILE__, __LINE__);
3380 : rc = EBUSY;
3381 0 : goto bnx_reset_exit;
3382 : }
3383 : }
3384 :
3385 : /* Make sure byte swapping is properly configured. */
3386 0 : val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
3387 0 : if (val != 0x01020304) {
3388 0 : BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
3389 : __FILE__, __LINE__);
3390 : rc = ENODEV;
3391 0 : goto bnx_reset_exit;
3392 : }
3393 :
3394 : /* Just completed a reset, assume that firmware is running again. */
3395 0 : sc->bnx_fw_timed_out = 0;
3396 :
3397 : /* Wait for the firmware to finish its initialization. */
3398 0 : rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
3399 0 : if (rc)
3400 0 : BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
3401 : "initialization!\n", __FILE__, __LINE__);
3402 :
3403 : bnx_reset_exit:
3404 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3405 :
3406 0 : return (rc);
3407 : }
3408 :
3409 : int
3410 0 : bnx_chipinit(struct bnx_softc *sc)
3411 : {
3412 0 : struct pci_attach_args *pa = &(sc->bnx_pa);
3413 : u_int32_t val;
3414 : int rc = 0;
3415 :
3416 : DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3417 :
3418 : /* Make sure the interrupt is not active. */
3419 0 : REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
3420 :
3421 : /* Initialize DMA byte/word swapping, configure the number of DMA */
3422 : /* channels and PCI clock compensation delay. */
3423 : val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
3424 : BNX_DMA_CONFIG_DATA_WORD_SWAP |
3425 : #if BYTE_ORDER == BIG_ENDIAN
3426 : BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
3427 : #endif
3428 : BNX_DMA_CONFIG_CNTL_WORD_SWAP |
3429 : DMA_READ_CHANS << 12 |
3430 : DMA_WRITE_CHANS << 16;
3431 :
3432 : val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3433 :
3434 0 : if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3435 0 : val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;
3436 :
3437 : /*
3438 : * This setting resolves a problem observed on certain Intel PCI
3439 : * chipsets that cannot handle multiple outstanding DMA operations.
3440 : * See errata E9_5706A1_65.
3441 : */
3442 0 : if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
3443 0 : (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
3444 0 : !(sc->bnx_flags & BNX_PCIX_FLAG))
3445 0 : val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;
3446 :
3447 0 : REG_WR(sc, BNX_DMA_CONFIG, val);
3448 :
3449 : #if 1
3450 : /* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3451 0 : if (sc->bnx_flags & BNX_PCIX_FLAG) {
3452 0 : val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
3453 0 : pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
3454 0 : val & ~0x20000);
3455 0 : }
3456 : #endif
3457 :
3458 : /* Enable the RX_V2P and Context state machines before access. */
3459 0 : REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3460 : BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3461 : BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3462 : BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3463 :
3464 : /* Initialize context mapping and zero out the quick contexts. */
3465 0 : bnx_init_context(sc);
3466 :
3467 : 	/* Initialize the on-board CPUs. */
3468 0 : bnx_init_cpus(sc);
3469 :
3470 : /* Prepare NVRAM for access. */
3471 0 : if (bnx_init_nvram(sc)) {
3472 : rc = ENODEV;
3473 0 : goto bnx_chipinit_exit;
3474 : }
3475 :
3476 : /* Set the kernel bypass block size */
3477 0 : val = REG_RD(sc, BNX_MQ_CONFIG);
3478 0 : val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3479 0 : val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3480 :
3481 : /* Enable bins used on the 5709. */
3482 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3483 0 : val |= BNX_MQ_CONFIG_BIN_MQ_MODE;
3484 0 : if (BNX_CHIP_ID(sc) == BNX_CHIP_ID_5709_A1)
3485 0 : val |= BNX_MQ_CONFIG_HALT_DIS;
3486 : }
3487 :
3488 0 : REG_WR(sc, BNX_MQ_CONFIG, val);
3489 :
3490 : val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3491 0 : REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
3492 0 : REG_WR(sc, BNX_MQ_KNL_WIND_END, val);
3493 :
3494 : val = (BCM_PAGE_BITS - 8) << 24;
3495 0 : REG_WR(sc, BNX_RV2P_CONFIG, val);
3496 :
3497 : /* Configure page size. */
3498 0 : val = REG_RD(sc, BNX_TBDR_CONFIG);
3499 0 : val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
3500 0 : val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3501 0 : REG_WR(sc, BNX_TBDR_CONFIG, val);
3502 :
3503 : #if 0
3504 : /* Set the perfect match control register to default. */
3505 : REG_WR_IND(sc, BNX_RXP_PM_CTRL, 0);
3506 : #endif
3507 :
3508 : bnx_chipinit_exit:
3509 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3510 :
3511 0 : return(rc);
3512 : }
3513 :
3514 : /****************************************************************************/
3515 : /* Initialize the controller in preparation to send/receive traffic. */
3516 : /* */
3517 : /* Returns: */
3518 : /* 0 for success, positive value for failure. */
3519 : /****************************************************************************/
3520 : int
3521 0 : bnx_blockinit(struct bnx_softc *sc)
3522 : {
3523 : u_int32_t reg, val;
3524 : int rc = 0;
3525 :
3526 : DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3527 :
3528 : /* Load the hardware default MAC address. */
3529 0 : bnx_set_mac_addr(sc);
3530 :
3531 : /* Set the Ethernet backoff seed value */
3532 0 : val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3533 0 : (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3534 0 : REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val);
3535 :
3536 0 : sc->last_status_idx = 0;
3537 0 : sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE;
3538 :
3539 : /* Set up link change interrupt generation. */
3540 0 : REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK);
3541 0 : REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3542 :
3543 : /* Program the physical address of the status block. */
3544 0 : REG_WR(sc, BNX_HC_STATUS_ADDR_L, (u_int32_t)(sc->status_block_paddr));
3545 0 : REG_WR(sc, BNX_HC_STATUS_ADDR_H,
3546 : (u_int32_t)((u_int64_t)sc->status_block_paddr >> 32));
3547 :
3548 : /* Program the physical address of the statistics block. */
3549 0 : REG_WR(sc, BNX_HC_STATISTICS_ADDR_L,
3550 : (u_int32_t)(sc->stats_block_paddr));
3551 0 : REG_WR(sc, BNX_HC_STATISTICS_ADDR_H,
3552 : (u_int32_t)((u_int64_t)sc->stats_block_paddr >> 32));
3553 :
3554 : /* Program various host coalescing parameters. */
3555 0 : REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int
3556 : << 16) | sc->bnx_tx_quick_cons_trip);
3557 0 : REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int
3558 : << 16) | sc->bnx_rx_quick_cons_trip);
3559 0 : REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) |
3560 : sc->bnx_comp_prod_trip);
3561 0 : REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) |
3562 : sc->bnx_tx_ticks);
3563 0 : REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) |
3564 : sc->bnx_rx_ticks);
3565 0 : REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) |
3566 : sc->bnx_com_ticks);
3567 0 : REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) |
3568 : sc->bnx_cmd_ticks);
3569 0 : REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00));
3570 0 : REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3571 0 : REG_WR(sc, BNX_HC_CONFIG,
3572 : (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE |
3573 : BNX_HC_CONFIG_COLLECT_STATS));
3574 :
3575 : /* Clear the internal statistics counters. */
3576 0 : REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW);
3577 :
3578 : /* Verify that bootcode is running. */
3579 0 : reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE);
3580 :
3581 : DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure),
3582 : BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3583 : __FILE__, __LINE__); reg = 0);
3584 :
3585 0 : if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3586 : BNX_DEV_INFO_SIGNATURE_MAGIC) {
3587 0 : BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3588 : 		    "Expected: 0x%08X\n", __FILE__, __LINE__,
3589 : (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3590 : BNX_DEV_INFO_SIGNATURE_MAGIC);
3591 : rc = ENODEV;
3592 0 : goto bnx_blockinit_exit;
3593 : }
3594 :
3595 : /* Check if any management firmware is running. */
3596 0 : reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3597 0 : if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3598 : BNX_PORT_FEATURE_IMD_ENABLED)) {
3599 : DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3600 0 : sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3601 0 : }
3602 :
3603 0 : sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3604 : BNX_DEV_INFO_BC_REV);
3605 :
3606 : DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3607 :
3608 : /* Enable DMA */
3609 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3610 0 : val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3611 0 : val |= BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3612 0 : REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3613 0 : }
3614 :
3615 : /* Allow bootcode to apply any additional fixes before enabling MAC. */
3616 0 : rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3617 :
3618 : /* Enable link state change interrupt generation. */
3619 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3620 0 : REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3621 : BNX_MISC_ENABLE_DEFAULT_XI);
3622 0 : } else
3623 0 : REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, BNX_MISC_ENABLE_DEFAULT);
3624 :
3625 : /* Enable all remaining blocks in the MAC. */
3626 0 : REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3627 0 : REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3628 0 : DELAY(20);
3629 :
3630 : bnx_blockinit_exit:
3631 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3632 :
3633 0 : return (rc);
3634 : }
3635 :
3636 : /****************************************************************************/
3637 : /* Encapsulate an mbuf cluster into the rx_bd chain. */
3638 : /* */
3639 : /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3640 : /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3641 : /* necessary. */
3642 : /* */
3643 : /* Returns: */
3644 : /*   The number of rx_bd's used by the new mbuf, 0 if allocation failed.   */
3645 : /****************************************************************************/
3646 : int
3647 0 : bnx_get_buf(struct bnx_softc *sc, u_int16_t *prod,
3648 : u_int16_t *chain_prod, u_int32_t *prod_bseq)
3649 : {
3650 : bus_dmamap_t map;
3651 : struct mbuf *m;
3652 : struct rx_bd *rxbd;
3653 : int i;
3654 : u_int32_t addr;
3655 : #ifdef BNX_DEBUG
3656 : u_int16_t debug_chain_prod = *chain_prod;
3657 : #endif
3658 : u_int16_t first_chain_prod;
3659 :
3660 : DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3661 : __FUNCTION__);
3662 :
3663 : /* Make sure the inputs are valid. */
3664 : DBRUNIF((*chain_prod > MAX_RX_BD),
3665 : printf("%s: RX producer out of range: 0x%04X > 0x%04X\n",
3666 : 	    __FUNCTION__, *chain_prod, (u_int16_t) MAX_RX_BD));
3667 :
3668 : DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = "
3669 : "0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod,
3670 : *prod_bseq);
3671 :
3672 : /* This is a new mbuf allocation. */
3673 0 : m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
3674 0 : if (!m)
3675 0 : return (0);
3676 0 : m->m_len = m->m_pkthdr.len = MCLBYTES;
3677 : /* the chip aligns the ip header for us, no need to m_adj */
3678 :
3679 : /* Map the mbuf cluster into device memory. */
3680 0 : map = sc->rx_mbuf_map[*chain_prod];
3681 0 : if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT)) {
3682 0 : m_freem(m);
3683 0 : return (0);
3684 : }
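            : 	/*
            : 	 * Remember the first rx_bd index so its DMA map can be swapped
            : 	 * with the map of the last rx_bd once the chain is built below.
            : 	 */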
3685 0 : first_chain_prod = *chain_prod;
3686 :
3687 : #ifdef BNX_DEBUG
3688 : /* Track the distribution of buffer segments. */
3689 : sc->rx_mbuf_segs[map->dm_nsegs]++;
3690 : #endif
3691 :
3692 : /* Setup the rx_bd for the first segment. */
3693 0 : rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3694 :
3695 0 : addr = (u_int32_t)map->dm_segs[0].ds_addr;
3696 0 : rxbd->rx_bd_haddr_lo = addr;
3697 0 : addr = (u_int32_t)((u_int64_t)map->dm_segs[0].ds_addr >> 32);
3698 0 : rxbd->rx_bd_haddr_hi = addr;
3699 0 : rxbd->rx_bd_len = map->dm_segs[0].ds_len;
3700 0 : rxbd->rx_bd_flags = RX_BD_FLAGS_START;
3701 0 : *prod_bseq += map->dm_segs[0].ds_len;
3702 :
3703 0 : for (i = 1; i < map->dm_nsegs; i++) {
3704 0 : *prod = NEXT_RX_BD(*prod);
3705 0 : *chain_prod = RX_CHAIN_IDX(*prod);
3706 :
3707 : rxbd =
3708 0 : &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3709 :
3710 0 : addr = (u_int32_t)map->dm_segs[i].ds_addr;
3711 0 : rxbd->rx_bd_haddr_lo = addr;
3712 0 : addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
3713 0 : rxbd->rx_bd_haddr_hi = addr;
3714 0 : rxbd->rx_bd_len = map->dm_segs[i].ds_len;
3715 0 : rxbd->rx_bd_flags = 0;
3716 0 : *prod_bseq += map->dm_segs[i].ds_len;
3717 : }
3718 :
3719 0 : rxbd->rx_bd_flags |= RX_BD_FLAGS_END;
3720 :
3721 : /*
3722 : * Save the mbuf, adjust the map pointer (swap map for first and
3723 : * last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map matches)
3724 : * and update our counter.
3725 : */
3726 0 : sc->rx_mbuf_ptr[*chain_prod] = m;
3727 0 : sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3728 0 : sc->rx_mbuf_map[*chain_prod] = map;
3729 :
3730 : DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3731 : map->dm_nsegs));
3732 :
3733 0 : return (map->dm_nsegs);
3734 0 : }
3735 :
3736 :
3737 : /****************************************************************************/
3738 : /* Initialize the TX context memory. */
3739 : /* */
3740 : /* Returns: */
3741 : /* Nothing */
3742 : /****************************************************************************/
3743 : void
3744 0 : bnx_init_tx_context(struct bnx_softc *sc)
3745 : {
3746 : u_int32_t val;
3747 :
3748 : /* Initialize the context ID for an L2 TX chain. */
3749 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3750 : /* Set the CID type to support an L2 connection. */
3751 : val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
3752 0 : CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE_XI, val);
3753 : val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3754 0 : CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE_XI, val);
3755 :
3756 : /* Point the hardware to the first page in the chain. */
3757 0 : val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
3758 0 : CTX_WR(sc, GET_CID_ADDR(TX_CID),
3759 : BNX_L2CTX_TBDR_BHADDR_HI_XI, val);
3760 0 : val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
3761 0 : CTX_WR(sc, GET_CID_ADDR(TX_CID),
3762 : BNX_L2CTX_TBDR_BHADDR_LO_XI, val);
3763 0 : } else {
3764 : /* Set the CID type to support an L2 connection. */
3765 : val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
3766 0 : CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val);
3767 : val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3768 0 : CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val);
3769 :
3770 : /* Point the hardware to the first page in the chain. */
3771 0 : val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
3772 0 : CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val);
3773 0 : val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
3774 0 : CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val);
3775 : }
3776 0 : }
3777 :
3778 : /****************************************************************************/
3779 : /* Allocate memory and initialize the TX data structures. */
3780 : /* */
3781 : /* Returns: */
3782 : /* 0 for success, positive value for failure. */
3783 : /****************************************************************************/
3784 : int
3785 0 : bnx_init_tx_chain(struct bnx_softc *sc)
3786 : {
3787 : struct tx_bd *txbd;
3788 : u_int32_t addr;
3789 : int i, rc = 0;
3790 :
3791 : DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3792 :
3793 : /* Set the initial TX producer/consumer indices. */
3794 0 : sc->tx_prod = 0;
3795 0 : sc->tx_cons = 0;
3796 0 : sc->tx_prod_bseq = 0;
3797 0 : sc->used_tx_bd = 0;
3798 0 : sc->max_tx_bd = USABLE_TX_BD;
3799 : DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3800 : DBRUNIF(1, sc->tx_full_count = 0);
3801 :
3802 : /*
3803 : * The NetXtreme II supports a linked-list structure called
3804 : * a Buffer Descriptor Chain (or BD chain). A BD chain
3805 : * consists of a series of 1 or more chain pages, each of which
3806 : * consists of a fixed number of BD entries.
3807 : * The last BD entry on each page is a pointer to the next page
3808 : * in the chain, and the last pointer in the BD chain
3809 : * points back to the beginning of the chain.
3810 : */
3811 :
3812 : /* Set the TX next pointer chain entries. */
3813 0 : for (i = 0; i < TX_PAGES; i++) {
3814 : int j;
3815 :
3816 0 : txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3817 :
3818 : /* Check if we've reached the last page. */
3819 0 : if (i == (TX_PAGES - 1))
3820 0 : j = 0;
3821 : else
3822 0 : j = i + 1;
3823 :
3824 0 : addr = (u_int32_t)sc->tx_bd_chain_paddr[j];
3825 0 : txbd->tx_bd_haddr_lo = addr;
3826 0 : addr = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[j] >> 32);
3827 0 : txbd->tx_bd_haddr_hi = addr;
3828 : }
3829 :
3830 : /*
3831 : * Initialize the context ID for an L2 TX chain.
3832 : */
3833 0 : bnx_init_tx_context(sc);
3834 :
3835 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3836 :
3837 0 : return(rc);
3838 : }
3839 :
3840 : /****************************************************************************/
3841 : /* Free memory and clear the TX data structures. */
3842 : /* */
3843 : /* Returns: */
3844 : /* Nothing. */
3845 : /****************************************************************************/
3846 : void
3847 0 : bnx_free_tx_chain(struct bnx_softc *sc)
3848 : {
3849 : int i;
3850 :
3851 : DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3852 :
3853 : /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3854 0 : for (i = 0; i < TOTAL_TX_BD; i++) {
3855 0 : if (sc->tx_mbuf_ptr[i] != NULL) {
3856 0 : if (sc->tx_mbuf_map[i] != NULL) {
3857 0 : bus_dmamap_sync(sc->bnx_dmatag,
3858 : sc->tx_mbuf_map[i], 0,
3859 : sc->tx_mbuf_map[i]->dm_mapsize,
3860 : BUS_DMASYNC_POSTWRITE);
3861 0 : bus_dmamap_unload(sc->bnx_dmatag,
3862 : sc->tx_mbuf_map[i]);
3863 0 : }
3864 0 : m_freem(sc->tx_mbuf_ptr[i]);
3865 0 : sc->tx_mbuf_ptr[i] = NULL;
3866 : DBRUNIF(1, sc->tx_mbuf_alloc--);
3867 0 : }
3868 : }
3869 :
3870 : /* Clear each TX chain page. */
3871 0 : for (i = 0; i < TX_PAGES; i++)
3872 0 : bzero(sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
3873 :
3874 0 : sc->used_tx_bd = 0;
3875 :
3876 : /* Check if we lost any mbufs in the process. */
3877 : DBRUNIF((sc->tx_mbuf_alloc),
3878 : printf("%s: Memory leak! Lost %d mbufs from tx chain!\n",
3879 : 	    __FUNCTION__, sc->tx_mbuf_alloc));
3880 :
3881 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3882 0 : }
3883 :
3884 : /****************************************************************************/
3885 : /* Initialize the RX context memory. */
3886 : /* */
3887 : /* Returns: */
3888 : /* Nothing */
3889 : /****************************************************************************/
3890 : void
3891 0 : bnx_init_rx_context(struct bnx_softc *sc)
3892 : {
3893 : u_int32_t val;
3894 :
3895 : /* Initialize the context ID for an L2 RX chain. */
3896 : val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
3897 : BNX_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
3898 :
3899 : /*
3900 : * Set the level for generating pause frames
3901 : * when the number of available rx_bd's gets
3902 : * too low (the low watermark) and the level
3903 : * when pause frames can be stopped (the high
3904 : * watermark).
3905 : */
3906 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3907 : u_int32_t lo_water, hi_water;
3908 :
3909 : lo_water = BNX_L2CTX_RX_LO_WATER_MARK_DEFAULT;
3910 : hi_water = USABLE_RX_BD / 4;
3911 :
3912 : lo_water /= BNX_L2CTX_RX_LO_WATER_MARK_SCALE;
3913 : hi_water /= BNX_L2CTX_RX_HI_WATER_MARK_SCALE;
3914 :
3915 0 : if (hi_water > 0xf)
3916 0 : hi_water = 0xf;
3917 0 : else if (hi_water == 0)
3918 0 : lo_water = 0;
3919 :
3920 0 : val |= (lo_water << BNX_L2CTX_RX_LO_WATER_MARK_SHIFT) |
3921 0 : (hi_water << BNX_L2CTX_RX_HI_WATER_MARK_SHIFT);
3922 0 : }
3923 :
3924 0 : CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val);
3925 :
3926 : /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
3927 0 : if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3928 0 : val = REG_RD(sc, BNX_MQ_MAP_L2_5);
3929 0 : REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM);
3930 0 : }
3931 :
3932 : /* Point the hardware to the first page in the chain. */
3933 0 : val = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[0] >> 32);
3934 0 : CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
3935 0 : val = (u_int32_t)(sc->rx_bd_chain_paddr[0]);
3936 0 : CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val);
3937 0 : }
3938 :
3939 : /****************************************************************************/
3940 : /* Add mbufs to the RX chain until it's full or an mbuf allocation error    */
3941 : /* occurs. */
3942 : /* */
3943 : /* Returns: */
3944 : /*   The number of mbufs added to the chain, 0 if none could be added.     */
3945 : /****************************************************************************/
3946 : int
3947 0 : bnx_fill_rx_chain(struct bnx_softc *sc)
3948 : {
3949 0 : u_int16_t prod, chain_prod;
3950 0 : u_int32_t prod_bseq;
3951 : u_int slots, used;
3952 : int ndesc = 0;
3953 : #ifdef BNX_DEBUG
3954 : int rx_mbuf_alloc_before;
3955 : #endif
3956 :
3957 : DBPRINT(sc, BNX_EXCESSIVE_RECV, "Entering %s()\n", __FUNCTION__);
3958 :
3959 0 : prod = sc->rx_prod;
3960 0 : prod_bseq = sc->rx_prod_bseq;
3961 :
3962 : #ifdef BNX_DEBUG
3963 : rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
3964 : #endif
3965 :
3966 : /* Keep filling the RX chain until it's full. */
3967 0 : slots = if_rxr_get(&sc->rx_ring, sc->max_rx_bd);
3968 0 : while (slots > 0) {
3969 0 : chain_prod = RX_CHAIN_IDX(prod);
3970 :
3971 0 : used = bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq);
3972 0 : if (used == 0) {
3973 : /* Bail out if we can't add an mbuf to the chain. */
3974 : break;
3975 : }
3976 0 : slots -= used;
3977 :
3978 0 : prod = NEXT_RX_BD(prod);
3979 0 : ndesc++;
3980 : }
3981 0 : if_rxr_put(&sc->rx_ring, slots);
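            : 	/* Return any slots we could not fill to the ring counter. */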
3982 :
3983 : /* Save the RX chain producer index. */
3984 0 : sc->rx_prod = prod;
3985 0 : sc->rx_prod_bseq = prod_bseq;
3986 :
3987 : /* Tell the chip about the waiting rx_bd's. */
3988 0 : REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
3989 0 : REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3990 :
3991 : DBPRINT(sc, BNX_EXCESSIVE_RECV, "Exiting %s()\n", __FUNCTION__);
3992 :
3993 0 : return (ndesc);
3994 0 : }
3995 :
3996 : /****************************************************************************/
3997 : /* Allocate memory and initialize the RX data structures. */
3998 : /* */
3999 : /* Returns: */
4000 : /* 0 for success, positive value for failure. */
4001 : /****************************************************************************/
4002 : int
4003 0 : bnx_init_rx_chain(struct bnx_softc *sc)
4004 : {
4005 : struct rx_bd *rxbd;
4006 : int i, rc = 0;
4007 : u_int32_t addr;
4008 :
4009 : DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4010 :
4011 : /* Initialize the RX producer and consumer indices. */
4012 0 : sc->rx_prod = 0;
4013 0 : sc->rx_cons = 0;
4014 0 : sc->rx_prod_bseq = 0;
4015 0 : sc->max_rx_bd = USABLE_RX_BD;
4016 : DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
4017 : DBRUNIF(1, sc->rx_empty_count = 0);
4018 :
4019 : /* Initialize the RX next pointer chain entries. */
4020 0 : for (i = 0; i < RX_PAGES; i++) {
4021 : int j;
4022 :
4023 0 : rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4024 :
4025 : /* Check if we've reached the last page. */
4026 0 : if (i == (RX_PAGES - 1))
4027 0 : j = 0;
4028 : else
4029 0 : j = i + 1;
4030 :
4031 : /* Setup the chain page pointers. */
4032 0 : addr = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[j] >> 32);
4033 0 : rxbd->rx_bd_haddr_hi = addr;
4034 0 : addr = (u_int32_t)sc->rx_bd_chain_paddr[j];
4035 0 : rxbd->rx_bd_haddr_lo = addr;
4036 : }
4037 :
4038 0 : if_rxr_init(&sc->rx_ring, 16, sc->max_rx_bd);
4039 :
4040 : /* Fill up the RX chain. */
4041 0 : bnx_fill_rx_chain(sc);
4042 :
4043 0 : for (i = 0; i < RX_PAGES; i++)
4044 0 : bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0,
4045 : sc->rx_bd_chain_map[i]->dm_mapsize,
4046 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4047 :
4048 0 : bnx_init_rx_context(sc);
4049 :
4050 : DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD));
4051 :
4052 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4053 :
4054 0 : return(rc);
4055 : }
4056 :
4057 : /****************************************************************************/
4058 : /* Free memory and clear the RX data structures. */
4059 : /* */
4060 : /* Returns: */
4061 : /* Nothing. */
4062 : /****************************************************************************/
4063 : void
4064 0 : bnx_free_rx_chain(struct bnx_softc *sc)
4065 : {
4066 : int i;
4067 : #ifdef BNX_DEBUG
4068 : int rx_mbuf_alloc_before;
4069 : #endif
4070 :
4071 : DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4072 :
4073 : #ifdef BNX_DEBUG
4074 : rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
4075 : #endif
4076 :
4077 : /* Free any mbufs still in the RX mbuf chain. */
4078 0 : for (i = 0; i < TOTAL_RX_BD; i++) {
4079 0 : if (sc->rx_mbuf_ptr[i] != NULL) {
4080 0 : if (sc->rx_mbuf_map[i] != NULL) {
4081 0 : bus_dmamap_sync(sc->bnx_dmatag,
4082 : sc->rx_mbuf_map[i], 0,
4083 : sc->rx_mbuf_map[i]->dm_mapsize,
4084 : BUS_DMASYNC_POSTREAD);
4085 0 : bus_dmamap_unload(sc->bnx_dmatag,
4086 : sc->rx_mbuf_map[i]);
4087 0 : }
4088 0 : m_freem(sc->rx_mbuf_ptr[i]);
4089 0 : sc->rx_mbuf_ptr[i] = NULL;
4090 : DBRUNIF(1, sc->rx_mbuf_alloc--);
4091 0 : }
4092 : }
4093 :
4094 : DBRUNIF((rx_mbuf_alloc_before - sc->rx_mbuf_alloc),
4095 : BNX_PRINTF(sc, "%s(): Released %d mbufs.\n",
4096 : __FUNCTION__, (rx_mbuf_alloc_before - sc->rx_mbuf_alloc)));
4097 :
4098 : /* Clear each RX chain page. */
4099 0 : for (i = 0; i < RX_PAGES; i++)
4100 0 : bzero(sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
4101 :
4102 : /* Check if we lost any mbufs in the process. */
4103 : DBRUNIF((sc->rx_mbuf_alloc),
4104 : printf("%s: Memory leak! Lost %d mbufs from rx chain!\n",
4105 : 	    __FUNCTION__, sc->rx_mbuf_alloc));
4106 :
4107 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4108 0 : }
4109 :
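            : /* Timeout handler: keep retrying to refill the RX chain until at least    */
            : /* one rx_bd can be posted.                                                 */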
4110 : void
4111 0 : bnx_rxrefill(void *xsc)
4112 : {
4113 0 : struct bnx_softc *sc = xsc;
4114 : int s;
4115 :
4116 0 : s = splnet();
4117 0 : if (!bnx_fill_rx_chain(sc))
4118 0 : timeout_add(&sc->bnx_rxrefill, 1);
4119 0 : splx(s);
4120 0 : }
4121 :
4122 : /****************************************************************************/
4123 : /* Set media options. */
4124 : /* */
4125 : /* Returns: */
4126 : /* 0 for success, positive value for failure. */
4127 : /****************************************************************************/
4128 : int
4129 0 : bnx_ifmedia_upd(struct ifnet *ifp)
4130 : {
4131 : struct bnx_softc *sc;
4132 : struct mii_data *mii;
4133 : int rc = 0;
4134 :
4135 0 : sc = ifp->if_softc;
4136 :
4137 0 : mii = &sc->bnx_mii;
4138 0 : sc->bnx_link = 0;
4139 0 : if (mii->mii_instance) {
4140 : struct mii_softc *miisc;
4141 0 : LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4142 0 : mii_phy_reset(miisc);
4143 0 : }
4144 0 : mii_mediachg(mii);
4145 :
4146 0 : return(rc);
4147 : }
4148 :
4149 : /****************************************************************************/
4150 : /* Reports current media status. */
4151 : /* */
4152 : /* Returns: */
4153 : /* Nothing. */
4154 : /****************************************************************************/
4155 : void
4156 0 : bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4157 : {
4158 : struct bnx_softc *sc;
4159 : struct mii_data *mii;
4160 : int s;
4161 :
4162 0 : sc = ifp->if_softc;
4163 :
4164 0 : s = splnet();
4165 :
4166 0 : mii = &sc->bnx_mii;
4167 :
4168 0 : mii_pollstat(mii);
4169 0 : ifmr->ifm_status = mii->mii_media_status;
4170 0 : ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
4171 0 : sc->bnx_flowflags;
4172 :
4173 0 : splx(s);
4174 0 : }
4175 :
4176 : /****************************************************************************/
4177 : /* Handles PHY generated interrupt events. */
4178 : /* */
4179 : /* Returns: */
4180 : /* Nothing. */
4181 : /****************************************************************************/
4182 : void
4183 0 : bnx_phy_intr(struct bnx_softc *sc)
4184 : {
4185 : u_int32_t new_link_state, old_link_state;
4186 :
4187 0 : new_link_state = sc->status_block->status_attn_bits &
4188 : STATUS_ATTN_BITS_LINK_STATE;
4189 0 : old_link_state = sc->status_block->status_attn_bits_ack &
4190 : STATUS_ATTN_BITS_LINK_STATE;
4191 :
4192 : /* Handle any changes if the link state has changed. */
4193 0 : if (new_link_state != old_link_state) {
4194 : DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc));
4195 :
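            : 		/*
            : 		 * Clear the cached link state and run the tick handler
            : 		 * immediately so the new link state is picked up.
            : 		 */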
4196 0 : sc->bnx_link = 0;
4197 0 : timeout_del(&sc->bnx_timeout);
4198 0 : bnx_tick(sc);
4199 :
4200 : /* Update the status_attn_bits_ack field in the status block. */
4201 0 : if (new_link_state) {
4202 0 : REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD,
4203 : STATUS_ATTN_BITS_LINK_STATE);
4204 : DBPRINT(sc, BNX_INFO, "Link is now UP.\n");
4205 0 : } else {
4206 0 : REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD,
4207 : STATUS_ATTN_BITS_LINK_STATE);
4208 : DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n");
4209 : }
4210 : }
4211 :
4212 : /* Acknowledge the link change interrupt. */
4213 0 : REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE);
4214 0 : }
4215 :
4216 : /****************************************************************************/
4217 : /* Handles received frame interrupt events. */
4218 : /* */
4219 : /* Returns: */
4220 : /* Nothing. */
4221 : /****************************************************************************/
4222 : void
4223 0 : bnx_rx_intr(struct bnx_softc *sc)
4224 : {
4225 0 : struct status_block *sblk = sc->status_block;
4226 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
4227 0 : struct mbuf_list ml = MBUF_LIST_INITIALIZER();
4228 : u_int16_t hw_cons, sw_cons, sw_chain_cons;
4229 : u_int16_t sw_prod, sw_chain_prod;
4230 : u_int32_t sw_prod_bseq;
4231 : struct l2_fhdr *l2fhdr;
4232 : int i;
4233 :
4234 : DBRUNIF(1, sc->rx_interrupts++);
4235 :
4236 0 : if (if_rxr_inuse(&sc->rx_ring) == 0)
4237 0 : return;
4238 :
4239 : /* Prepare the RX chain pages to be accessed by the host CPU. */
4240 0 : for (i = 0; i < RX_PAGES; i++)
4241 0 : bus_dmamap_sync(sc->bnx_dmatag,
4242 : sc->rx_bd_chain_map[i], 0,
4243 : sc->rx_bd_chain_map[i]->dm_mapsize,
4244 : BUS_DMASYNC_POSTWRITE);
4245 :
4246 : /* Get the hardware's view of the RX consumer index. */
4247 0 : hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
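            : 	/* Skip to the next entry if this is a chain page pointer. */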
4248 0 : if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4249 0 : hw_cons++;
4250 :
4251 : /* Get working copies of the driver's view of the RX indices. */
4252 0 : sw_cons = sc->rx_cons;
4253 0 : sw_prod = sc->rx_prod;
4254 0 : sw_prod_bseq = sc->rx_prod_bseq;
4255 :
4256 : DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
4257 : "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
4258 : __FUNCTION__, sw_prod, sw_cons, sw_prod_bseq);
4259 :
4260 : /* Prevent speculative reads from getting ahead of the status block. */
4261 0 : bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4262 : BUS_SPACE_BARRIER_READ);
4263 :
4264 : /*
4265 : * Scan through the receive chain as long
4266 : * as there is work to do.
4267 : */
4268 0 : while (sw_cons != hw_cons) {
4269 : struct mbuf *m;
4270 : struct rx_bd *rxbd;
4271 : unsigned int len;
4272 : u_int32_t status;
4273 :
4274 : /* Clear the mbuf pointer. */
4275 : m = NULL;
4276 :
4277 : /* Convert the producer/consumer indices to an actual
4278 : * rx_bd index.
4279 : */
4280 0 : sw_chain_cons = RX_CHAIN_IDX(sw_cons);
4281 0 : sw_chain_prod = RX_CHAIN_IDX(sw_prod);
4282 :
4283 : /* Get the used rx_bd. */
4284 0 : rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
4285 0 : if_rxr_put(&sc->rx_ring, 1);
4286 :
4287 : DBRUN(BNX_VERBOSE_RECV, printf("%s(): ", __FUNCTION__);
4288 : bnx_dump_rxbd(sc, sw_chain_cons, rxbd));
4289 :
4290 : /* The mbuf is stored with the last rx_bd entry of a packet. */
4291 0 : if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
4292 : /* Validate that this is the last rx_bd. */
4293 : DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
4294 : printf("%s: Unexpected mbuf found in "
4295 : 			    "rx_bd[0x%04X]!\n", __FUNCTION__, sw_chain_cons);
4296 : bnx_breakpoint(sc));
4297 :
4298 : /* DRC - ToDo: If the received packet is small, say less
4299 : * than 128 bytes, allocate a new mbuf here,
4300 : * copy the data to that mbuf, and recycle
4301 : * the mapped jumbo frame.
4302 : */
4303 :
4304 : /* Unmap the mbuf from DMA space. */
4305 0 : bus_dmamap_sync(sc->bnx_dmatag,
4306 : sc->rx_mbuf_map[sw_chain_cons], 0,
4307 : sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
4308 : BUS_DMASYNC_POSTREAD);
4309 0 : bus_dmamap_unload(sc->bnx_dmatag,
4310 : sc->rx_mbuf_map[sw_chain_cons]);
4311 :
4312 : /* Remove the mbuf from RX chain. */
4313 0 : m = sc->rx_mbuf_ptr[sw_chain_cons];
4314 0 : sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4315 :
4316 : /*
4317 : 			 * Frames received on the NetXtreme II are prepended
4318 : * with the l2_fhdr structure which provides status
4319 : * information about the received frame (including
4320 : * VLAN tags and checksum info) and are also
4321 : * automatically adjusted to align the IP header
4322 : * (i.e. two null bytes are inserted before the
4323 : * Ethernet header).
4324 : */
4325 0 : l2fhdr = mtod(m, struct l2_fhdr *);
4326 :
4327 0 : len = l2fhdr->l2_fhdr_pkt_len;
4328 0 : status = l2fhdr->l2_fhdr_status;
4329 :
4330 : DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
4331 : printf("Simulating l2_fhdr status error.\n");
4332 : status = status | L2_FHDR_ERRORS_PHY_DECODE);
4333 :
4334 : /* Watch for unusual sized frames. */
4335 : DBRUNIF(((len < BNX_MIN_MTU) ||
4336 : (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
4337 : printf("%s: Unusual frame size found. "
4338 : 			    "Min(%d), Actual(%d), Max(%d)\n", __FUNCTION__, (int)BNX_MIN_MTU,
4339 : len, (int) BNX_MAX_JUMBO_ETHER_MTU_VLAN);
4340 :
4341 : bnx_dump_mbuf(sc, m);
4342 : bnx_breakpoint(sc));
4343 :
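            : 			/* The reported frame length includes the CRC; remove it. */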
4344 0 : len -= ETHER_CRC_LEN;
4345 :
4346 : /* Check the received frame for errors. */
4347 0 : if (status & (L2_FHDR_ERRORS_BAD_CRC |
4348 : L2_FHDR_ERRORS_PHY_DECODE |
4349 : L2_FHDR_ERRORS_ALIGNMENT |
4350 : L2_FHDR_ERRORS_TOO_SHORT |
4351 : L2_FHDR_ERRORS_GIANT_FRAME)) {
4352 : /* Log the error and release the mbuf. */
4353 0 : ifp->if_ierrors++;
4354 : DBRUNIF(1, sc->l2fhdr_status_errors++);
4355 :
4356 0 : m_freem(m);
4357 : m = NULL;
4358 0 : goto bnx_rx_int_next_rx;
4359 : }
4360 :
4361 : /* Skip over the l2_fhdr when passing the data up
4362 : * the stack.
4363 : */
4364 0 : m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4365 :
4366 : 			/* Adjust the packet length to match the received data. */
4367 0 : m->m_pkthdr.len = m->m_len = len;
4368 :
4369 : DBRUN(BNX_VERBOSE_RECV,
4370 : struct ether_header *eh;
4371 : eh = mtod(m, struct ether_header *);
4372 : printf("%s: to: %6D, from: %6D, type: 0x%04X\n",
4373 : __FUNCTION__, eh->ether_dhost, ":",
4374 : eh->ether_shost, ":", htons(eh->ether_type)));
4375 :
4376 : /* Validate the checksum. */
4377 :
4378 : /* Check for an IP datagram. */
4379 0 : if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4380 : /* Check if the IP checksum is valid. */
4381 0 : if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff)
4382 0 : == 0)
4383 0 : m->m_pkthdr.csum_flags |=
4384 : M_IPV4_CSUM_IN_OK;
4385 : else
4386 : DBPRINT(sc, BNX_WARN_SEND,
4387 : "%s(): Invalid IP checksum "
4388 : "= 0x%04X!\n",
4389 : __FUNCTION__,
4390 : l2fhdr->l2_fhdr_ip_xsum
4391 : );
4392 : }
4393 :
4394 : /* Check for a valid TCP/UDP frame. */
4395 0 : if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4396 : L2_FHDR_STATUS_UDP_DATAGRAM)) {
4397 : /* Check for a good TCP/UDP checksum. */
4398 0 : if ((status &
4399 : (L2_FHDR_ERRORS_TCP_XSUM |
4400 0 : L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4401 0 : m->m_pkthdr.csum_flags |=
4402 : M_TCP_CSUM_IN_OK |
4403 : M_UDP_CSUM_IN_OK;
4404 0 : } else {
4405 : DBPRINT(sc, BNX_WARN_SEND,
4406 : "%s(): Invalid TCP/UDP "
4407 : "checksum = 0x%04X!\n",
4408 : __FUNCTION__,
4409 : l2fhdr->l2_fhdr_tcp_udp_xsum);
4410 : }
4411 : }
4412 :
4413 : /*
4414 : * If we received a packet with a vlan tag,
4415 : * attach that information to the packet.
4416 : */
4417 0 : if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
4418 0 : !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
4419 : #if NVLAN > 0
4420 : DBPRINT(sc, BNX_VERBOSE_SEND,
4421 : "%s(): VLAN tag = 0x%04X\n",
4422 : __FUNCTION__,
4423 : l2fhdr->l2_fhdr_vlan_tag);
4424 :
4425 0 : m->m_pkthdr.ether_vtag =
4426 0 : l2fhdr->l2_fhdr_vlan_tag;
4427 0 : m->m_flags |= M_VLANTAG;
4428 : #else
4429 : m_freem(m);
4430 : m = NULL;
4431 : goto bnx_rx_int_next_rx;
4432 : #endif
4433 0 : }
4434 :
4435 : bnx_rx_int_next_rx:
4436 0 : sw_prod = NEXT_RX_BD(sw_prod);
4437 0 : }
4438 :
4439 0 : sw_cons = NEXT_RX_BD(sw_cons);
4440 :
4441 : /* If we have a packet, pass it up the stack */
4442 0 : if (m) {
4443 0 : sc->rx_cons = sw_cons;
4444 :
4445 : DBPRINT(sc, BNX_VERBOSE_RECV,
4446 : "%s(): Passing received frame up.\n", __FUNCTION__);
4447 0 : ml_enqueue(&ml, m);
4448 : DBRUNIF(1, sc->rx_mbuf_alloc--);
4449 :
4450 0 : sw_cons = sc->rx_cons;
4451 0 : }
4452 :
4453 : /* Refresh hw_cons to see if there's new work */
4454 0 : if (sw_cons == hw_cons) {
4455 0 : hw_cons = sc->hw_rx_cons =
4456 0 : sblk->status_rx_quick_consumer_index0;
4457 0 : if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
4458 : USABLE_RX_BD_PER_PAGE)
4459 0 : hw_cons++;
4460 : }
4461 :
4462 : /* Prevent speculative reads from getting ahead of
4463 : * the status block.
4464 : */
4465 0 : bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4466 : BUS_SPACE_BARRIER_READ);
4467 : }
4468 :
4469 : /* No new packets to process. Refill the RX chain and exit. */
4470 0 : sc->rx_cons = sw_cons;
4471 0 : if (!bnx_fill_rx_chain(sc))
4472 0 : timeout_add(&sc->bnx_rxrefill, 1);
4473 :
4474 0 : for (i = 0; i < RX_PAGES; i++)
4475 0 : bus_dmamap_sync(sc->bnx_dmatag,
4476 : sc->rx_bd_chain_map[i], 0,
4477 : sc->rx_bd_chain_map[i]->dm_mapsize,
4478 : BUS_DMASYNC_PREWRITE);
4479 :
4480 0 : if_input(ifp, &ml);
4481 :
4482 : DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4483 : "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4484 : __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4485 0 : }
4486 :
4487 : /****************************************************************************/
4488 : /* Handles transmit completion interrupt events. */
4489 : /* */
4490 : /* Returns: */
4491 : /* Nothing. */
4492 : /****************************************************************************/
4493 : void
4494 0 : bnx_tx_intr(struct bnx_softc *sc)
4495 : {
4496 0 : struct status_block *sblk = sc->status_block;
4497 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
4498 : bus_dmamap_t map;
4499 : u_int16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4500 : int freed, used;
4501 :
4502 : DBRUNIF(1, sc->tx_interrupts++);
4503 :
4504 : /* Get the hardware's view of the TX consumer index. */
4505 0 : hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4506 :
4507 : /* Skip to the next entry if this is a chain page pointer. */
4508 0 : if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4509 0 : hw_tx_cons++;
4510 :
4511 0 : sw_tx_cons = sc->tx_cons;
4512 :
4513 : /* Prevent speculative reads from getting ahead of the status block. */
4514 0 : bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4515 : BUS_SPACE_BARRIER_READ);
4516 :
4517 : /* Cycle through any completed TX chain page entries. */
4518 : freed = 0;
4519 0 : while (sw_tx_cons != hw_tx_cons) {
4520 : #ifdef BNX_DEBUG
4521 : struct tx_bd *txbd = NULL;
4522 : #endif
4523 0 : sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4524 :
4525 : DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, "
4526 : "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n",
4527 : __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4528 :
4529 : DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4530 : printf("%s: TX chain consumer out of range! "
4531 : 		    " 0x%04X > 0x%04X\n", __FUNCTION__, sw_tx_chain_cons, (int)MAX_TX_BD);
4532 : bnx_breakpoint(sc));
4533 :
4534 : DBRUNIF(1, txbd = &sc->tx_bd_chain
4535 : [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]);
4536 :
4537 : DBRUNIF((txbd == NULL),
4538 : printf("%s: Unexpected NULL tx_bd[0x%04X]!\n",
4539 : 		    __FUNCTION__, sw_tx_chain_cons);
4540 : bnx_breakpoint(sc));
4541 :
4542 : DBRUN(BNX_INFO_SEND, printf("%s: ", __FUNCTION__);
4543 : bnx_dump_txbd(sc, sw_tx_chain_cons, txbd));
4544 :
4545 0 : map = sc->tx_mbuf_map[sw_tx_chain_cons];
4546 0 : if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4547 : /* Validate that this is the last tx_bd. */
4548 : DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
4549 : printf("%s: tx_bd END flag not set but "
4550 : 			    "txmbuf == NULL!\n", __FUNCTION__);
4551 : bnx_breakpoint(sc));
4552 :
4553 : DBRUN(BNX_INFO_SEND,
4554 : printf("%s: Unloading map/freeing mbuf "
4555 : "from tx_bd[0x%04X]\n",
4556 : __FUNCTION__, sw_tx_chain_cons));
4557 :
4558 : /* Unmap the mbuf. */
4559 0 : bus_dmamap_sync(sc->bnx_dmatag, map, 0,
4560 : map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4561 0 : bus_dmamap_unload(sc->bnx_dmatag, map);
4562 :
4563 : /* Free the mbuf. */
4564 0 : m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4565 0 : sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4566 0 : }
4567 :
4568 0 : freed++;
4569 0 : sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4570 : }
4571 :
4572 0 : used = atomic_sub_int_nv(&sc->used_tx_bd, freed);
4573 :
4574 0 : sc->tx_cons = sw_tx_cons;
4575 :
4576 : /* Clear the TX timeout timer. */
4577 0 : if (used == 0)
4578 0 : ifp->if_timer = 0;
4579 :
4580 0 : if (ifq_is_oactive(&ifp->if_snd))
4581 0 : ifq_restart(&ifp->if_snd);
4582 0 : }
4583 :
4584 : /****************************************************************************/
4585 : /* Disables interrupt generation. */
4586 : /* */
4587 : /* Returns: */
4588 : /* Nothing. */
4589 : /****************************************************************************/
4590 : void
4591 0 : bnx_disable_intr(struct bnx_softc *sc)
4592 : {
4593 0 : REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4594 0 : REG_RD(sc, BNX_PCICFG_INT_ACK_CMD);
4595 0 : }
4596 :
4597 : /****************************************************************************/
4598 : /* Enables interrupt generation. */
4599 : /* */
4600 : /* Returns: */
4601 : /* Nothing. */
4602 : /****************************************************************************/
4603 : void
4604 0 : bnx_enable_intr(struct bnx_softc *sc)
4605 : {
4606 : u_int32_t val;
4607 :
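            : 	/* Acknowledge the current status block index with interrupts    */
            : 	/* still masked, then write it again unmasked to re-enable them. */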
4608 0 : REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
4609 : BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4610 :
4611 0 : REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
4612 : sc->last_status_idx);
4613 :
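            : 	/* Force a coalescing pass so anything that arrived while        */
            : 	/* interrupts were masked is reported right away.                 */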
4614 0 : val = REG_RD(sc, BNX_HC_COMMAND);
4615 0 : REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW);
4616 0 : }
4617 :
4618 : /****************************************************************************/
4619 : /* Handles controller initialization. */
4620 : /* */
4621 : /* Returns: */
4622 : /* Nothing. */
4623 : /****************************************************************************/
4624 : void
4625 0 : bnx_init(void *xsc)
4626 : {
4627 0 : struct bnx_softc *sc = (struct bnx_softc *)xsc;
4628 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
4629 : u_int32_t ether_mtu;
4630 : int s;
4631 :
4632 : DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4633 :
4634 0 : s = splnet();
4635 :
4636 0 : bnx_stop(sc);
4637 :
4638 0 : if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) {
4639 0 : BNX_PRINTF(sc, "Controller reset failed!\n");
4640 0 : goto bnx_init_exit;
4641 : }
4642 :
4643 0 : if (bnx_chipinit(sc)) {
4644 0 : BNX_PRINTF(sc, "Controller initialization failed!\n");
4645 0 : goto bnx_init_exit;
4646 : }
4647 :
4648 0 : if (bnx_blockinit(sc)) {
4649 0 : BNX_PRINTF(sc, "Block initialization failed!\n");
4650 0 : goto bnx_init_exit;
4651 : }
4652 :
4653 : /* Load our MAC address. */
4654 0 : bcopy(sc->arpcom.ac_enaddr, sc->eaddr, ETHER_ADDR_LEN);
4655 0 : bnx_set_mac_addr(sc);
4656 :
4657 : /* Calculate and program the Ethernet MRU size. */
4658 : ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
4659 :
4660 : DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
4661 : __FUNCTION__, ether_mtu);
4662 :
4663 : /*
4664 : * Program the MRU and enable Jumbo frame
4665 : * support.
4666 : */
4667 0 : REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
4668 : BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4669 :
4670 : /* Calculate the RX Ethernet frame size for rx_bd's. */
4671 0 : sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4672 :
4673 : DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4674 : "max_frame_size = %d\n", __FUNCTION__, (int)MCLBYTES,
4675 : sc->mbuf_alloc_size, sc->max_frame_size);
4676 :
4677 : /* Program appropriate promiscuous/multicast filtering. */
4678 0 : bnx_iff(sc);
4679 :
4680 : /* Init RX buffer descriptor chain. */
4681 0 : bnx_init_rx_chain(sc);
4682 :
4683 : /* Init TX buffer descriptor chain. */
4684 0 : bnx_init_tx_chain(sc);
4685 :
4686 : /* Enable host interrupts. */
4687 0 : bnx_enable_intr(sc);
4688 :
4689 0 : bnx_ifmedia_upd(ifp);
4690 :
4691 0 : ifp->if_flags |= IFF_RUNNING;
4692 0 : ifq_clr_oactive(&ifp->if_snd);
4693 :
4694 0 : timeout_add_sec(&sc->bnx_timeout, 1);
4695 :
4696 : bnx_init_exit:
4697 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4698 :
4699 0 : splx(s);
4700 :
4701 : return;
4702 0 : }
4703 :
4704 : void
4705 0 : bnx_mgmt_init(struct bnx_softc *sc)
4706 : {
4707 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
4708 : u_int32_t val;
4709 :
4710 : /* Check if the driver is still running and bail out if it is. */
4711 0 : if (ifp->if_flags & IFF_RUNNING)
4712 : goto bnx_mgmt_init_exit;
4713 :
4714 : 	/* Initialize the on-board CPUs. */
4715 0 : bnx_init_cpus(sc);
4716 :
4717 : val = (BCM_PAGE_BITS - 8) << 24;
4718 0 : REG_WR(sc, BNX_RV2P_CONFIG, val);
4719 :
4720 : /* Enable all critical blocks in the MAC. */
4721 0 : REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
4722 : BNX_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
4723 : BNX_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
4724 : BNX_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
4725 0 : REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
4726 0 : DELAY(20);
4727 :
4728 0 : bnx_ifmedia_upd(ifp);
4729 :
4730 : bnx_mgmt_init_exit:
4731 : DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4732 0 : }
4733 :
4734 : /****************************************************************************/
4735 : /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4736 : /* memory visible to the controller. */
4737 : /* */
4738 : /* Returns: */
4739 : /* 0 for success, positive value for failure. */
4740 : /****************************************************************************/
4741 : int
4742 0 : bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m, int *used)
4743 : {
4744 : bus_dmamap_t map;
4745 : struct tx_bd *txbd = NULL;
4746 : u_int16_t vlan_tag = 0, flags = 0;
4747 : u_int16_t chain_prod, chain_head, prod;
4748 : #ifdef BNX_DEBUG
4749 : u_int16_t debug_prod;
4750 : #endif
4751 : u_int32_t addr, prod_bseq;
4752 : int i, error;
4753 :
4754 : /* Transfer any checksum offload flags to the bd. */
4755 0 : if (m->m_pkthdr.csum_flags) {
4756 0 : if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
4757 0 : flags |= TX_BD_FLAGS_IP_CKSUM;
4758 0 : if (m->m_pkthdr.csum_flags &
4759 : (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
4760 0 : flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4761 : }
4762 :
4763 : #if NVLAN > 0
4764 : /* Transfer any VLAN tags to the bd. */
4765 0 : if (m->m_flags & M_VLANTAG) {
4766 0 : flags |= TX_BD_FLAGS_VLAN_TAG;
4767 0 : vlan_tag = m->m_pkthdr.ether_vtag;
4768 0 : }
4769 : #endif
4770 :
4771 : /* Map the mbuf into DMAable memory. */
4772 0 : prod = sc->tx_prod;
4773 0 : chain_head = chain_prod = TX_CHAIN_IDX(prod);
4774 0 : map = sc->tx_mbuf_map[chain_head];
4775 :
4776 : /* Map the mbuf into our DMA address space. */
4777 0 : error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m,
4778 : BUS_DMA_NOWAIT);
4779 0 : switch (error) {
4780 : case 0:
4781 : break;
4782 :
4783 : case EFBIG:
4784 0 : if ((error = m_defrag(m, M_DONTWAIT)) == 0 &&
4785 0 : (error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m,
4786 0 : BUS_DMA_NOWAIT)) == 0)
4787 : break;
4788 :
4789 : /* FALLTHROUGH */
4790 : default:
4791 0 : sc->tx_dma_map_failures++;
4792 0 : return (ENOBUFS);
4793 : }
4794 :
4795 : /* prod points to an empty tx_bd at this point. */
4796 0 : prod_bseq = sc->tx_prod_bseq;
4797 : #ifdef BNX_DEBUG
4798 : debug_prod = chain_prod;
4799 : #endif
4800 :
4801 : DBPRINT(sc, BNX_INFO_SEND,
4802 : "%s(): Start: prod = 0x%04X, chain_prod = %04X, "
4803 : "prod_bseq = 0x%08X\n",
4804 : __FUNCTION__, prod, chain_prod, prod_bseq);
4805 :
4806 : /*
4807 : * Cycle through each mbuf segment that makes up
4808 : * the outgoing frame, gathering the mapping info
4809 : * for that segment and creating a tx_bd for the
4810 : * mbuf.
4811 : */
4812 0 : for (i = 0; i < map->dm_nsegs ; i++) {
4813 0 : chain_prod = TX_CHAIN_IDX(prod);
4814 0 : txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4815 :
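     : 		/*
     : 		 * Split the 64-bit segment address into the low and high
     : 		 * 32-bit words expected by the tx_bd.
     : 		 */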
4816 0 : addr = (u_int32_t)map->dm_segs[i].ds_addr;
4817 0 : txbd->tx_bd_haddr_lo = addr;
4818 0 : addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
4819 0 : txbd->tx_bd_haddr_hi = addr;
4820 0 : txbd->tx_bd_mss_nbytes = map->dm_segs[i].ds_len;
4821 0 : txbd->tx_bd_vlan_tag = vlan_tag;
4822 0 : txbd->tx_bd_flags = flags;
4823 0 : prod_bseq += map->dm_segs[i].ds_len;
4824 0 : if (i == 0)
4825 0 : txbd->tx_bd_flags |= TX_BD_FLAGS_START;
4826 0 : prod = NEXT_TX_BD(prod);
4827 : }
4828 :
4829 : /* Set the END flag on the last TX buffer descriptor. */
4830 0 : txbd->tx_bd_flags |= TX_BD_FLAGS_END;
4831 :
4832 : DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod,
4833 : map->dm_nsegs));
4834 :
4835 : DBPRINT(sc, BNX_INFO_SEND,
4836 : "%s(): End: prod = 0x%04X, chain_prod = %04X, "
4837 : "prod_bseq = 0x%08X\n",
4838 : __FUNCTION__, prod, chain_prod, prod_bseq);
4839 :
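     : 	/*
     : 	 * Park the mbuf and its loaded DMA map at the last descriptor so
     : 	 * bnx_tx_intr() can unload and free them on completion; the spare
     : 	 * map from that slot is moved back to the head slot.
     : 	 */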
4840 0 : sc->tx_mbuf_ptr[chain_prod] = m;
4841 0 : sc->tx_mbuf_map[chain_head] = sc->tx_mbuf_map[chain_prod];
4842 0 : sc->tx_mbuf_map[chain_prod] = map;
4843 :
4844 0 : *used += map->dm_nsegs;
4845 :
4846 : DBRUNIF(1, sc->tx_mbuf_alloc++);
4847 :
4848 : DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod,
4849 : map->dm_nsegs));
4850 :
4851 0 : bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
4852 : BUS_DMASYNC_PREWRITE);
4853 :
4854 : /* prod points to the next free tx_bd at this point. */
4855 0 : sc->tx_prod = prod;
4856 0 : sc->tx_prod_bseq = prod_bseq;
4857 :
4858 0 : return (0);
4859 0 : }
4860 :
4861 : /****************************************************************************/
4862 : /* Main transmit routine. */
4863 : /* */
4864 : /* Returns: */
4865 : /* Nothing. */
4866 : /****************************************************************************/
4867 : void
4868 0 : bnx_start(struct ifqueue *ifq)
4869 : {
4870 0 : struct ifnet *ifp = ifq->ifq_if;
4871 0 : struct bnx_softc *sc = ifp->if_softc;
4872 : struct mbuf *m_head = NULL;
4873 0 : int used;
4874 : u_int16_t tx_prod, tx_chain_prod;
4875 :
4876 0 : if (!sc->bnx_link) {
4877 0 : ifq_purge(ifq);
4878 0 : goto bnx_start_exit;
4879 : }
4880 :
4881 : /* prod points to the next free tx_bd. */
4882 0 : tx_prod = sc->tx_prod;
4883 0 : tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4884 :
4885 : DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, "
4886 : "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X\n",
4887 : __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
4888 :
4889 : /*
4890 : * Keep adding entries while there is space in the ring.
4891 : */
4892 0 : used = 0;
4893 0 : while (1) {
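     : 		/*
     : 		 * Stop once a maximally fragmented packet (BNX_MAX_SEGMENTS
     : 		 * descriptors plus one spare) can no longer be accommodated.
     : 		 */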
4894 0 : if (sc->used_tx_bd + used + BNX_MAX_SEGMENTS + 1 >=
4895 0 : sc->max_tx_bd) {
4896 : DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for "
4897 : "business! Total tx_bd used = %d\n",
4898 : sc->used_tx_bd + used);
4899 0 : ifq_set_oactive(ifq);
4900 0 : break;
4901 : }
4902 :
4903 0 : m_head = ifq_dequeue(ifq);
4904 0 : if (m_head == NULL)
4905 : break;
4906 :
4907 0 : if (bnx_tx_encap(sc, m_head, &used)) {
4908 0 : m_freem(m_head);
4909 0 : continue;
4910 : }
4911 :
4912 : #if NBPFILTER > 0
4913 0 : if (ifp->if_bpf)
4914 0 : bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
4915 : #endif
4916 : }
4917 :
4918 0 : if (used == 0) {
4919 : /* no packets were dequeued */
4920 : DBPRINT(sc, BNX_VERBOSE_SEND,
4921 : "%s(): No packets were dequeued\n", __FUNCTION__);
4922 : goto bnx_start_exit;
4923 : }
4924 :
4925 : /* Update the driver's counters. */
4926 0 : used = atomic_add_int_nv(&sc->used_tx_bd, used);
4927 :
4928 : /* Update some debug statistics counters */
4929 : DBRUNIF((used > sc->tx_hi_watermark),
4930 : sc->tx_hi_watermark = used);
4931 : DBRUNIF(used == sc->max_tx_bd, sc->tx_full_count++);
4932 :
4933 0 : tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
4934 : DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod "
4935 : "= 0x%04X, tx_prod_bseq = 0x%08X\n", __FUNCTION__, tx_prod,
4936 : tx_chain_prod, sc->tx_prod_bseq);
4937 :
4938 : /* Start the transmit. */
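     : 	/*
     : 	 * The mailbox writes of tx_prod and tx_prod_bseq hand the new
     : 	 * descriptors to the chip.
     : 	 */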
4939 0 : REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4940 0 : REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4941 :
4942 : /* Set the tx timeout. */
4943 0 : ifp->if_timer = BNX_TX_TIMEOUT;
4944 :
4945 : bnx_start_exit:
4946 : return;
4947 0 : }
4948 :
4949 : /****************************************************************************/
4950 : /* Handles any IOCTL calls from the operating system. */
4951 : /* */
4952 : /* Returns: */
4953 : /* 0 for success, positive value for failure. */
4954 : /****************************************************************************/
4955 : int
4956 0 : bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4957 : {
4958 0 : struct bnx_softc *sc = ifp->if_softc;
4959 0 : struct ifreq *ifr = (struct ifreq *) data;
4960 0 : struct mii_data *mii = &sc->bnx_mii;
4961 : int s, error = 0;
4962 :
4963 0 : s = splnet();
4964 :
4965 0 : switch (command) {
4966 : case SIOCSIFADDR:
4967 0 : ifp->if_flags |= IFF_UP;
4968 0 : if (!(ifp->if_flags & IFF_RUNNING))
4969 0 : bnx_init(sc);
4970 : break;
4971 :
4972 : case SIOCSIFFLAGS:
4973 0 : if (ifp->if_flags & IFF_UP) {
4974 0 : if (ifp->if_flags & IFF_RUNNING)
4975 0 : error = ENETRESET;
4976 : else
4977 0 : bnx_init(sc);
4978 : } else {
4979 0 : if (ifp->if_flags & IFF_RUNNING)
4980 0 : bnx_stop(sc);
4981 : }
4982 : break;
4983 :
4984 : case SIOCSIFMEDIA:
4985 : /* Flow control requires full-duplex mode. */
4986 0 : if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4987 0 : (ifr->ifr_media & IFM_FDX) == 0)
4988 0 : ifr->ifr_media &= ~IFM_ETH_FMASK;
4989 :
4990 0 : if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4991 0 : if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4992 : /* We can do both TXPAUSE and RXPAUSE. */
4993 0 : ifr->ifr_media |=
4994 : IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4995 0 : }
4996 0 : sc->bnx_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4997 0 : }
4998 : /* FALLTHROUGH */
4999 : case SIOCGIFMEDIA:
5000 : DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
5001 : sc->bnx_phy_flags);
5002 :
5003 0 : error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5004 0 : break;
5005 :
5006 : case SIOCGIFRXR:
5007 0 : error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
5008 0 : NULL, MCLBYTES, &sc->rx_ring);
5009 0 : break;
5010 :
5011 : default:
5012 0 : error = ether_ioctl(ifp, &sc->arpcom, command, data);
5013 0 : }
5014 :
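     : 	/*
     : 	 * ENETRESET from the cases above means only the RX filter needs
     : 	 * reprogramming, so call bnx_iff() rather than re-initializing.
     : 	 */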
5015 0 : if (error == ENETRESET) {
5016 0 : if (ifp->if_flags & IFF_RUNNING)
5017 0 : bnx_iff(sc);
5018 : error = 0;
5019 0 : }
5020 :
5021 0 : splx(s);
5022 0 : return (error);
5023 : }
5024 :
5025 : /****************************************************************************/
5026 : /* Transmit timeout handler. */
5027 : /* */
5028 : /* Returns: */
5029 : /* Nothing. */
5030 : /****************************************************************************/
5031 : void
5032 0 : bnx_watchdog(struct ifnet *ifp)
5033 : {
5034 0 : struct bnx_softc *sc = ifp->if_softc;
5035 :
5036 : DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
5037 : bnx_dump_status_block(sc));
5038 :
5039 : /*
5040 : * If we are in this routine because of pause frames, then
5041 : * don't reset the hardware.
5042 : */
5043 0 : if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED)
5044 0 : return;
5045 :
5046 0 : printf("%s: Watchdog timeout occurred, resetting!\n",
5047 0 : ifp->if_xname);
5048 :
5049 : /* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */
5050 :
5051 0 : bnx_init(sc);
5052 :
5053 0 : ifp->if_oerrors++;
5054 0 : }
5055 :
5056 : /*
5057 : * Interrupt handler.
5058 : */
5059 : /****************************************************************************/
5060 : /* Main interrupt entry point. Verifies that the controller generated the */
5061 : /* interrupt and then calls a separate routine to handle the various */
5062 : /* interrupt causes (PHY, TX, RX). */
5063 : /* */
5064 : /* Returns: */
5065 : /* 0 for success, positive value for failure. */
5066 : /****************************************************************************/
5067 : int
5068 0 : bnx_intr(void *xsc)
5069 : {
5070 0 : struct bnx_softc *sc = xsc;
5071 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
5072 : u_int32_t status_attn_bits;
5073 : u_int16_t status_idx;
5074 : int rv = 0;
5075 :
5076 0 : if ((sc->bnx_flags & BNX_ACTIVE_FLAG) == 0)
5077 0 : return (0);
5078 :
5079 : DBRUNIF(1, sc->interrupts_generated++);
5080 :
5081 0 : bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
5082 : sc->status_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
5083 :
5084 : /*
5085 : * If the hardware status block index
5086 : * matches the last value read by the
5087 : * driver and we haven't asserted our
5088 : * interrupt then there's nothing to do.
5089 : */
5090 0 : status_idx = sc->status_block->status_idx;
5091 0 : if (status_idx != sc->last_status_idx ||
5092 0 : !ISSET(REG_RD(sc, BNX_PCICFG_MISC_STATUS),
5093 : BNX_PCICFG_MISC_STATUS_INTA_VALUE)) {
5094 : rv = 1;
5095 :
5096 : /* Ack the interrupt */
5097 0 : REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
5098 : BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | status_idx);
5099 :
5100 0 : status_attn_bits = sc->status_block->status_attn_bits;
5101 :
5102 : DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
5103 : printf("Simulating unexpected status attention bit set.");
5104 : status_attn_bits = status_attn_bits |
5105 : STATUS_ATTN_BITS_PARITY_ERROR);
5106 :
5107 : /* Was it a link change interrupt? */
5108 0 : if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5109 0 : (sc->status_block->status_attn_bits_ack &
5110 : STATUS_ATTN_BITS_LINK_STATE)) {
5111 0 : KERNEL_LOCK();
5112 0 : bnx_phy_intr(sc);
5113 0 : KERNEL_UNLOCK();
5114 0 : }
5115 :
5116 : /* If any other attention is asserted then the chip is toast. */
5117 0 : if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5118 0 : (sc->status_block->status_attn_bits_ack &
5119 : ~STATUS_ATTN_BITS_LINK_STATE))) {
5120 0 : KERNEL_LOCK();
5121 : DBRUN(1, sc->unexpected_attentions++);
5122 :
5123 0 : BNX_PRINTF(sc, "Fatal attention detected: 0x%08X\n",
5124 : sc->status_block->status_attn_bits);
5125 :
5126 : DBRUN(BNX_FATAL,
5127 : if (bnx_debug_unexpected_attention == 0)
5128 : bnx_breakpoint(sc));
5129 :
5130 0 : bnx_init(sc);
5131 0 : KERNEL_UNLOCK();
5132 0 : goto out;
5133 : }
5134 :
5135 : /* Check for any completed RX frames. */
5136 0 : if (sc->status_block->status_rx_quick_consumer_index0 !=
5137 0 : sc->hw_rx_cons)
5138 0 : bnx_rx_intr(sc);
5139 :
5140 : /* Check for any completed TX frames. */
5141 0 : if (sc->status_block->status_tx_quick_consumer_index0 !=
5142 0 : sc->hw_tx_cons)
5143 0 : bnx_tx_intr(sc);
5144 :
5145 : /*
5146 : * Save the status block index value for use during the
5147 : * next interrupt.
5148 : */
5149 0 : sc->last_status_idx = status_idx;
5150 :
5151 : /* Start moving packets again */
5152 0 : if (ifp->if_flags & IFF_RUNNING &&
5153 0 : !IFQ_IS_EMPTY(&ifp->if_snd))
5154 0 : ifq_start(&ifp->if_snd);
5155 : }
5156 :
5157 : out:
5158 0 : bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
5159 : sc->status_map->dm_mapsize, BUS_DMASYNC_PREREAD);
5160 :
5161 0 : return (rv);
5162 0 : }
5163 :
5164 : /****************************************************************************/
5165 : /* Programs the various packet receive modes (broadcast and multicast). */
5166 : /* */
5167 : /* Returns: */
5168 : /* Nothing. */
5169 : /****************************************************************************/
5170 : void
5171 0 : bnx_iff(struct bnx_softc *sc)
5172 : {
5173 0 : struct arpcom *ac = &sc->arpcom;
5174 0 : struct ifnet *ifp = &ac->ac_if;
5175 : struct ether_multi *enm;
5176 : struct ether_multistep step;
5177 0 : u_int32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5178 : u_int32_t rx_mode, sort_mode;
5179 : int h, i;
5180 :
5181 : /* Initialize receive mode default settings. */
5182 0 : rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
5183 : BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
5184 : sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
5185 0 : ifp->if_flags &= ~IFF_ALLMULTI;
5186 :
5187 : /*
5188 : * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5189 : 	 * be enabled.
5190 : */
5191 0 : if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) &&
5192 0 : (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG)))
5193 0 : rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;
5194 :
5195 : /*
5196 : * Check for promiscuous, all multicast, or selected
5197 : * multicast address filtering.
5198 : */
5199 0 : if (ifp->if_flags & IFF_PROMISC) {
5200 : DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");
5201 :
5202 0 : ifp->if_flags |= IFF_ALLMULTI;
5203 : /* Enable promiscuous mode. */
5204 0 : rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
5205 : sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
5206 0 : } else if (ac->ac_multirangecnt > 0) {
5207 : DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");
5208 :
5209 0 : ifp->if_flags |= IFF_ALLMULTI;
5210 : /* Enable all multicast addresses. */
5211 0 : for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
5212 0 : REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
5213 : 0xffffffff);
5214 : sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
5215 0 : } else {
5216 : /* Accept one or more multicast(s). */
5217 : DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");
5218 :
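     : 		/*
     : 		 * Hash each address into one of 256 bits spread across the
     : 		 * eight EMAC multicast hash registers: bits 7-5 of the low
     : 		 * CRC byte pick the register, bits 4-0 pick the bit.
     : 		 */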
5219 0 : ETHER_FIRST_MULTI(step, ac, enm);
5220 0 : while (enm != NULL) {
5221 0 : h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
5222 : 0xFF;
5223 :
5224 0 : hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5225 :
5226 0 : ETHER_NEXT_MULTI(step, enm);
5227 : }
5228 :
5229 0 : for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
5230 0 : REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
5231 : hashes[i]);
5232 :
5233 : sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
5234 : }
5235 :
5236 : 	/* Only make changes if the receive mode has actually changed. */
5237 0 : if (rx_mode != sc->rx_mode) {
5238 : DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
5239 : rx_mode);
5240 :
5241 0 : sc->rx_mode = rx_mode;
5242 0 : REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
5243 0 : }
5244 :
5245 : 	/* Disable and clear the existing sort before enabling a new sort. */
5246 0 : REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
5247 0 : REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
5248 0 : REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
5249 0 : }
5250 :
5251 : /****************************************************************************/
5252 : /* Called periodically to update statistics from the controller's */
5253 : /* statistics block. */
5254 : /* */
5255 : /* Returns: */
5256 : /* Nothing. */
5257 : /****************************************************************************/
5258 : void
5259 0 : bnx_stats_update(struct bnx_softc *sc)
5260 : {
5261 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
5262 : struct statistics_block *stats;
5263 :
5264 : DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
5265 :
5266 0 : stats = (struct statistics_block *)sc->stats_block;
5267 :
5268 : /*
5269 : * Update the interface statistics from the
5270 : * hardware statistics.
5271 : */
5272 0 : ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;
5273 :
5274 0 : ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
5275 0 : (u_long)stats->stat_EtherStatsOverrsizePkts +
5276 0 : (u_long)stats->stat_IfInMBUFDiscards +
5277 0 : (u_long)stats->stat_Dot3StatsAlignmentErrors +
5278 0 : (u_long)stats->stat_Dot3StatsFCSErrors;
5279 :
5280 0 : ifp->if_oerrors = (u_long)
5281 0 : stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5282 0 : (u_long)stats->stat_Dot3StatsExcessiveCollisions +
5283 0 : (u_long)stats->stat_Dot3StatsLateCollisions;
5284 :
5285 : /*
5286 : * Certain controllers don't report
5287 : * carrier sense errors correctly.
5288 : * See errata E11_5708CA0_1165.
5289 : */
5290 0 : if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
5291 0 : !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
5292 0 : ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
5293 :
5294 : /*
5295 : * Update the sysctl statistics from the
5296 : * hardware statistics.
5297 : */
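     : 	/* The 64-bit counters are exported by the chip as hi/lo 32-bit halves. */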
5298 0 : sc->stat_IfHCInOctets = ((u_int64_t)stats->stat_IfHCInOctets_hi << 32) +
5299 0 : (u_int64_t) stats->stat_IfHCInOctets_lo;
5300 :
5301 0 : sc->stat_IfHCInBadOctets =
5302 0 : ((u_int64_t) stats->stat_IfHCInBadOctets_hi << 32) +
5303 0 : (u_int64_t) stats->stat_IfHCInBadOctets_lo;
5304 :
5305 0 : sc->stat_IfHCOutOctets =
5306 0 : ((u_int64_t) stats->stat_IfHCOutOctets_hi << 32) +
5307 0 : (u_int64_t) stats->stat_IfHCOutOctets_lo;
5308 :
5309 0 : sc->stat_IfHCOutBadOctets =
5310 0 : ((u_int64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
5311 0 : (u_int64_t) stats->stat_IfHCOutBadOctets_lo;
5312 :
5313 0 : sc->stat_IfHCInUcastPkts =
5314 0 : ((u_int64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
5315 0 : (u_int64_t) stats->stat_IfHCInUcastPkts_lo;
5316 :
5317 0 : sc->stat_IfHCInMulticastPkts =
5318 0 : ((u_int64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
5319 0 : (u_int64_t) stats->stat_IfHCInMulticastPkts_lo;
5320 :
5321 0 : sc->stat_IfHCInBroadcastPkts =
5322 0 : ((u_int64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
5323 0 : (u_int64_t) stats->stat_IfHCInBroadcastPkts_lo;
5324 :
5325 0 : sc->stat_IfHCOutUcastPkts =
5326 0 : ((u_int64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
5327 0 : (u_int64_t) stats->stat_IfHCOutUcastPkts_lo;
5328 :
5329 0 : sc->stat_IfHCOutMulticastPkts =
5330 0 : ((u_int64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
5331 0 : (u_int64_t) stats->stat_IfHCOutMulticastPkts_lo;
5332 :
5333 0 : sc->stat_IfHCOutBroadcastPkts =
5334 0 : ((u_int64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5335 0 : (u_int64_t) stats->stat_IfHCOutBroadcastPkts_lo;
5336 :
5337 0 : sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5338 0 : stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5339 :
5340 0 : sc->stat_Dot3StatsCarrierSenseErrors =
5341 0 : stats->stat_Dot3StatsCarrierSenseErrors;
5342 :
5343 0 : sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;
5344 :
5345 0 : sc->stat_Dot3StatsAlignmentErrors =
5346 0 : stats->stat_Dot3StatsAlignmentErrors;
5347 :
5348 0 : sc->stat_Dot3StatsSingleCollisionFrames =
5349 0 : stats->stat_Dot3StatsSingleCollisionFrames;
5350 :
5351 0 : sc->stat_Dot3StatsMultipleCollisionFrames =
5352 0 : stats->stat_Dot3StatsMultipleCollisionFrames;
5353 :
5354 0 : sc->stat_Dot3StatsDeferredTransmissions =
5355 0 : stats->stat_Dot3StatsDeferredTransmissions;
5356 :
5357 0 : sc->stat_Dot3StatsExcessiveCollisions =
5358 0 : stats->stat_Dot3StatsExcessiveCollisions;
5359 :
5360 0 : sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;
5361 :
5362 0 : sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;
5363 :
5364 0 : sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;
5365 :
5366 0 : sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;
5367 :
5368 0 : sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;
5369 :
5370 0 : sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;
5371 :
5372 0 : sc->stat_EtherStatsPktsRx64Octets =
5373 0 : stats->stat_EtherStatsPktsRx64Octets;
5374 :
5375 0 : sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5376 0 : stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5377 :
5378 0 : sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5379 0 : stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5380 :
5381 0 : sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5382 0 : stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5383 :
5384 0 : sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5385 0 : stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5386 :
5387 0 : sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5388 0 : stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5389 :
5390 0 : sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5391 0 : stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5392 :
5393 0 : sc->stat_EtherStatsPktsTx64Octets =
5394 0 : stats->stat_EtherStatsPktsTx64Octets;
5395 :
5396 0 : sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5397 0 : stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5398 :
5399 0 : sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5400 0 : stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5401 :
5402 0 : sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5403 0 : stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5404 :
5405 0 : sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5406 0 : stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5407 :
5408 0 : sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5409 0 : stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5410 :
5411 0 : sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5412 0 : stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5413 :
5414 0 : sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;
5415 :
5416 0 : sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;
5417 :
5418 0 : sc->stat_OutXonSent = stats->stat_OutXonSent;
5419 :
5420 0 : sc->stat_OutXoffSent = stats->stat_OutXoffSent;
5421 :
5422 0 : sc->stat_FlowControlDone = stats->stat_FlowControlDone;
5423 :
5424 0 : sc->stat_MacControlFramesReceived =
5425 0 : stats->stat_MacControlFramesReceived;
5426 :
5427 0 : sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;
5428 :
5429 0 : sc->stat_IfInFramesL2FilterDiscards =
5430 0 : stats->stat_IfInFramesL2FilterDiscards;
5431 :
5432 0 : sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;
5433 :
5434 0 : sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;
5435 :
5436 0 : sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;
5437 :
5438 0 : sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;
5439 :
5440 0 : sc->stat_CatchupInRuleCheckerDiscards =
5441 0 : stats->stat_CatchupInRuleCheckerDiscards;
5442 :
5443 0 : sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;
5444 :
5445 0 : sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;
5446 :
5447 0 : sc->stat_CatchupInRuleCheckerP4Hit =
5448 0 : stats->stat_CatchupInRuleCheckerP4Hit;
5449 :
5450 : DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
5451 0 : }
5452 :
5453 : void
5454 0 : bnx_tick(void *xsc)
5455 : {
5456 0 : struct bnx_softc *sc = xsc;
5457 0 : struct ifnet *ifp = &sc->arpcom.ac_if;
5458 : struct mii_data *mii = NULL;
5459 : u_int32_t msg;
5460 :
5461 : /* Tell the firmware that the driver is still running. */
5462 : #ifdef BNX_DEBUG
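     : 	/*
     : 	 * ALWAYS_ALIVE keeps the bootcode from timing the driver out
     : 	 * while it is stopped at a debugger breakpoint.
     : 	 */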
5463 : msg = (u_int32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5464 : #else
5465 0 : msg = (u_int32_t)++sc->bnx_fw_drv_pulse_wr_seq;
5466 : #endif
5467 0 : REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);
5468 :
5469 : /* Update the statistics from the hardware statistics block. */
5470 0 : bnx_stats_update(sc);
5471 :
5472 : /* Schedule the next tick. */
5473 0 : timeout_add_sec(&sc->bnx_timeout, 1);
5474 :
5475 : 	/* If the link is already up then we're done. */
5476 0 : if (sc->bnx_link)
5477 : goto bnx_tick_exit;
5478 :
5479 0 : mii = &sc->bnx_mii;
5480 0 : mii_tick(mii);
5481 :
5482 : /* Check if the link has come up. */
5483 0 : if (!sc->bnx_link && mii->mii_media_status & IFM_ACTIVE &&
5484 0 : IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5485 0 : sc->bnx_link++;
5486 : /* Now that link is up, handle any outstanding TX traffic. */
5487 0 : if (!ifq_empty(&ifp->if_snd))
5488 0 : ifq_start(&ifp->if_snd);
5489 : }
5490 :
5491 : bnx_tick_exit:
5492 : return;
5493 0 : }
5494 :
5495 : /****************************************************************************/
5496 : /* BNX Debug Routines */
5497 : /****************************************************************************/
5498 : #ifdef BNX_DEBUG
5499 :
5500 : /****************************************************************************/
5501 : /* Prints out information about an mbuf. */
5502 : /* */
5503 : /* Returns: */
5504 : /* Nothing. */
5505 : /****************************************************************************/
5506 : void
5507 : bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
5508 : {
5509 : struct mbuf *mp = m;
5510 :
5511 : if (m == NULL) {
5512 : 		/* NULL mbuf pointer. */
5513 : printf("mbuf ptr is null!\n");
5514 : return;
5515 : }
5516 :
5517 : while (mp) {
5518 : printf("mbuf: vaddr = %p, m_len = %d, m_flags = ",
5519 : mp, mp->m_len);
5520 :
5521 : if (mp->m_flags & M_EXT)
5522 : printf("M_EXT ");
5523 : if (mp->m_flags & M_PKTHDR)
5524 : printf("M_PKTHDR ");
5525 : printf("\n");
5526 :
5527 : if (mp->m_flags & M_EXT)
5528 : printf("- m_ext: vaddr = %p, ext_size = 0x%04X\n",
5529 : mp, mp->m_ext.ext_size);
5530 :
5531 : mp = mp->m_next;
5532 : }
5533 : }
5534 :
5535 : /****************************************************************************/
5536 : /* Prints out the mbufs in the TX mbuf chain. */
5537 : /* */
5538 : /* Returns: */
5539 : /* Nothing. */
5540 : /****************************************************************************/
5541 : void
5542 : bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5543 : {
5544 : struct mbuf *m;
5545 : int i;
5546 :
5547 : BNX_PRINTF(sc,
5548 : "----------------------------"
5549 : " tx mbuf data "
5550 : "----------------------------\n");
5551 :
5552 : for (i = 0; i < count; i++) {
5553 : m = sc->tx_mbuf_ptr[chain_prod];
5554 : BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
5555 : bnx_dump_mbuf(sc, m);
5556 : chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
5557 : }
5558 :
5559 : BNX_PRINTF(sc,
5560 : "--------------------------------------------"
5561 : "----------------------------\n");
5562 : }
5563 :
5564 : /*
5565 : * This routine prints the RX mbuf chain.
5566 : */
5567 : void
5568 : bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5569 : {
5570 : struct mbuf *m;
5571 : int i;
5572 :
5573 : BNX_PRINTF(sc,
5574 : "----------------------------"
5575 : " rx mbuf data "
5576 : "----------------------------\n");
5577 :
5578 : for (i = 0; i < count; i++) {
5579 : m = sc->rx_mbuf_ptr[chain_prod];
5580 : BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
5581 : bnx_dump_mbuf(sc, m);
5582 : chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
5583 : }
5584 :
5585 :
5586 : BNX_PRINTF(sc,
5587 : "--------------------------------------------"
5588 : "----------------------------\n");
5589 : }
5590 :
5591 : void
5592 : bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
5593 : {
5594 : if (idx > MAX_TX_BD)
5595 : /* Index out of range. */
5596 : BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
5597 : else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
5598 : /* TX Chain page pointer. */
5599 : BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
5600 : "page pointer\n", idx, txbd->tx_bd_haddr_hi,
5601 : txbd->tx_bd_haddr_lo);
5602 : else
5603 : /* Normal tx_bd entry. */
5604 : BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
5605 : "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
5606 : txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
5607 : txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
5608 : txbd->tx_bd_flags);
5609 : }
5610 :
5611 : void
5612 : bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
5613 : {
5614 : if (idx > MAX_RX_BD)
5615 : /* Index out of range. */
5616 : BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
5617 : else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
5618 : 		/* RX chain page pointer. */
5619 : BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
5620 : "pointer\n", idx, rxbd->rx_bd_haddr_hi,
5621 : rxbd->rx_bd_haddr_lo);
5622 : else
5623 : 		/* Normal rx_bd entry. */
5624 : BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
5625 : "0x%08X, flags = 0x%08X\n", idx,
5626 : rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
5627 : rxbd->rx_bd_len, rxbd->rx_bd_flags);
5628 : }
5629 :
5630 : void
5631 : bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
5632 : {
5633 : BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
5634 : "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
5635 : "tcp_udp_xsum = 0x%04X\n", idx,
5636 : l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
5637 : l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
5638 : l2fhdr->l2_fhdr_tcp_udp_xsum);
5639 : }
5640 :
5641 : /*
5642 : * This routine prints the TX chain.
5643 : */
5644 : void
5645 : bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
5646 : {
5647 : struct tx_bd *txbd;
5648 : int i;
5649 :
5650 : /* First some info about the tx_bd chain structure. */
5651 : BNX_PRINTF(sc,
5652 : "----------------------------"
5653 : " tx_bd chain "
5654 : "----------------------------\n");
5655 :
5656 : BNX_PRINTF(sc,
5657 : "page size = 0x%08X, tx chain pages = 0x%08X\n",
5658 : (u_int32_t)BCM_PAGE_SIZE, (u_int32_t) TX_PAGES);
5659 :
5660 : BNX_PRINTF(sc,
5661 : "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
5662 : (u_int32_t)TOTAL_TX_BD_PER_PAGE, (u_int32_t)USABLE_TX_BD_PER_PAGE);
5663 :
5664 : BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", (u_int32_t)TOTAL_TX_BD);
5665 :
5666 : BNX_PRINTF(sc, ""
5667 : "-----------------------------"
5668 : " tx_bd data "
5669 : "-----------------------------\n");
5670 :
5671 : /* Now print out the tx_bd's themselves. */
5672 : for (i = 0; i < count; i++) {
5673 : txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
5674 : bnx_dump_txbd(sc, tx_prod, txbd);
5675 : tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
5676 : }
5677 :
5678 : BNX_PRINTF(sc,
5679 : "-----------------------------"
5680 : "--------------"
5681 : "-----------------------------\n");
5682 : }
5683 :
5684 : /*
5685 : * This routine prints the RX chain.
5686 : */
5687 : void
5688 : bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
5689 : {
5690 : struct rx_bd *rxbd;
5691 : int i;
5692 :
5693 : 	/* First some info about the rx_bd chain structure. */
5694 : BNX_PRINTF(sc,
5695 : "----------------------------"
5696 : " rx_bd chain "
5697 : "----------------------------\n");
5698 :
5699 : BNX_PRINTF(sc, "----- RX_BD Chain -----\n");
5700 :
5701 : BNX_PRINTF(sc,
5702 : "page size = 0x%08X, rx chain pages = 0x%08X\n",
5703 : (u_int32_t)BCM_PAGE_SIZE, (u_int32_t)RX_PAGES);
5704 :
5705 : BNX_PRINTF(sc,
5706 : "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
5707 : (u_int32_t)TOTAL_RX_BD_PER_PAGE, (u_int32_t)USABLE_RX_BD_PER_PAGE);
5708 :
5709 : BNX_PRINTF(sc, "total rx_bd = 0x%08X\n", (u_int32_t)TOTAL_RX_BD);
5710 :
5711 : BNX_PRINTF(sc,
5712 : "----------------------------"
5713 : " rx_bd data "
5714 : "----------------------------\n");
5715 :
5716 : /* Now print out the rx_bd's themselves. */
5717 : for (i = 0; i < count; i++) {
5718 : rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
5719 : bnx_dump_rxbd(sc, rx_prod, rxbd);
5720 : rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
5721 : }
5722 :
5723 : BNX_PRINTF(sc,
5724 : "----------------------------"
5725 : "--------------"
5726 : "----------------------------\n");
5727 : }
5728 :
5729 : /*
5730 : * This routine prints the status block.
5731 : */
5732 : void
5733 : bnx_dump_status_block(struct bnx_softc *sc)
5734 : {
5735 : struct status_block *sblk;
5736 :
5737 : sblk = sc->status_block;
5738 :
5739 : BNX_PRINTF(sc, "----------------------------- Status Block "
5740 : "-----------------------------\n");
5741 :
5742 : BNX_PRINTF(sc,
5743 : "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
5744 : sblk->status_attn_bits, sblk->status_attn_bits_ack,
5745 : sblk->status_idx);
5746 :
5747 : BNX_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n",
5748 : sblk->status_rx_quick_consumer_index0,
5749 : sblk->status_tx_quick_consumer_index0);
5750 :
5751 : BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);
5752 :
5753 : 	/* These indices are not used for normal L2 drivers. */
5754 : if (sblk->status_rx_quick_consumer_index1 ||
5755 : sblk->status_tx_quick_consumer_index1)
5756 : BNX_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n",
5757 : sblk->status_rx_quick_consumer_index1,
5758 : sblk->status_tx_quick_consumer_index1);
5759 :
5760 : if (sblk->status_rx_quick_consumer_index2 ||
5761 : sblk->status_tx_quick_consumer_index2)
5762 : BNX_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n",
5763 : sblk->status_rx_quick_consumer_index2,
5764 : sblk->status_tx_quick_consumer_index2);
5765 :
5766 : if (sblk->status_rx_quick_consumer_index3 ||
5767 : sblk->status_tx_quick_consumer_index3)
5768 : BNX_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n",
5769 : sblk->status_rx_quick_consumer_index3,
5770 : sblk->status_tx_quick_consumer_index3);
5771 :
5772 : if (sblk->status_rx_quick_consumer_index4 ||
5773 : sblk->status_rx_quick_consumer_index5)
5774 : BNX_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
5775 : sblk->status_rx_quick_consumer_index4,
5776 : sblk->status_rx_quick_consumer_index5);
5777 :
5778 : if (sblk->status_rx_quick_consumer_index6 ||
5779 : sblk->status_rx_quick_consumer_index7)
5780 : BNX_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
5781 : sblk->status_rx_quick_consumer_index6,
5782 : sblk->status_rx_quick_consumer_index7);
5783 :
5784 : if (sblk->status_rx_quick_consumer_index8 ||
5785 : sblk->status_rx_quick_consumer_index9)
5786 : BNX_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
5787 : sblk->status_rx_quick_consumer_index8,
5788 : sblk->status_rx_quick_consumer_index9);
5789 :
5790 : if (sblk->status_rx_quick_consumer_index10 ||
5791 : sblk->status_rx_quick_consumer_index11)
5792 : BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
5793 : sblk->status_rx_quick_consumer_index10,
5794 : sblk->status_rx_quick_consumer_index11);
5795 :
5796 : if (sblk->status_rx_quick_consumer_index12 ||
5797 : sblk->status_rx_quick_consumer_index13)
5798 : BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
5799 : sblk->status_rx_quick_consumer_index12,
5800 : sblk->status_rx_quick_consumer_index13);
5801 :
5802 : if (sblk->status_rx_quick_consumer_index14 ||
5803 : sblk->status_rx_quick_consumer_index15)
5804 : BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
5805 : sblk->status_rx_quick_consumer_index14,
5806 : sblk->status_rx_quick_consumer_index15);
5807 :
5808 : if (sblk->status_completion_producer_index ||
5809 : sblk->status_cmd_consumer_index)
5810 : BNX_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n",
5811 : sblk->status_completion_producer_index,
5812 : sblk->status_cmd_consumer_index);
5813 :
5814 : BNX_PRINTF(sc, "-------------------------------------------"
5815 : "-----------------------------\n");
5816 : }
5817 :
5818 : /*
5819 : * This routine prints the statistics block.
5820 : */
5821 : void
5822 : bnx_dump_stats_block(struct bnx_softc *sc)
5823 : {
5824 : struct statistics_block *sblk;
5825 :
5826 : sblk = sc->stats_block;
5827 :
5828 : BNX_PRINTF(sc, ""
5829 : "-----------------------------"
5830 : " Stats Block "
5831 : "-----------------------------\n");
5832 :
5833 : BNX_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, "
5834 : "IfHcInBadOctets = 0x%08X:%08X\n",
5835 : sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
5836 : sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);
5837 :
5838 : BNX_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, "
5839 : "IfHcOutBadOctets = 0x%08X:%08X\n",
5840 : sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
5841 : sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);
5842 :
5843 : BNX_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, "
5844 : "IfHcInMulticastPkts = 0x%08X:%08X\n",
5845 : sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
5846 : sblk->stat_IfHCInMulticastPkts_hi,
5847 : sblk->stat_IfHCInMulticastPkts_lo);
5848 :
5849 : BNX_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, "
5850 : "IfHcOutUcastPkts = 0x%08X:%08X\n",
5851 : sblk->stat_IfHCInBroadcastPkts_hi,
5852 : sblk->stat_IfHCInBroadcastPkts_lo,
5853 : sblk->stat_IfHCOutUcastPkts_hi,
5854 : sblk->stat_IfHCOutUcastPkts_lo);
5855 :
5856 : BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
5857 : "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
5858 : sblk->stat_IfHCOutMulticastPkts_hi,
5859 : sblk->stat_IfHCOutMulticastPkts_lo,
5860 : sblk->stat_IfHCOutBroadcastPkts_hi,
5861 : sblk->stat_IfHCOutBroadcastPkts_lo);
5862 :
5863 : if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
5864 : BNX_PRINTF(sc, "0x%08X : "
5865 : "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
5866 : sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
5867 :
5868 : if (sblk->stat_Dot3StatsCarrierSenseErrors)
5869 : BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
5870 : sblk->stat_Dot3StatsCarrierSenseErrors);
5871 :
5872 : if (sblk->stat_Dot3StatsFCSErrors)
5873 : BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
5874 : sblk->stat_Dot3StatsFCSErrors);
5875 :
5876 : if (sblk->stat_Dot3StatsAlignmentErrors)
5877 : BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
5878 : sblk->stat_Dot3StatsAlignmentErrors);
5879 :
5880 : if (sblk->stat_Dot3StatsSingleCollisionFrames)
5881 : BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
5882 : sblk->stat_Dot3StatsSingleCollisionFrames);
5883 :
5884 : if (sblk->stat_Dot3StatsMultipleCollisionFrames)
5885 : BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
5886 : sblk->stat_Dot3StatsMultipleCollisionFrames);
5887 :
5888 : if (sblk->stat_Dot3StatsDeferredTransmissions)
5889 : BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
5890 : sblk->stat_Dot3StatsDeferredTransmissions);
5891 :
5892 : if (sblk->stat_Dot3StatsExcessiveCollisions)
5893 : BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
5894 : sblk->stat_Dot3StatsExcessiveCollisions);
5895 :
5896 : if (sblk->stat_Dot3StatsLateCollisions)
5897 : BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
5898 : sblk->stat_Dot3StatsLateCollisions);
5899 :
5900 : if (sblk->stat_EtherStatsCollisions)
5901 : BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
5902 : sblk->stat_EtherStatsCollisions);
5903 :
5904 : if (sblk->stat_EtherStatsFragments)
5905 : BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
5906 : sblk->stat_EtherStatsFragments);
5907 :
5908 : if (sblk->stat_EtherStatsJabbers)
5909 : BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
5910 : sblk->stat_EtherStatsJabbers);
5911 :
5912 : if (sblk->stat_EtherStatsUndersizePkts)
5913 : BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
5914 : sblk->stat_EtherStatsUndersizePkts);
5915 :
5916 : if (sblk->stat_EtherStatsOverrsizePkts)
5917 : BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
5918 : sblk->stat_EtherStatsOverrsizePkts);
5919 :
5920 : if (sblk->stat_EtherStatsPktsRx64Octets)
5921 : BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
5922 : sblk->stat_EtherStatsPktsRx64Octets);
5923 :
5924 : if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
5925 : BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
5926 : sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
5927 :
5928 : if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
5929 : BNX_PRINTF(sc, "0x%08X : "
5930 : "EtherStatsPktsRx128Octetsto255Octets\n",
5931 : sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
5932 :
5933 : if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
5934 : BNX_PRINTF(sc, "0x%08X : "
5935 : "EtherStatsPktsRx256Octetsto511Octets\n",
5936 : sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
5937 :
5938 : if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
5939 : BNX_PRINTF(sc, "0x%08X : "
5940 : "EtherStatsPktsRx512Octetsto1023Octets\n",
5941 : sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
5942 :
5943 : if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
5944 : BNX_PRINTF(sc, "0x%08X : "
5945 : "EtherStatsPktsRx1024Octetsto1522Octets\n",
5946 : sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
5947 :
5948 : if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
5949 : BNX_PRINTF(sc, "0x%08X : "
5950 : "EtherStatsPktsRx1523Octetsto9022Octets\n",
5951 : sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
5952 :
5953 : if (sblk->stat_EtherStatsPktsTx64Octets)
5954 : BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
5955 : sblk->stat_EtherStatsPktsTx64Octets);
5956 :
5957 : if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
5958 : BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
5959 : sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
5960 :
5961 : if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
5962 : BNX_PRINTF(sc, "0x%08X : "
5963 : "EtherStatsPktsTx128Octetsto255Octets\n",
5964 : sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
5965 :
5966 : if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
5967 : BNX_PRINTF(sc, "0x%08X : "
5968 : "EtherStatsPktsTx256Octetsto511Octets\n",
5969 : sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
5970 :
5971 : if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
5972 : BNX_PRINTF(sc, "0x%08X : "
5973 : "EtherStatsPktsTx512Octetsto1023Octets\n",
5974 : sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
5975 :
5976 : if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
5977 : BNX_PRINTF(sc, "0x%08X : "
5978 : "EtherStatsPktsTx1024Octetsto1522Octets\n",
5979 : sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
5980 :
5981 : if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
5982 : BNX_PRINTF(sc, "0x%08X : "
5983 : "EtherStatsPktsTx1523Octetsto9022Octets\n",
5984 : sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
5985 :
5986 : if (sblk->stat_XonPauseFramesReceived)
5987 : BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
5988 : sblk->stat_XonPauseFramesReceived);
5989 :
5990 : if (sblk->stat_XoffPauseFramesReceived)
5991 : BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
5992 : sblk->stat_XoffPauseFramesReceived);
5993 :
5994 : if (sblk->stat_OutXonSent)
5995 : BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
5996 : sblk->stat_OutXonSent);
5997 :
5998 : if (sblk->stat_OutXoffSent)
5999 : BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
6000 : sblk->stat_OutXoffSent);
6001 :
6002 : if (sblk->stat_FlowControlDone)
6003 : BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
6004 : sblk->stat_FlowControlDone);
6005 :
6006 : if (sblk->stat_MacControlFramesReceived)
6007 : BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
6008 : sblk->stat_MacControlFramesReceived);
6009 :
6010 : if (sblk->stat_XoffStateEntered)
6011 : BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
6012 : sblk->stat_XoffStateEntered);
6013 :
6014 : if (sblk->stat_IfInFramesL2FilterDiscards)
6015 : BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
6016 : sblk->stat_IfInFramesL2FilterDiscards);
6017 :
6018 : if (sblk->stat_IfInRuleCheckerDiscards)
6019 : BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
6020 : sblk->stat_IfInRuleCheckerDiscards);
6021 :
6022 : if (sblk->stat_IfInFTQDiscards)
6023 : BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
6024 : sblk->stat_IfInFTQDiscards);
6025 :
6026 : if (sblk->stat_IfInMBUFDiscards)
6027 : BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
6028 : sblk->stat_IfInMBUFDiscards);
6029 :
6030 : if (sblk->stat_IfInRuleCheckerP4Hit)
6031 : BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
6032 : sblk->stat_IfInRuleCheckerP4Hit);
6033 :
6034 : if (sblk->stat_CatchupInRuleCheckerDiscards)
6035 : BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
6036 : sblk->stat_CatchupInRuleCheckerDiscards);
6037 :
6038 : if (sblk->stat_CatchupInFTQDiscards)
6039 : BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
6040 : sblk->stat_CatchupInFTQDiscards);
6041 :
6042 : if (sblk->stat_CatchupInMBUFDiscards)
6043 : BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
6044 : sblk->stat_CatchupInMBUFDiscards);
6045 :
6046 : if (sblk->stat_CatchupInRuleCheckerP4Hit)
6047 : BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
6048 : sblk->stat_CatchupInRuleCheckerP4Hit);
6049 :
6050 : BNX_PRINTF(sc,
6051 : "-----------------------------"
6052 : "--------------"
6053 : "-----------------------------\n");
6054 : }
6055 :
6056 : void
6057 : bnx_dump_driver_state(struct bnx_softc *sc)
6058 : {
6059 : BNX_PRINTF(sc,
6060 : "-----------------------------"
6061 : " Driver State "
6062 : "-----------------------------\n");
6063 :
6064 : BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
6065 : "address\n", sc);
6066 :
6067 : BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
6068 : sc->status_block);
6069 :
6070 : BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
6071 : "address\n", sc->stats_block);
6072 :
6073 : BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
6074 : 	    "address\n", sc->tx_bd_chain);
6075 :
6076 : BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
6077 : sc->rx_bd_chain);
6078 :
6079 : BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
6080 : sc->tx_mbuf_ptr);
6081 :
6082 : BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
6083 : sc->rx_mbuf_ptr);
6084 :
6085 : BNX_PRINTF(sc,
6086 : " 0x%08X - (sc->interrupts_generated) h/w intrs\n",
6087 : sc->interrupts_generated);
6088 :
6089 : BNX_PRINTF(sc,
6090 : " 0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
6091 : sc->rx_interrupts);
6092 :
6093 : BNX_PRINTF(sc,
6094 : " 0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
6095 : sc->tx_interrupts);
6096 :
6097 : BNX_PRINTF(sc,
6098 : " 0x%08X - (sc->last_status_idx) status block index\n",
6099 : sc->last_status_idx);
6100 :
6101 : BNX_PRINTF(sc, " 0x%08X - (sc->tx_prod) tx producer index\n",
6102 : sc->tx_prod);
6103 :
6104 : BNX_PRINTF(sc, " 0x%08X - (sc->tx_cons) tx consumer index\n",
6105 : sc->tx_cons);
6106 :
6107 : BNX_PRINTF(sc,
6108 : " 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
6109 : sc->tx_prod_bseq);
6110 :
6111 : BNX_PRINTF(sc,
6112 : " 0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
6113 : sc->tx_mbuf_alloc);
6114 :
6115 : BNX_PRINTF(sc,
6116 : " 0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6117 : sc->used_tx_bd);
6118 :
6119 : BNX_PRINTF(sc,
6120 : " 0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6121 : sc->tx_hi_watermark, sc->max_tx_bd);
6122 :
6123 : BNX_PRINTF(sc, " 0x%08X - (sc->rx_prod) rx producer index\n",
6124 : sc->rx_prod);
6125 :
6126 : BNX_PRINTF(sc, " 0x%08X - (sc->rx_cons) rx consumer index\n",
6127 : sc->rx_cons);
6128 :
6129 : BNX_PRINTF(sc,
6130 : " 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
6131 : sc->rx_prod_bseq);
6132 :
6133 : BNX_PRINTF(sc,
6134 : " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6135 : sc->rx_mbuf_alloc);
6136 :
6137 : BNX_PRINTF(sc,
6138 : "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
6139 : sc->rx_low_watermark, sc->max_rx_bd);
6140 :
6141 : BNX_PRINTF(sc,
6142 : " 0x%08X - (sc->mbuf_alloc_failed) "
6143 : "mbuf alloc failures\n",
6144 : sc->mbuf_alloc_failed);
6145 :
6146 : BNX_PRINTF(sc,
6147 : 	    "         0x%08X - (sc->mbuf_sim_alloc_failed) "
6148 : "simulated mbuf alloc failures\n",
6149 : sc->mbuf_sim_alloc_failed);
6150 :
6151 : BNX_PRINTF(sc, "-------------------------------------------"
6152 : "-----------------------------\n");
6153 : }
6154 :
6155 : void
6156 : bnx_dump_hw_state(struct bnx_softc *sc)
6157 : {
6158 : u_int32_t val1;
6159 : int i;
6160 :
6161 : BNX_PRINTF(sc,
6162 : "----------------------------"
6163 : " Hardware State "
6164 : "----------------------------\n");
6165 :
6166 : BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);
6167 :
6168 : val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
6169 : BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
6170 : val1, BNX_MISC_ENABLE_STATUS_BITS);
6171 :
6172 : val1 = REG_RD(sc, BNX_DMA_STATUS);
6173 : BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);
6174 :
6175 : val1 = REG_RD(sc, BNX_CTX_STATUS);
6176 : BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);
6177 :
6178 : val1 = REG_RD(sc, BNX_EMAC_STATUS);
6179 : BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
6180 : BNX_EMAC_STATUS);
6181 :
6182 : val1 = REG_RD(sc, BNX_RPM_STATUS);
6183 : BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);
6184 :
6185 : val1 = REG_RD(sc, BNX_TBDR_STATUS);
6186 : BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
6187 : BNX_TBDR_STATUS);
6188 :
6189 : val1 = REG_RD(sc, BNX_TDMA_STATUS);
6190 : BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
6191 : BNX_TDMA_STATUS);
6192 :
6193 : val1 = REG_RD(sc, BNX_HC_STATUS);
6194 : BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);
6195 :
6196 : BNX_PRINTF(sc,
6197 : "----------------------------"
6198 : "----------------"
6199 : "----------------------------\n");
6200 :
6201 : BNX_PRINTF(sc,
6202 : "----------------------------"
6203 : " Register Dump "
6204 : "----------------------------\n");
6205 :
6206 : for (i = 0x400; i < 0x8000; i += 0x10)
6207 : BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
6208 : i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
6209 : REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
6210 :
6211 : BNX_PRINTF(sc,
6212 : "----------------------------"
6213 : "----------------"
6214 : "----------------------------\n");
6215 : }
6216 :
6217 : void
6218 : bnx_breakpoint(struct bnx_softc *sc)
6219 : {
6220 : /* Unreachable code to shut the compiler up about unused functions. */
6221 : if (0) {
6222 : bnx_dump_txbd(sc, 0, NULL);
6223 : bnx_dump_rxbd(sc, 0, NULL);
6224 : bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
6225 : bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
6226 : bnx_dump_l2fhdr(sc, 0, NULL);
6227 : bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
6228 : bnx_dump_rx_chain(sc, 0, sc->max_rx_bd);
6229 : bnx_dump_status_block(sc);
6230 : bnx_dump_stats_block(sc);
6231 : bnx_dump_driver_state(sc);
6232 : bnx_dump_hw_state(sc);
6233 : }
6234 :
6235 : bnx_dump_driver_state(sc);
6236 : /* Print the important status block fields. */
6237 : bnx_dump_status_block(sc);
6238 :
6239 : #if 0
6240 : /* Call the debugger. */
6241 : breakpoint();
6242 : #endif
6243 :
6244 : return;
6245 : }
6246 : #endif
|