Line data Source code
1 : /* $OpenBSD: xl.c,v 1.132 2017/01/22 10:17:38 dlg Exp $ */
2 :
3 : /*
4 : * Copyright (c) 1997, 1998, 1999
5 : * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6 : *
7 : * Redistribution and use in source and binary forms, with or without
8 : * modification, are permitted provided that the following conditions
9 : * are met:
10 : * 1. Redistributions of source code must retain the above copyright
11 : * notice, this list of conditions and the following disclaimer.
12 : * 2. Redistributions in binary form must reproduce the above copyright
13 : * notice, this list of conditions and the following disclaimer in the
14 : * documentation and/or other materials provided with the distribution.
15 : * 3. All advertising materials mentioning features or use of this software
16 : * must display the following acknowledgement:
17 : * This product includes software developed by Bill Paul.
18 : * 4. Neither the name of the author nor the names of any co-contributors
19 : * may be used to endorse or promote products derived from this software
20 : * without specific prior written permission.
21 : *
22 : * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 : * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 : * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 : * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 : * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 : * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 : * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 : * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 : * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 : * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 : * THE POSSIBILITY OF SUCH DAMAGE.
33 : *
34 : * $FreeBSD: if_xl.c,v 1.77 2000/08/28 20:40:03 wpaul Exp $
35 : */
36 :
37 : /*
38 : * 3Com 3c90x Etherlink XL PCI NIC driver
39 : *
40 : * Supports the 3Com "boomerang", "cyclone", and "hurricane" PCI
41 : * bus-master chips (3c90x cards and embedded controllers) including
42 : * the following:
43 : *
44 : * 3Com 3c900-TPO 10Mbps/RJ-45
45 : * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC
46 : * 3Com 3c905-TX 10/100Mbps/RJ-45
47 : * 3Com 3c905-T4 10/100Mbps/RJ-45
48 : * 3Com 3c900B-TPO 10Mbps/RJ-45
49 : * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC
50 : * 3Com 3c900B-TPC 10Mbps/RJ-45,BNC
51 : * 3Com 3c900B-FL 10Mbps/Fiber-optic
52 : * 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC
53 : * 3Com 3c905B-TX 10/100Mbps/RJ-45
54 : * 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic
55 : * 3Com 3c905C-TX 10/100Mbps/RJ-45 (Tornado ASIC)
56 : * 3Com 3c980-TX 10/100Mbps server adapter (Hurricane ASIC)
57 : * 3Com 3c980C-TX 10/100Mbps server adapter (Tornado ASIC)
58 : * 3Com 3cSOHO100-TX 10/100Mbps/RJ-45 (Hurricane ASIC)
59 : * 3Com 3c450-TX 10/100Mbps/RJ-45 (Tornado ASIC)
60 : * 3Com 3c555 10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
61 : * 3Com 3c556 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
62 : * 3Com 3c556B 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
63 : * 3Com 3c575TX 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
64 : * 3Com 3c575B 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
65 : * 3Com 3c575C 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
66 : * 3Com 3cxfem656 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
67 : * 3Com 3cxfem656b 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
68 : * 3Com 3cxfem656c 10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
69 : * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
70 : * Dell on-board 3c920 10/100Mbps/RJ-45
71 : * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
72 : * Dell Latitude laptop docking station embedded 3c905-TX
73 : *
74 : * Written by Bill Paul <wpaul@ctr.columbia.edu>
75 : * Electrical Engineering Department
76 : * Columbia University, New York City
77 : */
78 :
79 : /*
80 : * The 3c90x series chips use a bus-master DMA interface for transferring
81 : * packets to and from the controller chip. Some of the "vortex" cards
82 : * (3c59x) also supported a bus master mode, however for those chips
83 : * you could only DMA packets to/from a contiguous memory buffer. For
84 : * transmission this would mean copying the contents of the queued mbuf
85 : * chain into an mbuf cluster and then DMAing the cluster. This extra
86 : * copy would sort of defeat the purpose of the bus master support for
87 : * any packet that doesn't fit into a single mbuf.
88 : *
89 : * By contrast, the 3c90x cards support a fragment-based bus master
90 : * mode where mbuf chains can be encapsulated using TX descriptors.
91 : * This is similar to other PCI chips such as the Texas Instruments
92 : * ThunderLAN and the Intel 82557/82558.
93 : *
94 : * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
95 : * bus master chips because they maintain the old PIO interface for
96 : * backwards compatibility, but starting with the 3c905B and the
97 : * "cyclone" chips, the compatibility interface has been dropped.
98 : * Since using bus master DMA is a big win, we use this driver to
99 : * support the PCI "boomerang" chips even though they work with the
100 : * "vortex" driver in order to obtain better performance.
101 : */
102 :
103 : #include "bpfilter.h"
104 :
105 : #include <sys/param.h>
106 : #include <sys/systm.h>
107 : #include <sys/mbuf.h>
108 : #include <sys/protosw.h>
109 : #include <sys/socket.h>
110 : #include <sys/ioctl.h>
111 : #include <sys/errno.h>
112 : #include <sys/malloc.h>
113 : #include <sys/kernel.h>
114 : #include <sys/device.h>
115 :
116 : #include <net/if.h>
117 : #include <net/if_media.h>
118 :
119 : #include <netinet/in.h>
120 : #include <netinet/if_ether.h>
121 :
122 : #include <dev/mii/miivar.h>
123 :
124 : #include <machine/bus.h>
125 :
126 : #if NBPFILTER > 0
127 : #include <net/bpf.h>
128 : #endif
129 :
130 : #include <dev/ic/xlreg.h>
131 :
132 : /*
133 : * TX Checksumming is disabled by default for two reasons:
134 : * - TX Checksumming will occasionally produce corrupt packets
135 : * - TX Checksumming seems to reduce performance
136 : *
137 : * Only 905B/C cards were reported to have this problem, it is possible
138 : * that later chips _may_ be immune.
139 : */
140 : #define XL905B_TXCSUM_BROKEN 1
141 :
142 : int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *);
143 : void xl_stats_update(void *);
144 : int xl_encap(struct xl_softc *, struct xl_chain *,
145 : struct mbuf * );
146 : void xl_rxeof(struct xl_softc *);
147 : void xl_txeof(struct xl_softc *);
148 : void xl_txeof_90xB(struct xl_softc *);
149 : void xl_txeoc(struct xl_softc *);
150 : int xl_intr(void *);
151 : void xl_start(struct ifnet *);
152 : void xl_start_90xB(struct ifnet *);
153 : int xl_ioctl(struct ifnet *, u_long, caddr_t);
154 : void xl_freetxrx(struct xl_softc *);
155 : void xl_watchdog(struct ifnet *);
156 : int xl_ifmedia_upd(struct ifnet *);
157 : void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
158 :
159 : int xl_eeprom_wait(struct xl_softc *);
160 : int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
161 : void xl_mii_sync(struct xl_softc *);
162 : void xl_mii_send(struct xl_softc *, u_int32_t, int);
163 : int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *);
164 : int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *);
165 :
166 : void xl_setcfg(struct xl_softc *);
167 : void xl_setmode(struct xl_softc *, uint64_t);
168 : void xl_iff(struct xl_softc *);
169 : void xl_iff_90x(struct xl_softc *);
170 : void xl_iff_905b(struct xl_softc *);
171 : int xl_list_rx_init(struct xl_softc *);
172 : void xl_fill_rx_ring(struct xl_softc *);
173 : int xl_list_tx_init(struct xl_softc *);
174 : int xl_list_tx_init_90xB(struct xl_softc *);
175 : void xl_wait(struct xl_softc *);
176 : void xl_mediacheck(struct xl_softc *);
177 : void xl_choose_xcvr(struct xl_softc *, int);
178 :
179 : int xl_miibus_readreg(struct device *, int, int);
180 : void xl_miibus_writereg(struct device *, int, int, int);
181 : void xl_miibus_statchg(struct device *);
182 : #ifndef SMALL_KERNEL
183 : int xl_wol(struct ifnet *, int);
184 : void xl_wol_power(struct xl_softc *);
185 : #endif
186 :
187 : int
188 0 : xl_activate(struct device *self, int act)
189 : {
190 0 : struct xl_softc *sc = (struct xl_softc *)self;
191 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
192 : int rv = 0;
193 :
194 0 : switch (act) {
195 : case DVACT_SUSPEND:
196 0 : if (ifp->if_flags & IFF_RUNNING)
197 0 : xl_stop(sc);
198 0 : rv = config_activate_children(self, act);
199 0 : break;
200 : case DVACT_RESUME:
201 0 : if (ifp->if_flags & IFF_UP)
202 0 : xl_init(sc);
203 : break;
204 : case DVACT_POWERDOWN:
205 0 : rv = config_activate_children(self, act);
206 : #ifndef SMALL_KERNEL
207 0 : xl_wol_power(sc);
208 : #endif
209 0 : break;
210 : default:
211 0 : rv = config_activate_children(self, act);
212 0 : break;
213 : }
214 0 : return (rv);
215 : }
216 :
217 : /*
218 : * Murphy's law says that it's possible the chip can wedge and
219 : * the 'command in progress' bit may never clear. Hence, we wait
220 : * only a finite amount of time to avoid getting caught in an
221 : * infinite loop. Normally this delay routine would be a macro,
222 : * but it isn't called during normal operation so we can afford
223 : * to make it a function.
224 : */
225 : void
226 0 : xl_wait(struct xl_softc *sc)
227 : {
228 : int i;
229 :
230 0 : for (i = 0; i < XL_TIMEOUT; i++) {
231 0 : if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
232 : break;
233 : }
234 :
235 0 : if (i == XL_TIMEOUT)
236 0 : printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
237 0 : }
238 :
239 : /*
240 : * MII access routines are provided for adapters with external
241 : * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
242 : * autoneg logic that's faked up to look like a PHY (3c905B-TX).
243 : * Note: if you don't perform the MDIO operations just right,
244 : * it's possible to end up with code that works correctly with
245 : * some chips/CPUs/processor speeds/bus speeds/etc but not
246 : * with others.
247 : */
248 : #define MII_SET(x) \
249 : CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
250 : CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))
251 :
252 : #define MII_CLR(x) \
253 : CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
254 : CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
255 :
256 : /*
257 : * Sync the PHYs by setting data bit and strobing the clock 32 times.
258 : */
void
xl_mii_sync(struct xl_softc *sc)
{
	int i;

	/* Window 4 holds the PHY management register. */
	XL_SEL_WIN(4);
	/* Drive MDIO as an output with the data line held high. */
	MII_SET(XL_MII_DIR|XL_MII_DATA);

	/*
	 * Clock out 32 cycles with MDIO high to resynchronize the PHY
	 * management interface.  NOTE(review): the doubled
	 * MII_SET(XL_MII_DATA) writes between clock edges look like
	 * deliberate bus-access delays -- do not "simplify" them away
	 * without testing on real hardware.
	 */
	for (i = 0; i < 32; i++) {
		MII_SET(XL_MII_CLK);
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
	}
}
276 :
277 : /*
278 : * Clock a series of bits through the MII.
279 : */
void
xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
{
	int i;

	XL_SEL_WIN(4);
	MII_CLR(XL_MII_CLK);

	/* Shift out the low `cnt' bits of `bits', most significant first. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(XL_MII_DATA);
		} else {
			MII_CLR(XL_MII_DATA);
		}
		/*
		 * Strobe the clock low then high; per the MII management
		 * convention the PHY samples MDIO on the rising MDC edge.
		 */
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_CLK);
	}
}
298 :
299 : /*
300 : * Read an PHY register through the MII.
301 : */
/*
 * Bit-bang an MII management read frame out to the PHY and collect the
 * 16-bit result into frame->mii_data.  Returns 0 on success, 1 if the
 * PHY failed to acknowledge the frame.  Runs at splnet() because the
 * MDIO sequence must not be interleaved with other register access.
 */
int
xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
{
	int i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Select register window 4.
	 */

	XL_SEL_WIN(4);

	/* Clear the PHY management register before driving it. */
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
	/*
	 * Turn on data xmit.
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((XL_MII_CLK|XL_MII_DATA));
	MII_SET(XL_MII_CLK);

	/* Turn off xmit. */
	MII_CLR(XL_MII_DIR);

	/* Check for ack: the PHY pulls MDIO low to acknowledge. */
	MII_CLR(XL_MII_CLK);
	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
	MII_SET(XL_MII_CLK);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(XL_MII_CLK);
			MII_SET(XL_MII_CLK);
		}
		goto fail;
	}

	/* Clock in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(XL_MII_CLK);
		if (!ack) {
			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
				frame->mii_data |= i;
		}
		MII_SET(XL_MII_CLK);
	}

fail:

	/* One trailing clock cycle to return the bus to idle. */
	MII_CLR(XL_MII_CLK);
	MII_SET(XL_MII_CLK);

	splx(s);

	if (ack)
		return (1);
	return (0);
}
383 :
384 : /*
385 : * Write to a PHY register through the MII.
386 : */
/*
 * Bit-bang an MII management write frame (address + turnaround + 16 data
 * bits) out to the PHY.  Always returns 0; writes are not acknowledged.
 * Runs at splnet() for the duration of the MDIO sequence.
 */
int
xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
{
	int s;

	s = splnet();

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_WRITEOP;
	frame->mii_turnaround = XL_MII_TURNAROUND;

	/*
	 * Select the window 4.
	 */
	XL_SEL_WIN(4);

	/*
	 * Turn on data output.
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/* Start delimiter, opcode, PHY address, register, TA, then data. */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);
	xl_mii_send(sc, frame->mii_turnaround, 2);
	xl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(XL_MII_CLK);
	MII_CLR(XL_MII_CLK);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(XL_MII_DIR);

	splx(s);

	return (0);
}
434 :
435 : int
436 0 : xl_miibus_readreg(struct device *self, int phy, int reg)
437 : {
438 0 : struct xl_softc *sc = (struct xl_softc *)self;
439 0 : struct xl_mii_frame frame;
440 :
441 0 : if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
442 0 : return (0);
443 :
444 0 : bzero(&frame, sizeof(frame));
445 :
446 0 : frame.mii_phyaddr = phy;
447 0 : frame.mii_regaddr = reg;
448 0 : xl_mii_readreg(sc, &frame);
449 :
450 0 : return (frame.mii_data);
451 0 : }
452 :
453 : void
454 0 : xl_miibus_writereg(struct device *self, int phy, int reg, int data)
455 : {
456 0 : struct xl_softc *sc = (struct xl_softc *)self;
457 0 : struct xl_mii_frame frame;
458 :
459 0 : if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
460 0 : return;
461 :
462 0 : bzero(&frame, sizeof(frame));
463 :
464 0 : frame.mii_phyaddr = phy;
465 0 : frame.mii_regaddr = reg;
466 0 : frame.mii_data = data;
467 :
468 0 : xl_mii_writereg(sc, &frame);
469 0 : }
470 :
471 : void
472 0 : xl_miibus_statchg(struct device *self)
473 : {
474 0 : struct xl_softc *sc = (struct xl_softc *)self;
475 :
476 0 : xl_setcfg(sc);
477 :
478 : /* Set ASIC's duplex mode to match the PHY. */
479 0 : XL_SEL_WIN(3);
480 0 : if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
481 0 : CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
482 : else
483 0 : CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
484 : (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
485 0 : }
486 :
487 : /*
488 : * The EEPROM is slow: give it time to come ready after issuing
489 : * it a command.
490 : */
491 : int
492 0 : xl_eeprom_wait(struct xl_softc *sc)
493 : {
494 : int i;
495 :
496 0 : for (i = 0; i < 100; i++) {
497 0 : if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
498 0 : DELAY(162);
499 : else
500 : break;
501 : }
502 :
503 0 : if (i == 100) {
504 0 : printf("%s: eeprom failed to come ready\n", sc->sc_dev.dv_xname);
505 0 : return (1);
506 : }
507 :
508 0 : return (0);
509 0 : }
510 :
511 : /*
512 : * Read a sequence of words from the EEPROM. Note that ethernet address
513 : * data is stored in the EEPROM in network byte order.
514 : */
/*
 * Read `cnt' 16-bit words from the EEPROM into `dest', starting at word
 * offset `off'.  If `swap' is set each word is byte-swapped from network
 * order.  Returns 0 on success, 1 if the EEPROM never came ready.
 */
int
xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int err = 0, i;
	u_int16_t word = 0, *ptr;
	/* Offset encodings differ by EEPROM command format (5- vs 8-bit). */
#define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
#define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
	/* WARNING! DANGER!
	 * It's easy to accidentally overwrite the rom content!
	 * Note: the 3c575 uses 8bit EEPROM offsets.
	 */
	XL_SEL_WIN(0);

	if (xl_eeprom_wait(sc))
		return (1);

	/* Some adapters keep their configuration at a 0x30 word offset. */
	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
		off += 0x30;

	for (i = 0; i < cnt; i++) {
		/* Issue the read command in the chip's expected format. */
		if (sc->xl_flags & XL_FLAG_8BITROM)
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
		else
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
		err = xl_eeprom_wait(sc);
		if (err)
			break;
		word = CSR_READ_2(sc, XL_W0_EE_DATA);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return (err ? 1 : 0);
}
554 :
555 : void
556 0 : xl_iff(struct xl_softc *sc)
557 : {
558 0 : if (sc->xl_type == XL_TYPE_905B)
559 0 : xl_iff_905b(sc);
560 : else
561 0 : xl_iff_90x(sc);
562 0 : }
563 :
564 : /*
565 : * NICs older than the 3c905B have only one multicast option, which
566 : * is to enable reception of all multicast frames.
567 : */
568 : void
569 0 : xl_iff_90x(struct xl_softc *sc)
570 : {
571 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
572 : struct arpcom *ac = &sc->sc_arpcom;
573 : u_int8_t rxfilt;
574 :
575 0 : XL_SEL_WIN(5);
576 :
577 0 : rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
578 0 : rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
579 : XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL);
580 0 : ifp->if_flags &= ~IFF_ALLMULTI;
581 :
582 : /*
583 : * Always accept broadcast frames.
584 : * Always accept frames destined to our station address.
585 : */
586 0 : rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL;
587 :
588 0 : if (ifp->if_flags & IFF_PROMISC || ac->ac_multicnt > 0) {
589 0 : ifp->if_flags |= IFF_ALLMULTI;
590 0 : if (ifp->if_flags & IFF_PROMISC)
591 0 : rxfilt |= XL_RXFILTER_ALLFRAMES;
592 : else
593 0 : rxfilt |= XL_RXFILTER_ALLMULTI;
594 : }
595 :
596 0 : CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt);
597 :
598 0 : XL_SEL_WIN(7);
599 0 : }
600 :
601 : /*
602 : * 3c905B adapters have a hash filter that we can program.
603 : */
/*
 * Receive filter setup for 3c905B and later: program the 256-entry
 * multicast hash filter, or fall back to all-multi/promiscuous modes.
 */
void
xl_iff_905b(struct xl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	int h = 0, i;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t rxfilt;

	XL_SEL_WIN(5);

	/* Start from the current filter with all mode bits cleared. */
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL |
	    XL_RXFILTER_MULTIHASH);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL;

	/*
	 * The hash filter cannot express address ranges, so any range
	 * membership also forces all-multi.
	 */
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= XL_RXFILTER_ALLFRAMES;
		else
			rxfilt |= XL_RXFILTER_ALLMULTI;
	} else {
		rxfilt |= XL_RXFILTER_MULTIHASH;

		/* first, zot all the existing hash bits */
		for (i = 0; i < XL_HASHFILT_SIZE; i++)
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);

		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Hash is the low byte of the big-endian CRC32. */
			h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    0x000000FF;
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH |
			    XL_HASH_SET | h);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt);

	XL_SEL_WIN(7);
}
657 :
658 : void
659 0 : xl_setcfg(struct xl_softc *sc)
660 : {
661 : u_int32_t icfg;
662 :
663 0 : XL_SEL_WIN(3);
664 0 : icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
665 0 : icfg &= ~XL_ICFG_CONNECTOR_MASK;
666 0 : if (sc->xl_media & XL_MEDIAOPT_MII ||
667 0 : sc->xl_media & XL_MEDIAOPT_BT4)
668 0 : icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
669 0 : if (sc->xl_media & XL_MEDIAOPT_BTX)
670 0 : icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
671 :
672 0 : CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
673 0 : CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
674 0 : }
675 :
676 : void
677 0 : xl_setmode(struct xl_softc *sc, uint64_t media)
678 : {
679 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
680 : u_int32_t icfg;
681 : u_int16_t mediastat;
682 :
683 0 : XL_SEL_WIN(4);
684 0 : mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
685 0 : XL_SEL_WIN(3);
686 0 : icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
687 :
688 0 : if (sc->xl_media & XL_MEDIAOPT_BT) {
689 0 : if (IFM_SUBTYPE(media) == IFM_10_T) {
690 0 : ifp->if_baudrate = IF_Mbps(10);
691 0 : sc->xl_xcvr = XL_XCVR_10BT;
692 0 : icfg &= ~XL_ICFG_CONNECTOR_MASK;
693 : icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
694 0 : mediastat |= XL_MEDIASTAT_LINKBEAT|
695 : XL_MEDIASTAT_JABGUARD;
696 0 : mediastat &= ~XL_MEDIASTAT_SQEENB;
697 0 : }
698 : }
699 :
700 0 : if (sc->xl_media & XL_MEDIAOPT_BFX) {
701 0 : if (IFM_SUBTYPE(media) == IFM_100_FX) {
702 0 : ifp->if_baudrate = IF_Mbps(100);
703 0 : sc->xl_xcvr = XL_XCVR_100BFX;
704 0 : icfg &= ~XL_ICFG_CONNECTOR_MASK;
705 0 : icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
706 0 : mediastat |= XL_MEDIASTAT_LINKBEAT;
707 0 : mediastat &= ~XL_MEDIASTAT_SQEENB;
708 0 : }
709 : }
710 :
711 0 : if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
712 0 : if (IFM_SUBTYPE(media) == IFM_10_5) {
713 0 : ifp->if_baudrate = IF_Mbps(10);
714 0 : sc->xl_xcvr = XL_XCVR_AUI;
715 0 : icfg &= ~XL_ICFG_CONNECTOR_MASK;
716 0 : icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
717 0 : mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
718 : XL_MEDIASTAT_JABGUARD);
719 0 : mediastat |= ~XL_MEDIASTAT_SQEENB;
720 0 : }
721 0 : if (IFM_SUBTYPE(media) == IFM_10_FL) {
722 0 : ifp->if_baudrate = IF_Mbps(10);
723 0 : sc->xl_xcvr = XL_XCVR_AUI;
724 0 : icfg &= ~XL_ICFG_CONNECTOR_MASK;
725 0 : icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
726 0 : mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
727 : XL_MEDIASTAT_JABGUARD);
728 0 : mediastat |= ~XL_MEDIASTAT_SQEENB;
729 0 : }
730 : }
731 :
732 0 : if (sc->xl_media & XL_MEDIAOPT_BNC) {
733 0 : if (IFM_SUBTYPE(media) == IFM_10_2) {
734 0 : ifp->if_baudrate = IF_Mbps(10);
735 0 : sc->xl_xcvr = XL_XCVR_COAX;
736 0 : icfg &= ~XL_ICFG_CONNECTOR_MASK;
737 0 : icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
738 0 : mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
739 : XL_MEDIASTAT_JABGUARD|
740 : XL_MEDIASTAT_SQEENB);
741 0 : }
742 : }
743 :
744 0 : if ((media & IFM_GMASK) == IFM_FDX ||
745 0 : IFM_SUBTYPE(media) == IFM_100_FX) {
746 0 : XL_SEL_WIN(3);
747 0 : CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
748 0 : } else {
749 0 : XL_SEL_WIN(3);
750 0 : CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
751 : (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
752 : }
753 :
754 0 : if (IFM_SUBTYPE(media) == IFM_10_2)
755 0 : CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
756 : else
757 0 : CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
758 0 : CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
759 0 : XL_SEL_WIN(4);
760 0 : CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
761 0 : DELAY(800);
762 0 : XL_SEL_WIN(7);
763 0 : }
764 :
/*
 * Hard-reset the chip: issue a global reset, then individually reset
 * the RX and TX engines, and reassert any board-specific LED/MII power
 * inversion options that the reset cleared.  The DELAYs in here are
 * load-bearing workarounds for real hardware quirks -- see the inline
 * comments before changing them.
 */
void
xl_reset(struct xl_softc *sc)
{
	int i;

	XL_SEL_WIN(0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
	    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
	     XL_RESETOPT_DISADVFD:0));

	/*
	 * Pause briefly after issuing the reset command before trying
	 * to access any other registers. With my 3c575C cardbus card,
	 * failing to do this results in the system locking up while
	 * trying to poll the command busy bit in the status register.
	 */
	DELAY(100000);

	/* Wait for the command-busy bit to clear. */
	for (i = 0; i < XL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
			break;
	}

	if (i == XL_TIMEOUT)
		printf("%s: reset didn't complete\n", sc->sc_dev.dv_xname);

	/* Note: the RX reset takes an absurd amount of time
	 * on newer versions of the Tornado chips such as those
	 * on the 3c905CX and newer 3c908C cards. We wait an
	 * extra amount of time so that xl_wait() doesn't complain
	 * and annoy the users.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	DELAY(100000);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);

	/* Reassert inverted LED/MII power options on boards that need it. */
	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
		XL_SEL_WIN(2);
		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc,
		    XL_W2_RESET_OPTIONS)
		    | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR)?XL_RESETOPT_INVERT_LED:0)
		    | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR)?XL_RESETOPT_INVERT_MII:0)
		    );
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(100000);
}
817 :
818 : /*
819 : * This routine is a kludge to work around possible hardware faults
820 : * or manufacturing defects that can cause the media options register
821 : * (or reset options register, as it's called for the first generation
822 : * 3c90x adapters) to return an incorrect result. I have encountered
823 : * one Dell Latitude laptop docking station with an integrated 3c905-TX
824 : * which doesn't have any of the 'mediaopt' bits set. This screws up
825 : * the attach routine pretty badly because it doesn't know what media
826 : * to look for. If we find ourselves in this predicament, this routine
827 : * will try to guess the media options values and warn the user of a
828 : * possible manufacturing defect with his adapter/system/whatever.
829 : */
void
xl_mediacheck(struct xl_softc *sc)
{
	/*
	 * If some of the media options bits are set, assume they are
	 * correct. If not, try to figure it out down below.
	 * XXX I should check for 10baseFL, but I don't have an adapter
	 * to test with.
	 */
	if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
		/*
		 * Check the XCVR value. If it's not in the normal range
		 * of values, we need to fake it up here.
		 */
		if (sc->xl_xcvr <= XL_XCVR_AUTO)
			return;
		else {
			printf("%s: bogus xcvr value "
			    "in EEPROM (%x)\n", sc->sc_dev.dv_xname, sc->xl_xcvr);
			printf("%s: choosing new default based "
			    "on card type\n", sc->sc_dev.dv_xname);
		}
	} else {
		/* A 10FL-capable 905B legitimately has only the 10FL bit. */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media & XL_MEDIAOPT_10FL)
			return;
		printf("%s: WARNING: no media options bits set in "
		    "the media options register!!\n", sc->sc_dev.dv_xname);
		printf("%s: this could be a manufacturing defect in "
		    "your adapter or system\n", sc->sc_dev.dv_xname);
		printf("%s: attempting to guess media type; you "
		    "should probably consult your vendor\n", sc->sc_dev.dv_xname);
	}

	/* Fall back to guessing media support from the PCI device ID. */
	xl_choose_xcvr(sc, 1);
}
866 :
/*
 * Guess the supported media options and default transceiver from the
 * PCI device ID stored in the EEPROM.  Used when the media options
 * register cannot be trusted (see xl_mediacheck()).  If `verbose' is
 * set, report the guess on the console.
 */
void
xl_choose_xcvr(struct xl_softc *sc, int verbose)
{
	u_int16_t devid;

	/*
	 * Read the device ID from the EEPROM.
	 * This is what's loaded into the PCI device ID register, so it has
	 * to be correct otherwise we wouldn't have gotten this far.
	 */
	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);

	switch(devid) {
	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
		sc->xl_media = XL_MEDIAOPT_BT;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing 10BaseT transceiver\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing COMBO (AUI/BNC/TP)\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing TPC (BNC/TP)\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
		sc->xl_media = XL_MEDIAOPT_10FL;
		sc->xl_xcvr = XL_XCVR_AUI;
		if (verbose)
			printf("%s: guessing 10baseFL\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
	case TC_DEVICEID_HURRICANE_575A:	/* 3c575TX */
	case TC_DEVICEID_HURRICANE_575B:	/* 3c575B */
	case TC_DEVICEID_HURRICANE_575C:	/* 3c575C */
	case TC_DEVICEID_HURRICANE_656:		/* 3c656 */
	case TC_DEVICEID_HURRICANE_656B:	/* 3c656B */
	case TC_DEVICEID_TORNADO_656C:		/* 3c656C */
	case TC_DEVICEID_TORNADO_10_100BT_920B:	/* 3c920B-EMB */
		sc->xl_media = XL_MEDIAOPT_MII;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("%s: guessing MII\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
		sc->xl_media = XL_MEDIAOPT_BT4;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("%s: guessing 100BaseT4/MII\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
	case TC_DEVICEID_HURRICANE_10_100BT_SERV:/* 3c980-TX */
	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
		sc->xl_media = XL_MEDIAOPT_BTX;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("%s: guessing 10/100 internal\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("%s: guessing 10/100 plus BNC/AUI\n",
			    sc->sc_dev.dv_xname);
		break;
	default:
		printf("%s: unknown device ID: %x -- "
		    "defaulting to 10baseT\n", sc->sc_dev.dv_xname, devid);
		sc->xl_media = XL_MEDIAOPT_BT;
		break;
	}
}
957 :
958 : /*
959 : * Initialize the transmit descriptors.
960 : */
961 : int
962 0 : xl_list_tx_init(struct xl_softc *sc)
963 : {
964 : struct xl_chain_data *cd;
965 : struct xl_list_data *ld;
966 : int i;
967 :
968 0 : cd = &sc->xl_cdata;
969 0 : ld = sc->xl_ldata;
970 0 : for (i = 0; i < XL_TX_LIST_CNT; i++) {
971 0 : cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
972 0 : if (i == (XL_TX_LIST_CNT - 1))
973 0 : cd->xl_tx_chain[i].xl_next = NULL;
974 : else
975 0 : cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
976 : }
977 :
978 0 : cd->xl_tx_free = &cd->xl_tx_chain[0];
979 0 : cd->xl_tx_tail = cd->xl_tx_head = NULL;
980 :
981 0 : return (0);
982 : }
983 :
984 : /*
 * Initialize the transmit descriptors for the 3c90xB: a circular,
 * doubly-linked ring with per-descriptor DMA addresses.
986 : */
987 : int
988 0 : xl_list_tx_init_90xB(struct xl_softc *sc)
989 : {
990 : struct xl_chain_data *cd;
991 : struct xl_list_data *ld;
992 : int i, next, prev;
993 :
994 0 : cd = &sc->xl_cdata;
995 0 : ld = sc->xl_ldata;
996 0 : for (i = 0; i < XL_TX_LIST_CNT; i++) {
997 0 : if (i == (XL_TX_LIST_CNT - 1))
998 0 : next = 0;
999 : else
1000 0 : next = i + 1;
1001 0 : if (i == 0)
1002 0 : prev = XL_TX_LIST_CNT - 1;
1003 : else
1004 0 : prev = i - 1;
1005 0 : cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1006 0 : cd->xl_tx_chain[i].xl_phys =
1007 0 : sc->sc_listmap->dm_segs[0].ds_addr +
1008 0 : offsetof(struct xl_list_data, xl_tx_list[i]);
1009 0 : cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[next];
1010 0 : cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[prev];
1011 : }
1012 :
1013 0 : bzero(ld->xl_tx_list, sizeof(struct xl_list) * XL_TX_LIST_CNT);
1014 0 : ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
1015 :
1016 0 : cd->xl_tx_prod = 1;
1017 0 : cd->xl_tx_cons = 1;
1018 0 : cd->xl_tx_cnt = 0;
1019 :
1020 0 : return (0);
1021 : }
1022 :
1023 : /*
1024 : * Initialize the RX descriptors and allocate mbufs for them. Note that
1025 : * we arrange the descriptors in a closed ring, so that the last descriptor
1026 : * points back to the first.
1027 : */
1028 : int
1029 0 : xl_list_rx_init(struct xl_softc *sc)
1030 : {
1031 : struct xl_chain_data *cd;
1032 : struct xl_list_data *ld;
1033 : int i, n;
1034 : bus_addr_t next;
1035 :
1036 0 : cd = &sc->xl_cdata;
1037 0 : ld = sc->xl_ldata;
1038 :
1039 0 : for (i = 0; i < XL_RX_LIST_CNT; i++) {
1040 0 : cd->xl_rx_chain[i].xl_ptr =
1041 0 : (struct xl_list_onefrag *)&ld->xl_rx_list[i];
1042 0 : if (i == (XL_RX_LIST_CNT - 1))
1043 0 : n = 0;
1044 : else
1045 0 : n = i + 1;
1046 0 : cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[n];
1047 0 : next = sc->sc_listmap->dm_segs[0].ds_addr +
1048 0 : offsetof(struct xl_list_data, xl_rx_list[n]);
1049 0 : ld->xl_rx_list[i].xl_next = htole32(next);
1050 : }
1051 :
1052 0 : cd->xl_rx_prod = cd->xl_rx_cons = &cd->xl_rx_chain[0];
1053 0 : if_rxr_init(&cd->xl_rx_ring, 2, XL_RX_LIST_CNT - 1);
1054 0 : xl_fill_rx_ring(sc);
1055 0 : return (0);
1056 : }
1057 :
1058 : void
1059 0 : xl_fill_rx_ring(struct xl_softc *sc)
1060 : {
1061 : struct xl_chain_data *cd;
1062 : u_int slots;
1063 :
1064 0 : cd = &sc->xl_cdata;
1065 :
1066 0 : for (slots = if_rxr_get(&cd->xl_rx_ring, XL_RX_LIST_CNT);
1067 0 : slots > 0; slots--) {
1068 0 : if (xl_newbuf(sc, cd->xl_rx_prod) == ENOBUFS)
1069 : break;
1070 0 : cd->xl_rx_prod = cd->xl_rx_prod->xl_next;
1071 : }
1072 0 : if_rxr_put(&cd->xl_rx_ring, slots);
1073 0 : }
1074 :
1075 : /*
1076 : * Initialize an RX descriptor and attach an MBUF cluster.
1077 : */
int
xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
{
	struct mbuf *m_new = NULL;
	bus_dmamap_t map;

	/* Allocate a cluster mbuf; fail softly so the caller can retry. */
	m_new = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (!m_new)
		return (ENOBUFS);

	/* Load the new cluster into the spare map first, so that on
	 * failure the descriptor's current mapping is left untouched. */
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
	    mtod(m_new, caddr_t), MCLBYTES, NULL, BUS_DMA_NOWAIT) != 0) {
		m_freem(m_new);
		return (ENOBUFS);
	}

	/* sync the old map, and unload it (if necessary) */
	if (c->map->dm_nsegs != 0) {
		bus_dmamap_sync(sc->sc_dmat, c->map,
		    0, c->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, c->map);
	}

	/* Swap the freshly-loaded spare map into the descriptor. */
	map = c->map;
	c->map = sc->sc_rx_sparemap;
	sc->sc_rx_sparemap = map;

	/* Force longword alignment for packet payload. */
	m_adj(m_new, ETHER_ALIGN);

	bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Rewrite the one-fragment descriptor: buffer address (past the
	 * alignment pad), length with the last-fragment bit, and a zero
	 * status word so the chip sees the slot as available. */
	c->xl_mbuf = m_new;
	c->xl_ptr->xl_frag.xl_addr =
	    htole32(c->map->dm_segs[0].ds_addr + ETHER_ALIGN);
	c->xl_ptr->xl_frag.xl_len =
	    htole32(c->map->dm_segs[0].ds_len | XL_LAST_FRAG);
	c->xl_ptr->xl_status = htole32(0);

	/* Flush the updated descriptor out to the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    ((caddr_t)c->xl_ptr - sc->sc_listkva), sizeof(struct xl_list),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1125 :
1126 : /*
1127 : * A frame has been uploaded: pass the resulting mbuf chain up to
1128 : * the higher level protocols.
1129 : */
1130 : void
1131 0 : xl_rxeof(struct xl_softc *sc)
1132 : {
1133 0 : struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1134 : struct mbuf *m;
1135 : struct ifnet *ifp;
1136 : struct xl_chain_onefrag *cur_rx;
1137 : int total_len = 0;
1138 : u_int32_t rxstat;
1139 : u_int16_t sumflags = 0;
1140 :
1141 0 : ifp = &sc->sc_arpcom.ac_if;
1142 :
1143 : again:
1144 :
1145 0 : while (if_rxr_inuse(&sc->xl_cdata.xl_rx_ring) > 0) {
1146 0 : cur_rx = sc->xl_cdata.xl_rx_cons;
1147 0 : bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1148 : ((caddr_t)cur_rx->xl_ptr - sc->sc_listkva),
1149 : sizeof(struct xl_list),
1150 : BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1151 0 : if ((rxstat = letoh32(sc->xl_cdata.xl_rx_cons->xl_ptr->xl_status)) == 0)
1152 : break;
1153 0 : m = cur_rx->xl_mbuf;
1154 0 : cur_rx->xl_mbuf = NULL;
1155 0 : sc->xl_cdata.xl_rx_cons = cur_rx->xl_next;
1156 0 : if_rxr_put(&sc->xl_cdata.xl_rx_ring, 1);
1157 0 : total_len = rxstat & XL_RXSTAT_LENMASK;
1158 :
1159 : /*
1160 : * Since we have told the chip to allow large frames,
1161 : * we need to trap giant frame errors in software. We allow
1162 : * a little more than the normal frame size to account for
1163 : * frames with VLAN tags.
1164 : */
1165 0 : if (total_len > XL_MAX_FRAMELEN)
1166 0 : rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);
1167 :
1168 : /*
1169 : * If an error occurs, update stats, clear the
1170 : * status word and leave the mbuf cluster in place:
1171 : * it should simply get re-used next time this descriptor
1172 : * comes up in the ring.
1173 : */
1174 0 : if (rxstat & XL_RXSTAT_UP_ERROR) {
1175 0 : ifp->if_ierrors++;
1176 0 : cur_rx->xl_ptr->xl_status = htole32(0);
1177 0 : m_freem(m);
1178 0 : continue;
1179 : }
1180 :
1181 : /*
1182 : * If the error bit was not set, the upload complete
1183 : * bit should be set which means we have a valid packet.
1184 : * If not, something truly strange has happened.
1185 : */
1186 0 : if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
1187 0 : printf("%s: bad receive status -- "
1188 0 : "packet dropped\n", sc->sc_dev.dv_xname);
1189 0 : ifp->if_ierrors++;
1190 0 : cur_rx->xl_ptr->xl_status = htole32(0);
1191 0 : m_freem(m);
1192 0 : continue;
1193 : }
1194 :
1195 0 : m->m_pkthdr.len = m->m_len = total_len;
1196 :
1197 0 : if (sc->xl_type == XL_TYPE_905B) {
1198 0 : if (!(rxstat & XL_RXSTAT_IPCKERR) &&
1199 0 : (rxstat & XL_RXSTAT_IPCKOK))
1200 0 : sumflags |= M_IPV4_CSUM_IN_OK;
1201 :
1202 0 : if (!(rxstat & XL_RXSTAT_TCPCKERR) &&
1203 0 : (rxstat & XL_RXSTAT_TCPCKOK))
1204 0 : sumflags |= M_TCP_CSUM_IN_OK;
1205 :
1206 0 : if (!(rxstat & XL_RXSTAT_UDPCKERR) &&
1207 0 : (rxstat & XL_RXSTAT_UDPCKOK))
1208 0 : sumflags |= M_UDP_CSUM_IN_OK;
1209 :
1210 0 : m->m_pkthdr.csum_flags = sumflags;
1211 0 : }
1212 :
1213 0 : ml_enqueue(&ml, m);
1214 : }
1215 :
1216 0 : xl_fill_rx_ring(sc);
1217 :
1218 : /*
1219 : * Handle the 'end of channel' condition. When the upload
1220 : * engine hits the end of the RX ring, it will stall. This
1221 : * is our cue to flush the RX ring, reload the uplist pointer
1222 : * register and unstall the engine.
1223 : * XXX This is actually a little goofy. With the ThunderLAN
1224 : * chip, you get an interrupt when the receiver hits the end
1225 : * of the receive ring, which tells you exactly when you
1226 : * you need to reload the ring pointer. Here we have to
1227 : * fake it. I'm mad at myself for not being clever enough
1228 : * to avoid the use of a goto here.
1229 : */
1230 0 : if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
1231 0 : CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
1232 0 : CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
1233 0 : xl_wait(sc);
1234 0 : CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
1235 0 : xl_fill_rx_ring(sc);
1236 0 : goto again;
1237 : }
1238 :
1239 0 : if_input(ifp, &ml);
1240 0 : }
1241 :
1242 : /*
1243 : * A frame was downloaded to the chip. It's safe for us to clean up
1244 : * the list buffers.
1245 : */
void
xl_txeof(struct xl_softc *sc)
{
	struct xl_chain *cur_tx;
	struct ifnet *ifp;

	ifp = &sc->sc_arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded. Note: the 3c905B
	 * sets a special bit in the status word to let us
	 * know that a frame has been downloaded, but the
	 * original 3c900/3c905 adapters don't do that.
	 * Consequently, we have to use a different test if
	 * xl_type != XL_TYPE_905B.
	 */
	while (sc->xl_cdata.xl_tx_head != NULL) {
		cur_tx = sc->xl_cdata.xl_tx_head;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva),
		    sizeof(struct xl_list),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* A nonzero downlist pointer means the chip is still
		 * working on the chain; nothing is safe to reclaim yet. */
		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
			break;

		/* Unhook the head descriptor, release its DMA mapping
		 * and mbuf, and put it back on the free list. */
		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
		if (cur_tx->map->dm_nsegs != 0) {
			bus_dmamap_t map = cur_tx->map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (cur_tx->xl_mbuf != NULL) {
			m_freem(cur_tx->xl_mbuf);
			cur_tx->xl_mbuf = NULL;
		}
		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
		sc->xl_cdata.xl_tx_free = cur_tx;
	}

	if (sc->xl_cdata.xl_tx_head == NULL) {
		/* Queue fully drained: let the stack send again. */
		ifq_clr_oactive(&ifp->if_snd);
		/* Clear the timeout timer. */
		ifp->if_timer = 0;
		sc->xl_cdata.xl_tx_tail = NULL;
	} else {
		/* Frames remain: if the download engine is stalled or
		 * idle, repoint it at the head of the pending chain and
		 * kick it back into motion. */
		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
		    !CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
			    sc->sc_listmap->dm_segs[0].ds_addr +
			    ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
			    sc->sc_listkva));
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
	}
}
1306 :
1307 : void
1308 0 : xl_txeof_90xB(struct xl_softc *sc)
1309 : {
1310 : struct xl_chain *cur_tx = NULL;
1311 : struct ifnet *ifp;
1312 : int idx;
1313 :
1314 0 : ifp = &sc->sc_arpcom.ac_if;
1315 :
1316 0 : idx = sc->xl_cdata.xl_tx_cons;
1317 0 : while (idx != sc->xl_cdata.xl_tx_prod) {
1318 :
1319 0 : cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
1320 :
1321 0 : if ((cur_tx->xl_ptr->xl_status &
1322 0 : htole32(XL_TXSTAT_DL_COMPLETE)) == 0)
1323 : break;
1324 :
1325 0 : if (cur_tx->xl_mbuf != NULL) {
1326 0 : m_freem(cur_tx->xl_mbuf);
1327 0 : cur_tx->xl_mbuf = NULL;
1328 0 : }
1329 :
1330 0 : if (cur_tx->map->dm_nsegs != 0) {
1331 0 : bus_dmamap_sync(sc->sc_dmat, cur_tx->map,
1332 : 0, cur_tx->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1333 0 : bus_dmamap_unload(sc->sc_dmat, cur_tx->map);
1334 0 : }
1335 :
1336 0 : sc->xl_cdata.xl_tx_cnt--;
1337 0 : XL_INC(idx, XL_TX_LIST_CNT);
1338 : }
1339 :
1340 0 : sc->xl_cdata.xl_tx_cons = idx;
1341 :
1342 0 : if (cur_tx != NULL)
1343 0 : ifq_clr_oactive(&ifp->if_snd);
1344 0 : if (sc->xl_cdata.xl_tx_cnt == 0)
1345 0 : ifp->if_timer = 0;
1346 0 : }
1347 :
1348 : /*
1349 : * TX 'end of channel' interrupt handler. Actually, we should
1350 : * only get a 'TX complete' interrupt if there's a transmit error,
1351 : * so this is really TX error handler.
1352 : */
void
xl_txeoc(struct xl_softc *sc)
{
	u_int8_t txstat;

	/* Drain every pending TX status byte; each read exposes the
	 * next entry in the chip's internal status FIFO. */
	while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
		if (txstat & XL_TXSTATUS_UNDERRUN ||
		    txstat & XL_TXSTATUS_JABBER ||
		    txstat & XL_TXSTATUS_RECLAIM) {
			/* 0x90 (reclaim-only) is routine; don't log it. */
			if (txstat != 0x90) {
				printf("%s: transmission error: %x\n",
				    sc->sc_dev.dv_xname, txstat);
			}
			/* Serious error: reset the transmitter, then
			 * repoint the download engine at the unfinished
			 * portion of the TX ring. */
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
			xl_wait(sc);
			if (sc->xl_type == XL_TYPE_905B) {
				if (sc->xl_cdata.xl_tx_cnt) {
					int i;
					struct xl_chain *c;

					/* Restart from the consumer slot
					 * and re-enable down-polling. */
					i = sc->xl_cdata.xl_tx_cons;
					c = &sc->xl_cdata.xl_tx_chain[i];
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    c->xl_phys);
					CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
				}
			} else {
				if (sc->xl_cdata.xl_tx_head != NULL)
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    sc->sc_listmap->dm_segs[0].ds_addr +
					    ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
					    sc->sc_listkva));
			}
			/*
			 * Remember to set this for the
			 * first generation 3c90X chips.
			 */
			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
			/* On underrun, raise the start threshold so the
			 * FIFO holds more of the frame before TX begins. */
			if (txstat & XL_TXSTATUS_UNDERRUN &&
			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
#ifdef notdef
				printf("%s: tx underrun, increasing tx start"
				    " threshold to %d\n", sc->sc_dev.dv_xname,
				    sc->xl_tx_thresh);
#endif
			}
			CSR_WRITE_2(sc, XL_COMMAND,
			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
			if (sc->xl_type == XL_TYPE_905B) {
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
			}
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		} else {
			/* Non-fatal status: just re-enable and unstall. */
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
		/*
		 * Write an arbitrary byte to the TX_STATUS register
		 * to clear this interrupt/error and advance to the next.
		 */
		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
	}
}
1419 :
/*
 * Interrupt handler: ack and service every pending interrupt source,
 * then restart transmission if the send queue has work. Returns
 * nonzero iff at least one interrupt was ours (shared-IRQ protocol).
 */
int
xl_intr(void *arg)
{
	struct xl_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int claimed = 0;

	sc = arg;
	ifp = &sc->sc_arpcom.ac_if;

	/* 0xFFFF means the device is gone (e.g. hot-unplugged CardBus). */
	while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS && status != 0xFFFF) {

		claimed = 1;

		/* Ack the sources we are about to service. */
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_INTR_ACK|(status & XL_INTRS));

		/* Bus-specific extra ack, if the attachment needs one. */
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (!(ifp->if_flags & IFF_RUNNING))
			return (claimed);

		if (status & XL_STAT_UP_COMPLETE)
			xl_rxeof(sc);

		if (status & XL_STAT_DOWN_COMPLETE) {
			if (sc->xl_type == XL_TYPE_905B)
				xl_txeof_90xB(sc);
			else
				xl_txeof(sc);
		}

		/* TX_COMPLETE only fires on transmit errors. */
		if (status & XL_STAT_TX_COMPLETE) {
			ifp->if_oerrors++;
			xl_txeoc(sc);
		}

		/* Host (adapter) failure: reinitialize from scratch. */
		if (status & XL_STAT_ADFAIL)
			xl_init(sc);

		/* Statistics counter overflow: harvest the counters now,
		 * without rescheduling the periodic timeout. */
		if (status & XL_STAT_STATSOFLOW) {
			sc->xl_stats_no_timeout = 1;
			xl_stats_update(sc);
			sc->xl_stats_no_timeout = 0;
		}
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		(*ifp->if_start)(ifp);

	return (claimed);
}
1474 :
/*
 * Harvest the chip's statistics counters (window 6 plus BadSSD in
 * window 4); reading them also clears them, preventing STATSOFLOW
 * interrupts. Runs from the xl_stsup_tmo timeout unless called with
 * xl_stats_no_timeout set (interrupt/init path).
 */
void
xl_stats_update(void *xsc)
{
	struct xl_softc *sc;
	struct ifnet *ifp;
	struct xl_stats xl_stats;
	u_int8_t *p;
	int i;
	struct mii_data *mii = NULL;

	bzero(&xl_stats, sizeof(struct xl_stats));

	sc = xsc;
	ifp = &sc->sc_arpcom.ac_if;
	if (sc->xl_hasmii)
		mii = &sc->sc_mii;

	/* Fill struct xl_stats byte-by-byte from consecutive registers;
	 * this relies on the struct layout matching the register map. */
	p = (u_int8_t *)&xl_stats;

	/* Read all the stats registers. */
	XL_SEL_WIN(6);

	for (i = 0; i < 16; i++)
		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);

	ifp->if_ierrors += xl_stats.xl_rx_overrun;

	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
	    xl_stats.xl_tx_single_collision +
	    xl_stats.xl_tx_late_collision;

	/*
	 * Boomerang and cyclone chips have an extra stats counter
	 * in window 4 (BadSSD). We have to read this too in order
	 * to clear out all the stats registers and avoid a statsoflow
	 * interrupt.
	 */
	XL_SEL_WIN(4);
	CSR_READ_1(sc, XL_W4_BADSSD);

	/* Drive the MII autonegotiation state machine once a second. */
	if (mii != NULL && (!sc->xl_stats_no_timeout))
		mii_tick(mii);

	/* Return to window 7 for normal operation. */
	XL_SEL_WIN(7);

	if (!sc->xl_stats_no_timeout)
		timeout_add_sec(&sc->xl_stsup_tmo, 1);
}
1523 :
1524 : /*
1525 : * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1526 : * pointers to the fragment pointers.
1527 : */
int
xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf *m_head)
{
	int error, frag, total_len;
	u_int32_t status;
	bus_dmamap_t map;

	/* Use the spare map so descriptor c's current mapping survives
	 * until we know the load succeeded (maps are swapped below). */
	map = sc->sc_tx_sparemap;

reload:
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
	    m_head, BUS_DMA_NOWAIT);

	/* EFBIG (too many segments) is recoverable via defrag below;
	 * any other failure drops the frame. Note: m_head is consumed
	 * (freed) on every failure path. */
	if (error && error != EFBIG) {
		m_freem(m_head);
		return (1);
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (frag = 0, total_len = 0; frag < map->dm_nsegs; frag++) {
		if (frag == XL_MAXFRAGS)
			break;
		total_len += map->dm_segs[frag].ds_len;
		c->xl_ptr->xl_frag[frag].xl_addr =
		    htole32(map->dm_segs[frag].ds_addr);
		c->xl_ptr->xl_frag[frag].xl_len =
		    htole32(map->dm_segs[frag].ds_len);
	}

	/*
	 * Handle special case: we used up all 63 fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (error) {
		struct mbuf *m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			m_freem(m_head);
			return (1);
		}
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				m_freem(m_head);
				return (1);
			}
		}
		/* Flatten the chain into one contiguous mbuf and retry
		 * the DMA load (now at most one or two segments). */
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		goto reload;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Release any previous mapping still held by this descriptor. */
	if (c->map->dm_nsegs != 0) {
		bus_dmamap_sync(sc->sc_dmat, c->map,
		    0, c->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, c->map);
	}

	/* Commit: swap the loaded map in, make the old one the spare,
	 * and finish the descriptor (last-frag bit, length, end of list). */
	c->xl_mbuf = m_head;
	sc->sc_tx_sparemap = c->map;
	c->map = map;
	c->xl_ptr->xl_frag[frag - 1].xl_len |= htole32(XL_LAST_FRAG);
	c->xl_ptr->xl_status = htole32(total_len);
	c->xl_ptr->xl_next = 0;

	if (sc->xl_type == XL_TYPE_905B) {
		/* 905B: request hardware checksum offload as needed and
		 * defeat the frame-length rounding misfeature. */
		status = XL_TXSTAT_RND_DEFEAT;

#ifndef XL905B_TXCSUM_BROKEN
		if (m_head->m_pkthdr.csum_flags) {
			if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
				status |= XL_TXSTAT_IPCKSUM;
			if (m_head->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				status |= XL_TXSTAT_TCPCKSUM;
			if (m_head->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				status |= XL_TXSTAT_UDPCKSUM;
		}
#endif
		c->xl_ptr->xl_status = htole32(status);
	}

	/* Flush the whole TX descriptor array out to the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct xl_list_data, xl_tx_list[0]),
	    sizeof(struct xl_list) * XL_TX_LIST_CNT,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1632 :
1633 : /*
1634 : * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1635 : * to the mbuf data regions directly in the transmit lists. We also save a
1636 : * copy of the pointers since the transmit list fragment pointers are
1637 : * physical addresses.
1638 : */
void
xl_start(struct ifnet *ifp)
{
	struct xl_softc *sc;
	struct mbuf *m_head = NULL;
	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain *prev_tx;
	int error;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->xl_cdata.xl_tx_free == NULL) {
		xl_txeoc(sc);
		xl_txeof(sc);
		if (sc->xl_cdata.xl_tx_free == NULL) {
			ifq_set_oactive(&ifp->if_snd);
			return;
		}
	}

	/* Remember where this batch starts so it can be linked onto the
	 * tail of any chain already owned by the chip. */
	start_tx = sc->xl_cdata.xl_tx_free;

	while (sc->xl_cdata.xl_tx_free != NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		prev_tx = cur_tx;
		cur_tx = sc->xl_cdata.xl_tx_free;

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* xl_encap freed m_head; reuse the descriptor. */
			cur_tx = prev_tx;
			continue;
		}

		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
		cur_tx->xl_next = NULL;

		/* Chain it together. */
		if (prev != NULL) {
			prev->xl_next = cur_tx;
			prev->xl_ptr->xl_next =
			    sc->sc_listmap->dm_segs[0].ds_addr +
			    ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva);

		}
		prev = cur_tx;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
			    BPF_DIRECTION_OUT);
#endif
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);

	/*
	 * Queue the packets. If the TX channel is clear, update
	 * the downlist pointer register.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
	xl_wait(sc);

	if (sc->xl_cdata.xl_tx_head != NULL) {
		/* Append this batch after the existing chain and move the
		 * interrupt request from the old tail to the new one. */
		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    ((caddr_t)start_tx->xl_ptr - sc->sc_listkva);
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &=
		    htole32(~XL_TXSTAT_DL_INTR);
		sc->xl_cdata.xl_tx_tail = cur_tx;
	} else {
		sc->xl_cdata.xl_tx_head = start_tx;
		sc->xl_cdata.xl_tx_tail = cur_tx;
	}
	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    ((caddr_t)start_tx->xl_ptr - sc->sc_listkva));

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);

	XL_SEL_WIN(7);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	/*
	 * XXX Under certain conditions, usually on slower machines
	 * where interrupts may be dropped, it's possible for the
	 * adapter to chew up all the buffers in the receive ring
	 * and stall, without us being able to do anything about it.
	 * To guard against this, we need to make a pass over the
	 * RX queue to make sure there aren't any packets pending.
	 * Doing it here means we can flush the receive ring at the
	 * same time the chip is DMAing the transmit descriptors we
	 * just gave it.
	 *
	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
	 * nature of their chips in all their marketing literature;
	 * we may as well take advantage of it. :)
	 */
	xl_rxeof(sc);
}
1770 :
void
xl_start_90xB(struct ifnet *ifp)
{
	struct xl_softc *sc;
	struct mbuf *m_head = NULL;
	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain *prev_tx;
	int error, idx;

	sc = ifp->if_softc;

	if (ifq_is_oactive(&ifp->if_snd))
		return;

	/* Begin filling at the producer index. */
	idx = sc->xl_cdata.xl_tx_prod;
	start_tx = &sc->xl_cdata.xl_tx_chain[idx];

	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {

		/* Keep a few slots in reserve so the ring never wraps
		 * onto descriptors the chip is still transmitting. */
		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		prev_tx = cur_tx;
		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* xl_encap freed m_head; reuse the descriptor. */
			cur_tx = prev_tx;
			continue;
		}

		/* Chain it together. */
		if (prev != NULL)
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		prev = cur_tx;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
			    BPF_DIRECTION_OUT);
#endif

		XL_INC(idx, XL_TX_LIST_CNT);
		sc->xl_cdata.xl_tx_cnt++;
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);

	/* Start transmission */
	sc->xl_cdata.xl_tx_prod = idx;
	/* Linking the previous descriptor to the new batch is what hands
	 * the frames to the chip's down-polling engine. */
	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
1852 :
/*
 * Stop, reset and (re)initialize the adapter: program the station
 * address, build the RX/TX descriptor rings, set thresholds and the
 * receive filter, and enable interrupts and the RX/TX engines.
 * Takes splnet for the duration.
 */
void
xl_init(void *xsc)
{
	struct xl_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s, i;
	struct mii_data *mii = NULL;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	xl_stop(sc);

	/* Reset the chip to a known state. */
	xl_reset(sc);

	if (sc->xl_hasmii)
		mii = &sc->sc_mii;

	/* Without MII the RX reset here is safe; with MII it would
	 * disturb the PHY, so it is skipped. */
	if (mii == NULL) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
		xl_wait(sc);
	}
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
	DELAY(10000);

	/* Init our MAC address */
	XL_SEL_WIN(2);
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
		    sc->sc_arpcom.ac_enaddr[i]);
	}

	/* Clear the station mask. */
	for (i = 0; i < 3; i++)
		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
#ifdef notdef
	/* Reset TX and RX. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif
	/* Init circular RX list. */
	if (xl_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dev.dv_xname);
		xl_stop(sc);
		splx(s);
		return;
	}

	/* Init TX descriptors. */
	if (sc->xl_type == XL_TYPE_905B)
		xl_list_tx_init_90xB(sc);
	else
		xl_list_tx_init(sc);

	/*
	 * Set the TX freethresh value.
	 * Note that this has no effect on 3c905B "cyclone"
	 * cards but is required for 3c900/3c905 "boomerang"
	 * cards in order to enable the download engine.
	 */
	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);

	/*
	 * If this is a 3c905B, also set the tx reclaim threshold.
	 * This helps cut down on the number of tx reclaim errors
	 * that could happen on a busy network. The chip multiplies
	 * the register value by 16 to obtain the actual threshold
	 * in bytes, so we divide by 16 when setting the value here.
	 * The existing threshold value can be examined by reading
	 * the register at offset 9 in window 5.
	 */
	if (sc->xl_type == XL_TYPE_905B) {
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
	}

	/* Program promiscuous mode and multicast filters. */
	xl_iff(sc);

	/*
	 * Load the address of the RX list. We have to
	 * stall the upload engine before we can manipulate
	 * the uplist pointer register, then unstall it when
	 * we're finished. We also have to wait for the
	 * stall command to complete before proceeding.
	 * Note that we have to do this after any RX resets
	 * have completed since the uplist register is cleared
	 * by a reset.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
	xl_wait(sc);
	CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct xl_list_data, xl_rx_list[0]));
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
	xl_wait(sc);

	if (sc->xl_type == XL_TYPE_905B) {
		/* Set polling interval */
		CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
		/* Load the address of the TX list */
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
		xl_wait(sc);
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct xl_list_data, xl_tx_list[0]));
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		xl_wait(sc);
	}

	/*
	 * If the coax transceiver is on, make sure to enable
	 * the DC-DC converter.
	 */
	XL_SEL_WIN(3);
	if (sc->xl_xcvr == XL_XCVR_COAX)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);

	/*
	 * increase packet size to allow reception of 802.1q or ISL packets.
	 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
	 * control register. For 3c90xB/C chips, use the RX packet size
	 * register.
	 */

	if (sc->xl_type == XL_TYPE_905B)
		CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
	else {
		u_int8_t macctl;
		macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
		macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
	}

	/* Clear out the stats counters. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	sc->xl_stats_no_timeout = 1;
	xl_stats_update(sc);
	sc->xl_stats_no_timeout = 0;
	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);

	/* Bus-specific extra ack, if the attachment needs one. */
	if (sc->intr_ack)
		(*sc->intr_ack)(sc);

	/* Set the RX early threshold */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
	CSR_WRITE_4(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
	xl_wait(sc);

	/* Restore state of BMCR */
	if (mii != NULL)
		mii_mediachg(mii);

	/* Select window 7 for normal operations. */
	XL_SEL_WIN(7);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	/* Kick off the periodic statistics/MII-tick timeout. */
	timeout_add_sec(&sc->xl_stsup_tmo, 1);
}
2041 :
2042 : /*
2043 : * Set media options.
2044 : */
2045 : int
2046 0 : xl_ifmedia_upd(struct ifnet *ifp)
2047 : {
2048 : struct xl_softc *sc;
2049 : struct ifmedia *ifm = NULL;
2050 : struct mii_data *mii = NULL;
2051 :
2052 0 : sc = ifp->if_softc;
2053 :
2054 0 : if (sc->xl_hasmii)
2055 0 : mii = &sc->sc_mii;
2056 0 : if (mii == NULL)
2057 0 : ifm = &sc->ifmedia;
2058 : else
2059 0 : ifm = &mii->mii_media;
2060 :
2061 0 : switch(IFM_SUBTYPE(ifm->ifm_media)) {
2062 : case IFM_100_FX:
2063 : case IFM_10_FL:
2064 : case IFM_10_2:
2065 : case IFM_10_5:
2066 0 : xl_setmode(sc, ifm->ifm_media);
2067 0 : return (0);
2068 : break;
2069 : default:
2070 : break;
2071 : }
2072 :
2073 0 : if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2074 0 : || sc->xl_media & XL_MEDIAOPT_BT4) {
2075 0 : xl_init(sc);
2076 0 : } else {
2077 0 : xl_setmode(sc, ifm->ifm_media);
2078 : }
2079 :
2080 0 : return (0);
2081 0 : }
2082 :
2083 : /*
2084 : * Report current media status.
2085 : */
/*
 * Report current media status (ifmedia "status" callback).
 *
 * Reads the media status from register window 4 and the active
 * connector type from the internal config register in window 3,
 * then translates the connector type into ifmedia active/status
 * words.  For MII-driven connectors the PHY state is polled via
 * mii_pollstat() instead.
 */
void
xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct xl_softc *sc;
	u_int32_t icfg;
	u_int16_t status = 0;
	struct mii_data *mii = NULL;

	sc = ifp->if_softc;
	if (sc->xl_hasmii != 0)
		mii = &sc->sc_mii;

	/* Media status lives in window 4. */
	XL_SEL_WIN(4);
	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);

	/* Extract the active connector type from window 3's config reg. */
	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
	icfg >>= XL_ICFG_CONNECTOR_BITS;

	ifmr->ifm_active = IFM_ETHER;
	ifmr->ifm_status = IFM_AVALID;

	/*
	 * XL_MEDIASTAT_CARRIER set means carrier *lost* (xl_watchdog()
	 * prints "no carrier" when the bit is set), so a clear bit
	 * reports the link as active.
	 */
	if ((status & XL_MEDIASTAT_CARRIER) == 0)
		ifmr->ifm_status |= IFM_ACTIVE;

	switch(icfg) {
	case XL_XCVR_10BT:
		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		/* Duplex setting is reflected in the window-3 MAC control. */
		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
		break;
	case XL_XCVR_AUI:
		/*
		 * Some 3c905B boards are really 10baseFL in disguise;
		 * xl_attach() applies the same test when adding media.
		 */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
				ifmr->ifm_active |= IFM_FDX;
			else
				ifmr->ifm_active |= IFM_HDX;
		} else
			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
		break;
	case XL_XCVR_COAX:
		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
		break;
	/*
	 * XXX MII and BTX/AUTO should be separate cases.
	 */

	case XL_XCVR_100BTX:
	case XL_XCVR_AUTO:
	case XL_XCVR_MII:
		/* Let the PHY report link state for MII-like media. */
		if (mii != NULL) {
			mii_pollstat(mii);
			ifmr->ifm_active = mii->mii_media_active;
			ifmr->ifm_status = mii->mii_media_status;
		}
		break;
	case XL_XCVR_100BFX:
		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
		break;
	default:
		/* NOTE(review): icfg is u_int32_t; %u would match better than %d. */
		printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname, icfg);
		break;
	}
}
2154 :
2155 : int
2156 0 : xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2157 : {
2158 0 : struct xl_softc *sc = ifp->if_softc;
2159 0 : struct ifreq *ifr = (struct ifreq *)data;
2160 : int s, error = 0;
2161 : struct mii_data *mii = NULL;
2162 :
2163 0 : s = splnet();
2164 :
2165 0 : switch(command) {
2166 : case SIOCSIFADDR:
2167 0 : ifp->if_flags |= IFF_UP;
2168 0 : if (!(ifp->if_flags & IFF_RUNNING))
2169 0 : xl_init(sc);
2170 : break;
2171 :
2172 : case SIOCSIFFLAGS:
2173 0 : if (ifp->if_flags & IFF_UP) {
2174 0 : if (ifp->if_flags & IFF_RUNNING)
2175 0 : error = ENETRESET;
2176 : else
2177 0 : xl_init(sc);
2178 : } else {
2179 0 : if (ifp->if_flags & IFF_RUNNING)
2180 0 : xl_stop(sc);
2181 : }
2182 : break;
2183 :
2184 : case SIOCGIFMEDIA:
2185 : case SIOCSIFMEDIA:
2186 0 : if (sc->xl_hasmii != 0)
2187 0 : mii = &sc->sc_mii;
2188 0 : if (mii == NULL)
2189 0 : error = ifmedia_ioctl(ifp, ifr,
2190 0 : &sc->ifmedia, command);
2191 : else
2192 0 : error = ifmedia_ioctl(ifp, ifr,
2193 0 : &mii->mii_media, command);
2194 : break;
2195 :
2196 : case SIOCGIFRXR:
2197 0 : error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
2198 0 : NULL, MCLBYTES, &sc->xl_cdata.xl_rx_ring);
2199 0 : break;
2200 :
2201 : default:
2202 0 : error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
2203 0 : }
2204 :
2205 0 : if (error == ENETRESET) {
2206 0 : if (ifp->if_flags & IFF_RUNNING)
2207 0 : xl_iff(sc);
2208 : error = 0;
2209 0 : }
2210 :
2211 0 : splx(s);
2212 0 : return (error);
2213 : }
2214 :
2215 : void
2216 0 : xl_watchdog(struct ifnet *ifp)
2217 : {
2218 : struct xl_softc *sc;
2219 : u_int16_t status = 0;
2220 :
2221 0 : sc = ifp->if_softc;
2222 :
2223 0 : ifp->if_oerrors++;
2224 0 : XL_SEL_WIN(4);
2225 0 : status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2226 0 : printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2227 :
2228 0 : if (status & XL_MEDIASTAT_CARRIER)
2229 0 : printf("%s: no carrier - transceiver cable problem?\n",
2230 : sc->sc_dev.dv_xname);
2231 0 : xl_txeoc(sc);
2232 0 : xl_txeof(sc);
2233 0 : xl_rxeof(sc);
2234 0 : xl_init(sc);
2235 :
2236 0 : if (!IFQ_IS_EMPTY(&ifp->if_snd))
2237 0 : (*ifp->if_start)(ifp);
2238 0 : }
2239 :
2240 : void
2241 0 : xl_freetxrx(struct xl_softc *sc)
2242 : {
2243 : bus_dmamap_t map;
2244 : int i;
2245 :
2246 : /*
2247 : * Free data in the RX lists.
2248 : */
2249 0 : for (i = 0; i < XL_RX_LIST_CNT; i++) {
2250 0 : if (sc->xl_cdata.xl_rx_chain[i].map->dm_nsegs != 0) {
2251 : map = sc->xl_cdata.xl_rx_chain[i].map;
2252 :
2253 0 : bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2254 : BUS_DMASYNC_POSTREAD);
2255 0 : bus_dmamap_unload(sc->sc_dmat, map);
2256 0 : }
2257 0 : if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
2258 0 : m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
2259 0 : sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
2260 0 : }
2261 : }
2262 0 : bzero(&sc->xl_ldata->xl_rx_list, sizeof(sc->xl_ldata->xl_rx_list));
2263 : /*
2264 : * Free the TX list buffers.
2265 : */
2266 0 : for (i = 0; i < XL_TX_LIST_CNT; i++) {
2267 0 : if (sc->xl_cdata.xl_tx_chain[i].map->dm_nsegs != 0) {
2268 : map = sc->xl_cdata.xl_tx_chain[i].map;
2269 :
2270 0 : bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2271 : BUS_DMASYNC_POSTWRITE);
2272 0 : bus_dmamap_unload(sc->sc_dmat, map);
2273 0 : }
2274 0 : if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
2275 0 : m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
2276 0 : sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
2277 0 : }
2278 : }
2279 0 : bzero(&sc->xl_ldata->xl_tx_list, sizeof(sc->xl_ldata->xl_tx_list));
2280 0 : }
2281 :
2282 : /*
2283 : * Stop the adapter and free any mbufs allocated to the
2284 : * RX and TX lists.
2285 : */
/*
 * Stop the adapter: halt the stats timer, mark the interface down,
 * disable RX/TX and interrupts in the prescribed order, then release
 * all ring buffers via xl_freetxrx().
 */
void
xl_stop(struct xl_softc *sc)
{
	struct ifnet *ifp;

	/* Stop the stats updater. */
	timeout_del(&sc->xl_stsup_tmo);

	ifp = &sc->sc_arpcom.ac_if;

	/* Mark the interface down before touching the hardware. */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/*
	 * Quiesce the chip: stop the receiver and stats engine, clear
	 * the interrupt enable mask, discard any partial RX packet
	 * (waiting for the command to finish), then stop the
	 * transmitter and the coax (DC-DC) transceiver.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
	/* Settle time after stopping the coax transceiver. */
	DELAY(800);

	/* Compiled out: full RX/TX resets are not performed on stop. */
#ifdef foo
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif

	/* Ack any latched interrupt and mask both status and intr bits. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);

	/* Bus-specific interrupt acknowledge hook (e.g. cardbus). */
	if (sc->intr_ack)
		(*sc->intr_ack)(sc);

	xl_freetxrx(sc);
}
2325 :
2326 : #ifndef SMALL_KERNEL
2327 : void
2328 0 : xl_wol_power(struct xl_softc *sc)
2329 : {
2330 : /* Re-enable RX and call upper layer WOL power routine
2331 : * if WOL is enabled. */
2332 0 : if ((sc->xl_flags & XL_FLAG_WOL) && sc->wol_power) {
2333 0 : CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
2334 0 : sc->wol_power(sc->wol_power_arg);
2335 0 : }
2336 0 : }
2337 : #endif
2338 :
2339 : void
2340 0 : xl_attach(struct xl_softc *sc)
2341 : {
2342 0 : u_int8_t enaddr[ETHER_ADDR_LEN];
2343 0 : u_int16_t xcvr[2];
2344 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2345 : int i;
2346 : uint64_t media = IFM_ETHER|IFM_100_TX|IFM_FDX;
2347 : struct ifmedia *ifm;
2348 :
2349 0 : i = splnet();
2350 0 : xl_reset(sc);
2351 0 : splx(i);
2352 :
2353 : /*
2354 : * Get station address from the EEPROM.
2355 : */
2356 0 : if (xl_read_eeprom(sc, (caddr_t)&enaddr, XL_EE_OEM_ADR0, 3, 1)) {
2357 0 : printf("\n%s: failed to read station address\n",
2358 0 : sc->sc_dev.dv_xname);
2359 0 : return;
2360 : }
2361 0 : memcpy(&sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN);
2362 :
2363 0 : if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct xl_list_data),
2364 : PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
2365 0 : BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
2366 0 : printf(": can't alloc list mem\n");
2367 0 : return;
2368 : }
2369 0 : if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
2370 : sizeof(struct xl_list_data), &sc->sc_listkva,
2371 0 : BUS_DMA_NOWAIT) != 0) {
2372 0 : printf(": can't map list mem\n");
2373 0 : return;
2374 : }
2375 0 : if (bus_dmamap_create(sc->sc_dmat, sizeof(struct xl_list_data), 1,
2376 : sizeof(struct xl_list_data), 0, BUS_DMA_NOWAIT,
2377 0 : &sc->sc_listmap) != 0) {
2378 0 : printf(": can't alloc list map\n");
2379 0 : return;
2380 : }
2381 0 : if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
2382 0 : sizeof(struct xl_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
2383 0 : printf(": can't load list map\n");
2384 0 : return;
2385 : }
2386 0 : sc->xl_ldata = (struct xl_list_data *)sc->sc_listkva;
2387 :
2388 0 : for (i = 0; i < XL_RX_LIST_CNT; i++) {
2389 0 : if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
2390 : 0, BUS_DMA_NOWAIT,
2391 0 : &sc->xl_cdata.xl_rx_chain[i].map) != 0) {
2392 0 : printf(": can't create rx map\n");
2393 0 : return;
2394 : }
2395 : }
2396 0 : if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
2397 0 : BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
2398 0 : printf(": can't create rx spare map\n");
2399 0 : return;
2400 : }
2401 :
2402 0 : for (i = 0; i < XL_TX_LIST_CNT; i++) {
2403 0 : if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
2404 : XL_TX_LIST_CNT - 3, MCLBYTES, 0, BUS_DMA_NOWAIT,
2405 0 : &sc->xl_cdata.xl_tx_chain[i].map) != 0) {
2406 0 : printf(": can't create tx map\n");
2407 0 : return;
2408 : }
2409 : }
2410 0 : if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, XL_TX_LIST_CNT - 3,
2411 0 : MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
2412 0 : printf(": can't create tx spare map\n");
2413 0 : return;
2414 : }
2415 :
2416 0 : printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
2417 :
2418 0 : if (sc->xl_flags & (XL_FLAG_INVERT_LED_PWR|XL_FLAG_INVERT_MII_PWR)) {
2419 : u_int16_t n;
2420 :
2421 0 : XL_SEL_WIN(2);
2422 0 : n = CSR_READ_2(sc, 12);
2423 :
2424 0 : if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR)
2425 0 : n |= 0x0010;
2426 :
2427 0 : if (sc->xl_flags & XL_FLAG_INVERT_MII_PWR)
2428 0 : n |= 0x4000;
2429 :
2430 0 : CSR_WRITE_2(sc, 12, n);
2431 0 : }
2432 :
2433 : /*
2434 : * Figure out the card type. 3c905B adapters have the
2435 : * 'supportsNoTxLength' bit set in the capabilities
2436 : * word in the EEPROM.
2437 : * Note: my 3c575C cardbus card lies. It returns a value
2438 : * of 0x1578 for its capabilities word, which is somewhat
2439 : * nonsensical. Another way to distinguish a 3c90x chip
2440 : * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
2441 : * bit. This will only be set for 3c90x boomerage chips.
2442 : */
2443 0 : xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
2444 0 : if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
2445 0 : !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
2446 0 : sc->xl_type = XL_TYPE_905B;
2447 : else
2448 0 : sc->xl_type = XL_TYPE_90X;
2449 :
2450 : /* Set the TX start threshold for best performance. */
2451 0 : sc->xl_tx_thresh = XL_MIN_FRAMELEN;
2452 :
2453 0 : timeout_set(&sc->xl_stsup_tmo, xl_stats_update, sc);
2454 :
2455 0 : ifp->if_softc = sc;
2456 0 : ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2457 0 : ifp->if_ioctl = xl_ioctl;
2458 0 : if (sc->xl_type == XL_TYPE_905B)
2459 0 : ifp->if_start = xl_start_90xB;
2460 : else
2461 0 : ifp->if_start = xl_start;
2462 0 : ifp->if_watchdog = xl_watchdog;
2463 0 : ifp->if_baudrate = 10000000;
2464 0 : IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
2465 0 : memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
2466 :
2467 0 : ifp->if_capabilities = IFCAP_VLAN_MTU;
2468 :
2469 : #ifndef XL905B_TXCSUM_BROKEN
2470 : ifp->if_capabilities |= IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|
2471 : IFCAP_CSUM_UDPv4;
2472 : #endif
2473 :
2474 0 : XL_SEL_WIN(3);
2475 0 : sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
2476 :
2477 0 : xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
2478 0 : sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
2479 0 : sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
2480 0 : sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;
2481 :
2482 0 : xl_mediacheck(sc);
2483 :
2484 0 : if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2485 0 : || sc->xl_media & XL_MEDIAOPT_BT4) {
2486 0 : ifmedia_init(&sc->sc_mii.mii_media, 0,
2487 : xl_ifmedia_upd, xl_ifmedia_sts);
2488 0 : sc->xl_hasmii = 1;
2489 0 : sc->sc_mii.mii_ifp = ifp;
2490 0 : sc->sc_mii.mii_readreg = xl_miibus_readreg;
2491 0 : sc->sc_mii.mii_writereg = xl_miibus_writereg;
2492 0 : sc->sc_mii.mii_statchg = xl_miibus_statchg;
2493 0 : xl_setcfg(sc);
2494 0 : mii_attach((struct device *)sc, &sc->sc_mii, 0xffffffff,
2495 : MII_PHY_ANY, MII_OFFSET_ANY, 0);
2496 :
2497 0 : if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2498 0 : ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
2499 : 0, NULL);
2500 0 : ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2501 0 : }
2502 : else {
2503 0 : ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2504 : }
2505 : ifm = &sc->sc_mii.mii_media;
2506 0 : }
2507 : else {
2508 0 : ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
2509 0 : sc->xl_hasmii = 0;
2510 : ifm = &sc->ifmedia;
2511 : }
2512 :
2513 : /*
2514 : * Sanity check. If the user has selected "auto" and this isn't
2515 : * a 10/100 card of some kind, we need to force the transceiver
2516 : * type to something sane.
2517 : */
2518 0 : if (sc->xl_xcvr == XL_XCVR_AUTO)
2519 0 : xl_choose_xcvr(sc, 0);
2520 :
2521 0 : if (sc->xl_media & XL_MEDIAOPT_BT) {
2522 0 : ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL);
2523 0 : ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
2524 0 : if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
2525 0 : ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
2526 : }
2527 :
2528 0 : if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
2529 : /*
2530 : * Check for a 10baseFL board in disguise.
2531 : */
2532 0 : if (sc->xl_type == XL_TYPE_905B &&
2533 0 : sc->xl_media == XL_MEDIAOPT_10FL) {
2534 0 : ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL);
2535 0 : ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX,
2536 : 0, NULL);
2537 0 : if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
2538 0 : ifmedia_add(ifm,
2539 : IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
2540 : } else {
2541 0 : ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL);
2542 : }
2543 : }
2544 :
2545 0 : if (sc->xl_media & XL_MEDIAOPT_BNC) {
2546 0 : ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL);
2547 0 : }
2548 :
2549 0 : if (sc->xl_media & XL_MEDIAOPT_BFX) {
2550 0 : ifp->if_baudrate = 100000000;
2551 0 : ifmedia_add(ifm, IFM_ETHER|IFM_100_FX, 0, NULL);
2552 0 : }
2553 :
2554 : /* Choose a default media. */
2555 0 : switch(sc->xl_xcvr) {
2556 : case XL_XCVR_10BT:
2557 : media = IFM_ETHER|IFM_10_T;
2558 0 : xl_setmode(sc, media);
2559 0 : break;
2560 : case XL_XCVR_AUI:
2561 0 : if (sc->xl_type == XL_TYPE_905B &&
2562 0 : sc->xl_media == XL_MEDIAOPT_10FL) {
2563 : media = IFM_ETHER|IFM_10_FL;
2564 0 : xl_setmode(sc, media);
2565 0 : } else {
2566 : media = IFM_ETHER|IFM_10_5;
2567 0 : xl_setmode(sc, media);
2568 : }
2569 : break;
2570 : case XL_XCVR_COAX:
2571 : media = IFM_ETHER|IFM_10_2;
2572 0 : xl_setmode(sc, media);
2573 0 : break;
2574 : case XL_XCVR_AUTO:
2575 : case XL_XCVR_100BTX:
2576 : case XL_XCVR_MII:
2577 : /* Chosen by miibus */
2578 : break;
2579 : case XL_XCVR_100BFX:
2580 : media = IFM_ETHER|IFM_100_FX;
2581 0 : xl_setmode(sc, media);
2582 0 : break;
2583 : default:
2584 0 : printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname,
2585 : sc->xl_xcvr);
2586 : /*
2587 : * This will probably be wrong, but it prevents
2588 : * the ifmedia code from panicking.
2589 : */
2590 : media = IFM_ETHER | IFM_10_T;
2591 0 : break;
2592 : }
2593 :
2594 0 : if (sc->xl_hasmii == 0)
2595 0 : ifmedia_set(&sc->ifmedia, media);
2596 :
2597 0 : if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
2598 0 : XL_SEL_WIN(0);
2599 0 : CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
2600 0 : }
2601 :
2602 : #ifndef SMALL_KERNEL
2603 : /* Check availability of WOL. */
2604 0 : if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0) {
2605 0 : ifp->if_capabilities |= IFCAP_WOL;
2606 0 : ifp->if_wol = xl_wol;
2607 0 : xl_wol(ifp, 0);
2608 0 : }
2609 : #endif
2610 :
2611 : /*
2612 : * Call MI attach routines.
2613 : */
2614 0 : if_attach(ifp);
2615 0 : ether_ifattach(ifp);
2616 0 : }
2617 :
2618 : int
2619 0 : xl_detach(struct xl_softc *sc)
2620 : {
2621 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2622 : extern void xl_freetxrx(struct xl_softc *);
2623 :
2624 : /* Unhook our tick handler. */
2625 0 : timeout_del(&sc->xl_stsup_tmo);
2626 :
2627 0 : xl_freetxrx(sc);
2628 :
2629 : /* Detach all PHYs */
2630 0 : if (sc->xl_hasmii)
2631 0 : mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2632 :
2633 : /* Delete all remaining media. */
2634 0 : ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2635 :
2636 0 : ether_ifdetach(ifp);
2637 0 : if_detach(ifp);
2638 :
2639 0 : return (0);
2640 : }
2641 :
2642 : #ifndef SMALL_KERNEL
2643 : int
2644 0 : xl_wol(struct ifnet *ifp, int enable)
2645 : {
2646 0 : struct xl_softc *sc = ifp->if_softc;
2647 :
2648 0 : XL_SEL_WIN(7);
2649 0 : if (enable) {
2650 0 : if (!(ifp->if_flags & IFF_RUNNING))
2651 0 : xl_init(sc);
2652 0 : CSR_WRITE_2(sc, XL_W7_BM_PME, XL_BM_PME_MAGIC);
2653 0 : sc->xl_flags |= XL_FLAG_WOL;
2654 0 : } else {
2655 0 : CSR_WRITE_2(sc, XL_W7_BM_PME, 0);
2656 0 : sc->xl_flags &= ~XL_FLAG_WOL;
2657 : }
2658 0 : return (0);
2659 : }
2660 : #endif
2661 :
2662 : struct cfdriver xl_cd = {
2663 : 0, "xl", DV_IFNET
2664 : };
|