Line data Source code
1 : /* $OpenBSD: re.c,v 1.202 2017/06/19 09:36:27 mpi Exp $ */
2 : /* $FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $ */
3 : /*
4 : * Copyright (c) 1997, 1998-2003
5 : * Bill Paul <wpaul@windriver.com>. All rights reserved.
6 : *
7 : * Redistribution and use in source and binary forms, with or without
8 : * modification, are permitted provided that the following conditions
9 : * are met:
10 : * 1. Redistributions of source code must retain the above copyright
11 : * notice, this list of conditions and the following disclaimer.
12 : * 2. Redistributions in binary form must reproduce the above copyright
13 : * notice, this list of conditions and the following disclaimer in the
14 : * documentation and/or other materials provided with the distribution.
15 : * 3. All advertising materials mentioning features or use of this software
16 : * must display the following acknowledgement:
17 : * This product includes software developed by Bill Paul.
18 : * 4. Neither the name of the author nor the names of any co-contributors
19 : * may be used to endorse or promote products derived from this software
20 : * without specific prior written permission.
21 : *
22 : * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 : * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 : * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 : * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 : * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 : * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 : * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 : * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 : * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 : * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 : * THE POSSIBILITY OF SUCH DAMAGE.
33 : */
34 :
35 : /*
36 : * Realtek 8139C+/8169/8169S/8110S PCI NIC driver
37 : *
38 : * Written by Bill Paul <wpaul@windriver.com>
39 : * Senior Networking Software Engineer
40 : * Wind River Systems
41 : */
42 :
43 : /*
44 : * This driver is designed to support Realtek's next generation of
45 : * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
46 : * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
47 : * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
48 : *
49 : * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
50 : * with the older 8139 family, however it also supports a special
51 : * C+ mode of operation that provides several new performance enhancing
52 : * features. These include:
53 : *
54 : * o Descriptor based DMA mechanism. Each descriptor represents
55 : * a single packet fragment. Data buffers may be aligned on
56 : * any byte boundary.
57 : *
58 : * o 64-bit DMA
59 : *
60 : * o TCP/IP checksum offload for both RX and TX
61 : *
62 : * o High and normal priority transmit DMA rings
63 : *
64 : * o VLAN tag insertion and extraction
65 : *
66 : * o TCP large send (segmentation offload)
67 : *
68 : * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
69 : * programming API is fairly straightforward. The RX filtering, EEPROM
70 : * access and PHY access is the same as it is on the older 8139 series
71 : * chips.
72 : *
73 : * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
74 : * same programming API and feature set as the 8139C+ with the following
75 : * differences and additions:
76 : *
77 : * o 1000Mbps mode
78 : *
79 : * o Jumbo frames
80 : *
81 : * o GMII and TBI ports/registers for interfacing with copper
82 : * or fiber PHYs
83 : *
84 : * o RX and TX DMA rings can have up to 1024 descriptors
85 : * (the 8139C+ allows a maximum of 64)
86 : *
87 : * o Slight differences in register layout from the 8139C+
88 : *
89 : * The TX start and timer interrupt registers are at different locations
90 : * on the 8169 than they are on the 8139C+. Also, the status word in the
91 : * RX descriptor has a slightly different bit layout. The 8169 does not
92 : * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
93 : * copper gigE PHY.
94 : *
95 : * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
96 : * (the 'S' stands for 'single-chip'). These devices have the same
97 : * programming API as the older 8169, but also have some vendor-specific
98 : * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
99 : * part designed to be pin-compatible with the Realtek 8100 10/100 chip.
100 : *
101 : * This driver takes advantage of the RX and TX checksum offload and
102 : * VLAN tag insertion/extraction features. It also implements TX
103 : * interrupt moderation using the timer interrupt registers, which
104 : * significantly reduces TX interrupt load. There is also support
105 : * for jumbo frames, however the 8169/8169S/8110S can not transmit
106 : * jumbo frames larger than 7440, so the max MTU possible with this
107 : * driver is 7422 bytes.
108 : */
109 :
110 : #include "bpfilter.h"
111 : #include "vlan.h"
112 :
113 : #include <sys/param.h>
114 : #include <sys/endian.h>
115 : #include <sys/systm.h>
116 : #include <sys/sockio.h>
117 : #include <sys/mbuf.h>
118 : #include <sys/malloc.h>
119 : #include <sys/kernel.h>
120 : #include <sys/device.h>
121 : #include <sys/timeout.h>
122 : #include <sys/socket.h>
123 : #include <sys/atomic.h>
124 :
125 : #include <machine/bus.h>
126 :
127 : #include <net/if.h>
128 : #include <net/if_media.h>
129 :
130 : #include <netinet/in.h>
131 : #include <netinet/ip.h>
132 : #include <netinet/if_ether.h>
133 :
134 : #if NBPFILTER > 0
135 : #include <net/bpf.h>
136 : #endif
137 :
138 : #include <dev/mii/mii.h>
139 : #include <dev/mii/miivar.h>
140 :
141 : #include <dev/pci/pcidevs.h>
142 :
143 : #include <dev/ic/rtl81x9reg.h>
144 : #include <dev/ic/revar.h>
145 :
146 : #ifdef RE_DEBUG
147 : int redebug = 0;
148 : #define DPRINTF(x) do { if (redebug) printf x; } while (0)
149 : #else
150 : #define DPRINTF(x)
151 : #endif
152 :
153 : static inline void re_set_bufaddr(struct rl_desc *, bus_addr_t);
154 :
155 : int re_encap(struct rl_softc *, unsigned int, struct mbuf *);
156 :
157 : int re_newbuf(struct rl_softc *);
158 : int re_rx_list_init(struct rl_softc *);
159 : void re_rx_list_fill(struct rl_softc *);
160 : int re_tx_list_init(struct rl_softc *);
161 : int re_rxeof(struct rl_softc *);
162 : int re_txeof(struct rl_softc *);
163 : void re_tick(void *);
164 : void re_start(struct ifqueue *);
165 : void re_txstart(void *);
166 : int re_ioctl(struct ifnet *, u_long, caddr_t);
167 : void re_watchdog(struct ifnet *);
168 : int re_ifmedia_upd(struct ifnet *);
169 : void re_ifmedia_sts(struct ifnet *, struct ifmediareq *);
170 :
171 : void re_set_jumbo(struct rl_softc *);
172 :
173 : void re_eeprom_putbyte(struct rl_softc *, int);
174 : void re_eeprom_getword(struct rl_softc *, int, u_int16_t *);
175 : void re_read_eeprom(struct rl_softc *, caddr_t, int, int);
176 :
177 : int re_gmii_readreg(struct device *, int, int);
178 : void re_gmii_writereg(struct device *, int, int, int);
179 :
180 : int re_miibus_readreg(struct device *, int, int);
181 : void re_miibus_writereg(struct device *, int, int, int);
182 : void re_miibus_statchg(struct device *);
183 :
184 : void re_iff(struct rl_softc *);
185 :
186 : void re_setup_hw_im(struct rl_softc *);
187 : void re_setup_sim_im(struct rl_softc *);
188 : void re_disable_hw_im(struct rl_softc *);
189 : void re_disable_sim_im(struct rl_softc *);
190 : void re_config_imtype(struct rl_softc *, int);
191 : void re_setup_intr(struct rl_softc *, int, int);
192 : #ifndef SMALL_KERNEL
193 : int re_wol(struct ifnet*, int);
194 : #endif
195 :
196 : void in_delayed_cksum(struct mbuf *);
197 :
198 : struct cfdriver re_cd = {
199 : 0, "re", DV_IFNET
200 : };
201 :
202 : extern char *hw_vendor, *hw_prod;
203 :
204 : #define EE_SET(x) \
205 : CSR_WRITE_1(sc, RL_EECMD, \
206 : CSR_READ_1(sc, RL_EECMD) | x)
207 :
208 : #define EE_CLR(x) \
209 : CSR_WRITE_1(sc, RL_EECMD, \
210 : CSR_READ_1(sc, RL_EECMD) & ~x)
211 :
212 : #define RL_FRAMELEN(mtu) \
213 : (mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + \
214 : ETHER_VLAN_ENCAP_LEN)
215 :
216 : static const struct re_revision {
217 : u_int32_t re_chipid;
218 : const char *re_name;
219 : } re_revisions[] = {
220 : { RL_HWREV_8100, "RTL8100" },
221 : { RL_HWREV_8100E, "RTL8100E" },
222 : { RL_HWREV_8100E_SPIN2, "RTL8100E 2" },
223 : { RL_HWREV_8101, "RTL8101" },
224 : { RL_HWREV_8101E, "RTL8101E" },
225 : { RL_HWREV_8102E, "RTL8102E" },
226 : { RL_HWREV_8106E, "RTL8106E" },
227 : { RL_HWREV_8401E, "RTL8401E" },
228 : { RL_HWREV_8402, "RTL8402" },
229 : { RL_HWREV_8411, "RTL8411" },
230 : { RL_HWREV_8411B, "RTL8411B" },
231 : { RL_HWREV_8102EL, "RTL8102EL" },
232 : { RL_HWREV_8102EL_SPIN1, "RTL8102EL 1" },
233 : { RL_HWREV_8103E, "RTL8103E" },
234 : { RL_HWREV_8110S, "RTL8110S" },
235 : { RL_HWREV_8139CPLUS, "RTL8139C+" },
236 : { RL_HWREV_8168B_SPIN1, "RTL8168 1" },
237 : { RL_HWREV_8168B_SPIN2, "RTL8168 2" },
238 : { RL_HWREV_8168B_SPIN3, "RTL8168 3" },
239 : { RL_HWREV_8168C, "RTL8168C/8111C" },
240 : { RL_HWREV_8168C_SPIN2, "RTL8168C/8111C" },
241 : { RL_HWREV_8168CP, "RTL8168CP/8111CP" },
242 : { RL_HWREV_8168F, "RTL8168F/8111F" },
243 : { RL_HWREV_8168G, "RTL8168G/8111G" },
244 : { RL_HWREV_8168GU, "RTL8168GU/8111GU" },
245 : { RL_HWREV_8168H, "RTL8168H/8111H" },
246 : { RL_HWREV_8105E, "RTL8105E" },
247 : { RL_HWREV_8105E_SPIN1, "RTL8105E" },
248 : { RL_HWREV_8168D, "RTL8168D/8111D" },
249 : { RL_HWREV_8168DP, "RTL8168DP/8111DP" },
250 : { RL_HWREV_8168E, "RTL8168E/8111E" },
251 : { RL_HWREV_8168E_VL, "RTL8168E/8111E-VL" },
252 : { RL_HWREV_8168EP, "RTL8168EP/8111EP" },
253 : { RL_HWREV_8169, "RTL8169" },
254 : { RL_HWREV_8169_8110SB, "RTL8169/8110SB" },
255 : { RL_HWREV_8169_8110SBL, "RTL8169SBL" },
256 : { RL_HWREV_8169_8110SCd, "RTL8169/8110SCd" },
257 : { RL_HWREV_8169_8110SCe, "RTL8169/8110SCe" },
258 : { RL_HWREV_8169S, "RTL8169S" },
259 :
260 : { 0, NULL }
261 : };
262 :
263 :
264 : static inline void
265 0 : re_set_bufaddr(struct rl_desc *d, bus_addr_t addr)
266 : {
267 0 : d->rl_bufaddr_lo = htole32((uint32_t)addr);
268 : if (sizeof(bus_addr_t) == sizeof(uint64_t))
269 0 : d->rl_bufaddr_hi = htole32((uint64_t)addr >> 32);
270 : else
271 : d->rl_bufaddr_hi = 0;
272 0 : }
273 :
274 : /*
275 : * Send a read command and address to the EEPROM, check for ACK.
276 : */
277 : void
278 0 : re_eeprom_putbyte(struct rl_softc *sc, int addr)
279 : {
280 : int d, i;
281 :
282 0 : d = addr | (RL_9346_READ << sc->rl_eewidth);
283 :
284 : /*
285 : * Feed in each bit and strobe the clock.
286 : */
287 :
288 0 : for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
289 0 : if (d & i)
290 0 : EE_SET(RL_EE_DATAIN);
291 : else
292 0 : EE_CLR(RL_EE_DATAIN);
293 0 : DELAY(100);
294 0 : EE_SET(RL_EE_CLK);
295 0 : DELAY(150);
296 0 : EE_CLR(RL_EE_CLK);
297 0 : DELAY(100);
298 : }
299 0 : }
300 :
301 : /*
302 : * Read a word of data stored in the EEPROM at address 'addr.'
303 : */
304 : void
305 0 : re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
306 : {
307 : int i;
308 : u_int16_t word = 0;
309 :
310 : /*
311 : * Send address of word we want to read.
312 : */
313 0 : re_eeprom_putbyte(sc, addr);
314 :
315 : /*
316 : * Start reading bits from EEPROM.
317 : */
318 0 : for (i = 0x8000; i; i >>= 1) {
319 0 : EE_SET(RL_EE_CLK);
320 0 : DELAY(100);
321 0 : if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
322 0 : word |= i;
323 0 : EE_CLR(RL_EE_CLK);
324 0 : DELAY(100);
325 : }
326 :
327 0 : *dest = word;
328 0 : }
329 :
330 : /*
331 : * Read a sequence of words from the EEPROM.
332 : */
333 : void
334 0 : re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
335 : {
336 : int i;
337 0 : u_int16_t word = 0, *ptr;
338 :
339 0 : CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
340 :
341 0 : DELAY(100);
342 :
343 0 : for (i = 0; i < cnt; i++) {
344 0 : CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
345 0 : re_eeprom_getword(sc, off + i, &word);
346 0 : CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
347 0 : ptr = (u_int16_t *)(dest + (i * 2));
348 0 : *ptr = word;
349 : }
350 :
351 0 : CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
352 0 : }
353 :
354 : int
355 0 : re_gmii_readreg(struct device *self, int phy, int reg)
356 : {
357 0 : struct rl_softc *sc = (struct rl_softc *)self;
358 : u_int32_t rval;
359 : int i;
360 :
361 0 : if (phy != 7)
362 0 : return (0);
363 :
364 : /* Let the rgephy driver read the GMEDIASTAT register */
365 :
366 0 : if (reg == RL_GMEDIASTAT) {
367 0 : rval = CSR_READ_1(sc, RL_GMEDIASTAT);
368 0 : return (rval);
369 : }
370 :
371 0 : CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
372 :
373 0 : for (i = 0; i < RL_PHY_TIMEOUT; i++) {
374 0 : rval = CSR_READ_4(sc, RL_PHYAR);
375 0 : if (rval & RL_PHYAR_BUSY)
376 : break;
377 0 : DELAY(25);
378 : }
379 :
380 0 : if (i == RL_PHY_TIMEOUT) {
381 0 : printf ("%s: PHY read failed\n", sc->sc_dev.dv_xname);
382 0 : return (0);
383 : }
384 :
385 0 : DELAY(20);
386 :
387 0 : return (rval & RL_PHYAR_PHYDATA);
388 0 : }
389 :
390 : void
391 0 : re_gmii_writereg(struct device *dev, int phy, int reg, int data)
392 : {
393 0 : struct rl_softc *sc = (struct rl_softc *)dev;
394 : u_int32_t rval;
395 : int i;
396 :
397 0 : CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
398 : (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
399 :
400 0 : for (i = 0; i < RL_PHY_TIMEOUT; i++) {
401 0 : rval = CSR_READ_4(sc, RL_PHYAR);
402 0 : if (!(rval & RL_PHYAR_BUSY))
403 : break;
404 0 : DELAY(25);
405 : }
406 :
407 0 : if (i == RL_PHY_TIMEOUT)
408 0 : printf ("%s: PHY write failed\n", sc->sc_dev.dv_xname);
409 :
410 0 : DELAY(20);
411 0 : }
412 :
/*
 * miibus read entry point.  For everything except the 8139C+ the
 * access is handed to re_gmii_readreg().  On the 8139C+ the built-in
 * PHY is reached through MAC registers, so the generic MII register
 * number is mapped onto the chip's own register layout and read
 * directly.  Runs at splnet to serialize register access.
 */
int
re_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct rl_softc *sc = (struct rl_softc *)dev;
	u_int16_t rval = 0;
	u_int16_t re8139_reg = 0;
	int s;

	s = splnet();

	/* Non-8139C+ chips use the GMII access window instead. */
	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
		rval = re_gmii_readreg(dev, phy, reg);
		splx(s);
		return (rval);
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		splx(s);
		return (0);
	}
	switch(reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		/* No mapping for the PHY ID registers; report zero. */
		splx(s);
		return (0);
	/*
	 * Allow the rlphy driver to read the media status
	 * register. If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RL_MEDIASTAT:
		rval = CSR_READ_1(sc, RL_MEDIASTAT);
		splx(s);
		return (rval);
	default:
		printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg);
		splx(s);
		return (0);
	}
	rval = CSR_READ_2(sc, re8139_reg);
	if (re8139_reg == RL_BMCR) {
		/* 8139C+ has different bit layout. */
		rval &= ~(BMCR_LOOP | BMCR_ISO);
	}
	splx(s);
	return (rval);
}
477 :
478 : void
479 0 : re_miibus_writereg(struct device *dev, int phy, int reg, int data)
480 : {
481 0 : struct rl_softc *sc = (struct rl_softc *)dev;
482 : u_int16_t re8139_reg = 0;
483 : int s;
484 :
485 0 : s = splnet();
486 :
487 0 : if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
488 0 : re_gmii_writereg(dev, phy, reg, data);
489 0 : splx(s);
490 0 : return;
491 : }
492 :
493 : /* Pretend the internal PHY is only at address 0 */
494 0 : if (phy) {
495 0 : splx(s);
496 0 : return;
497 : }
498 0 : switch(reg) {
499 : case MII_BMCR:
500 : re8139_reg = RL_BMCR;
501 : /* 8139C+ has different bit layout. */
502 0 : data &= ~(BMCR_LOOP | BMCR_ISO);
503 0 : break;
504 : case MII_BMSR:
505 : re8139_reg = RL_BMSR;
506 0 : break;
507 : case MII_ANAR:
508 : re8139_reg = RL_ANAR;
509 0 : break;
510 : case MII_ANER:
511 : re8139_reg = RL_ANER;
512 0 : break;
513 : case MII_ANLPAR:
514 : re8139_reg = RL_LPAR;
515 0 : break;
516 : case MII_PHYIDR1:
517 : case MII_PHYIDR2:
518 0 : splx(s);
519 0 : return;
520 : break;
521 : default:
522 0 : printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg);
523 0 : splx(s);
524 0 : return;
525 : }
526 0 : CSR_WRITE_2(sc, re8139_reg, data);
527 0 : splx(s);
528 0 : }
529 :
530 : void
531 0 : re_miibus_statchg(struct device *dev)
532 : {
533 0 : struct rl_softc *sc = (struct rl_softc *)dev;
534 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
535 0 : struct mii_data *mii = &sc->sc_mii;
536 :
537 0 : if ((ifp->if_flags & IFF_RUNNING) == 0)
538 0 : return;
539 :
540 0 : sc->rl_flags &= ~RL_FLAG_LINK;
541 0 : if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
542 : (IFM_ACTIVE | IFM_AVALID)) {
543 0 : switch (IFM_SUBTYPE(mii->mii_media_active)) {
544 : case IFM_10_T:
545 : case IFM_100_TX:
546 0 : sc->rl_flags |= RL_FLAG_LINK;
547 0 : break;
548 : case IFM_1000_T:
549 0 : if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
550 : break;
551 0 : sc->rl_flags |= RL_FLAG_LINK;
552 0 : break;
553 : default:
554 : break;
555 : }
556 : }
557 :
558 : /*
559 : * Realtek controllers do not provide an interface to
560 : * Tx/Rx MACs for resolved speed, duplex and flow-control
561 : * parameters.
562 : */
563 0 : }
564 :
565 : void
566 0 : re_iff(struct rl_softc *sc)
567 : {
568 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
569 : int h = 0;
570 : u_int32_t hashes[2];
571 : u_int32_t rxfilt;
572 : struct arpcom *ac = &sc->sc_arpcom;
573 : struct ether_multi *enm;
574 : struct ether_multistep step;
575 :
576 0 : rxfilt = CSR_READ_4(sc, RL_RXCFG);
577 0 : rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
578 : RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI);
579 0 : ifp->if_flags &= ~IFF_ALLMULTI;
580 :
581 : /*
582 : * Always accept frames destined to our station address.
583 : * Always accept broadcast frames.
584 : */
585 0 : rxfilt |= RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;
586 :
587 0 : if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
588 0 : ifp->if_flags |= IFF_ALLMULTI;
589 0 : rxfilt |= RL_RXCFG_RX_MULTI;
590 0 : if (ifp->if_flags & IFF_PROMISC)
591 0 : rxfilt |= RL_RXCFG_RX_ALLPHYS;
592 : hashes[0] = hashes[1] = 0xFFFFFFFF;
593 0 : } else {
594 0 : rxfilt |= RL_RXCFG_RX_MULTI;
595 : /* Program new filter. */
596 : bzero(hashes, sizeof(hashes));
597 :
598 0 : ETHER_FIRST_MULTI(step, ac, enm);
599 0 : while (enm != NULL) {
600 0 : h = ether_crc32_be(enm->enm_addrlo,
601 0 : ETHER_ADDR_LEN) >> 26;
602 :
603 0 : if (h < 32)
604 0 : hashes[0] |= (1 << h);
605 : else
606 0 : hashes[1] |= (1 << (h - 32));
607 :
608 0 : ETHER_NEXT_MULTI(step, enm);
609 : }
610 : }
611 :
612 : /*
613 : * For some unfathomable reason, Realtek decided to reverse
614 : * the order of the multicast hash registers in the PCI Express
615 : * parts. This means we have to write the hash pattern in reverse
616 : * order for those devices.
617 : */
618 0 : if (sc->rl_flags & RL_FLAG_PCIE) {
619 0 : CSR_WRITE_4(sc, RL_MAR0, swap32(hashes[1]));
620 0 : CSR_WRITE_4(sc, RL_MAR4, swap32(hashes[0]));
621 0 : } else {
622 0 : CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
623 0 : CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
624 : }
625 :
626 0 : CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
627 0 : }
628 :
629 : void
630 0 : re_reset(struct rl_softc *sc)
631 : {
632 : int i;
633 :
634 0 : CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
635 :
636 0 : for (i = 0; i < RL_TIMEOUT; i++) {
637 0 : DELAY(10);
638 0 : if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
639 : break;
640 : }
641 0 : if (i == RL_TIMEOUT)
642 0 : printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
643 :
644 0 : if (sc->rl_flags & RL_FLAG_MACRESET)
645 0 : CSR_WRITE_1(sc, RL_LDPS, 1);
646 0 : }
647 :
648 : /*
649 : * Attach the interface. Allocate softc structures, do ifmedia
650 : * setup and ethernet/BPF attach.
651 : */
652 : int
653 0 : re_attach(struct rl_softc *sc, const char *intrstr)
654 : {
655 0 : u_char eaddr[ETHER_ADDR_LEN];
656 0 : u_int16_t as[ETHER_ADDR_LEN / 2];
657 : struct ifnet *ifp;
658 0 : u_int16_t re_did = 0;
659 : int error = 0, i;
660 : const struct re_revision *rr;
661 : const char *re_name = NULL;
662 :
663 0 : sc->sc_hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
664 :
665 0 : switch (sc->sc_hwrev) {
666 : case RL_HWREV_8139CPLUS:
667 0 : sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
668 0 : sc->rl_max_mtu = RL_MTU;
669 0 : break;
670 : case RL_HWREV_8100E:
671 : case RL_HWREV_8100E_SPIN2:
672 : case RL_HWREV_8101E:
673 0 : sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
674 0 : sc->rl_max_mtu = RL_MTU;
675 0 : break;
676 : case RL_HWREV_8103E:
677 0 : sc->rl_flags |= RL_FLAG_MACSLEEP;
678 : /* FALLTHROUGH */
679 : case RL_HWREV_8102E:
680 : case RL_HWREV_8102EL:
681 : case RL_HWREV_8102EL_SPIN1:
682 0 : sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
683 : RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER |
684 : RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
685 0 : sc->rl_max_mtu = RL_MTU;
686 0 : break;
687 : case RL_HWREV_8401E:
688 : case RL_HWREV_8105E:
689 : case RL_HWREV_8105E_SPIN1:
690 : case RL_HWREV_8106E:
691 0 : sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
692 : RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
693 : RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
694 0 : sc->rl_max_mtu = RL_MTU;
695 0 : break;
696 : case RL_HWREV_8402:
697 0 : sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
698 : RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
699 : RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
700 : RL_FLAG_CMDSTOP_WAIT_TXQ;
701 0 : sc->rl_max_mtu = RL_MTU;
702 0 : break;
703 : case RL_HWREV_8168B_SPIN1:
704 : case RL_HWREV_8168B_SPIN2:
705 0 : sc->rl_flags |= RL_FLAG_WOLRXENB;
706 : /* FALLTHROUGH */
707 : case RL_HWREV_8168B_SPIN3:
708 0 : sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
709 0 : sc->rl_max_mtu = RL_MTU;
710 0 : break;
711 : case RL_HWREV_8168C_SPIN2:
712 0 : sc->rl_flags |= RL_FLAG_MACSLEEP;
713 : /* FALLTHROUGH */
714 : case RL_HWREV_8168C:
715 : case RL_HWREV_8168CP:
716 0 : sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
717 : RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
718 : RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
719 0 : sc->rl_max_mtu = RL_JUMBO_MTU_6K;
720 0 : break;
721 : case RL_HWREV_8168D:
722 0 : sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
723 : RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
724 : RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
725 : RL_FLAG_WOL_MANLINK;
726 0 : sc->rl_max_mtu = RL_JUMBO_MTU_9K;
727 0 : break;
728 : case RL_HWREV_8168DP:
729 0 : sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
730 : RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
731 : RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
732 0 : sc->rl_max_mtu = RL_JUMBO_MTU_9K;
733 0 : break;
734 : case RL_HWREV_8168E:
735 0 : sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
736 : RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
737 : RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
738 : RL_FLAG_WOL_MANLINK;
739 0 : sc->rl_max_mtu = RL_JUMBO_MTU_9K;
740 0 : break;
741 : case RL_HWREV_8168E_VL:
742 0 : sc->rl_flags |= RL_FLAG_EARLYOFF | RL_FLAG_PHYWAKE | RL_FLAG_PAR |
743 : RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
744 : RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ |
745 : RL_FLAG_WOL_MANLINK;
746 0 : sc->rl_max_mtu = RL_JUMBO_MTU_6K;
747 0 : break;
748 : case RL_HWREV_8168F:
749 0 : sc->rl_flags |= RL_FLAG_EARLYOFF;
750 : /* FALLTHROUGH */
751 : case RL_HWREV_8411:
752 0 : sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
753 : RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
754 : RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ |
755 : RL_FLAG_WOL_MANLINK;
756 0 : sc->rl_max_mtu = RL_JUMBO_MTU_9K;
757 0 : break;
758 : case RL_HWREV_8168EP:
759 : case RL_HWREV_8168G:
760 : case RL_HWREV_8168GU:
761 : case RL_HWREV_8168H:
762 : case RL_HWREV_8411B:
763 0 : if (sc->sc_product == PCI_PRODUCT_REALTEK_RT8101E) {
764 : /* RTL8106EUS */
765 0 : sc->rl_flags |= RL_FLAG_FASTETHER;
766 0 : sc->rl_max_mtu = RL_MTU;
767 0 : } else {
768 0 : sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
769 0 : sc->rl_max_mtu = RL_JUMBO_MTU_9K;
770 : }
771 :
772 0 : sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
773 : RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
774 : RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ |
775 : RL_FLAG_EARLYOFFV2 | RL_FLAG_RXDV_GATED;
776 0 : break;
777 : case RL_HWREV_8169_8110SB:
778 : case RL_HWREV_8169_8110SBL:
779 : case RL_HWREV_8169_8110SCd:
780 : case RL_HWREV_8169_8110SCe:
781 0 : sc->rl_flags |= RL_FLAG_PHYWAKE;
782 : /* FALLTHROUGH */
783 : case RL_HWREV_8169:
784 : case RL_HWREV_8169S:
785 : case RL_HWREV_8110S:
786 0 : sc->rl_flags |= RL_FLAG_MACRESET;
787 0 : sc->rl_max_mtu = RL_JUMBO_MTU_7K;
788 0 : break;
789 : default:
790 : break;
791 : }
792 :
793 0 : if (sc->sc_hwrev == RL_HWREV_8139CPLUS) {
794 : sc->rl_cfg0 = RL_8139_CFG0;
795 : sc->rl_cfg1 = RL_8139_CFG1;
796 0 : sc->rl_cfg2 = 0;
797 0 : sc->rl_cfg3 = RL_8139_CFG3;
798 0 : sc->rl_cfg4 = RL_8139_CFG4;
799 0 : sc->rl_cfg5 = RL_8139_CFG5;
800 0 : } else {
801 : sc->rl_cfg0 = RL_CFG0;
802 : sc->rl_cfg1 = RL_CFG1;
803 0 : sc->rl_cfg2 = RL_CFG2;
804 0 : sc->rl_cfg3 = RL_CFG3;
805 0 : sc->rl_cfg4 = RL_CFG4;
806 0 : sc->rl_cfg5 = RL_CFG5;
807 : }
808 :
809 : /* Reset the adapter. */
810 0 : re_reset(sc);
811 :
812 0 : sc->rl_tx_time = 5; /* 125us */
813 0 : sc->rl_rx_time = 2; /* 50us */
814 0 : if (sc->rl_flags & RL_FLAG_PCIE)
815 0 : sc->rl_sim_time = 75; /* 75us */
816 : else
817 0 : sc->rl_sim_time = 125; /* 125us */
818 0 : sc->rl_imtype = RL_IMTYPE_SIM; /* simulated interrupt moderation */
819 :
820 0 : if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
821 0 : sc->rl_bus_speed = 33; /* XXX */
822 0 : else if (sc->rl_flags & RL_FLAG_PCIE)
823 0 : sc->rl_bus_speed = 125;
824 : else {
825 : u_int8_t cfg2;
826 :
827 0 : cfg2 = CSR_READ_1(sc, sc->rl_cfg2);
828 0 : switch (cfg2 & RL_CFG2_PCI_MASK) {
829 : case RL_CFG2_PCI_33MHZ:
830 0 : sc->rl_bus_speed = 33;
831 0 : break;
832 : case RL_CFG2_PCI_66MHZ:
833 0 : sc->rl_bus_speed = 66;
834 0 : break;
835 : default:
836 0 : printf("%s: unknown bus speed, assume 33MHz\n",
837 0 : sc->sc_dev.dv_xname);
838 0 : sc->rl_bus_speed = 33;
839 0 : break;
840 : }
841 :
842 0 : if (cfg2 & RL_CFG2_PCI_64BIT)
843 0 : sc->rl_flags |= RL_FLAG_PCI64;
844 : }
845 :
846 0 : re_config_imtype(sc, sc->rl_imtype);
847 :
848 0 : if (sc->rl_flags & RL_FLAG_PAR) {
849 : /*
850 : * XXX Should have a better way to extract station
851 : * address from EEPROM.
852 : */
853 0 : for (i = 0; i < ETHER_ADDR_LEN; i++)
854 0 : eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
855 : } else {
856 0 : sc->rl_eewidth = RL_9356_ADDR_LEN;
857 0 : re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
858 0 : if (re_did != 0x8129)
859 0 : sc->rl_eewidth = RL_9346_ADDR_LEN;
860 :
861 : /*
862 : * Get station address from the EEPROM.
863 : */
864 0 : re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
865 0 : for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
866 0 : as[i] = letoh16(as[i]);
867 0 : bcopy(as, eaddr, ETHER_ADDR_LEN);
868 : }
869 :
870 : /*
871 : * Set RX length mask, TX poll request register
872 : * and descriptor count.
873 : */
874 0 : if (sc->sc_hwrev == RL_HWREV_8139CPLUS) {
875 0 : sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
876 0 : sc->rl_txstart = RL_TXSTART;
877 0 : sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
878 0 : sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
879 0 : sc->rl_ldata.rl_tx_ndescs = RL_8139_NTXSEGS;
880 0 : } else {
881 0 : sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
882 0 : sc->rl_txstart = RL_GTXSTART;
883 0 : sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
884 0 : sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
885 0 : sc->rl_ldata.rl_tx_ndescs = RL_8169_NTXSEGS;
886 : }
887 :
888 0 : bcopy(eaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
889 :
890 0 : for (rr = re_revisions; rr->re_name != NULL; rr++) {
891 0 : if (rr->re_chipid == sc->sc_hwrev)
892 0 : re_name = rr->re_name;
893 : }
894 :
895 0 : if (re_name == NULL)
896 0 : printf(": unknown ASIC (0x%04x)", sc->sc_hwrev >> 16);
897 : else
898 0 : printf(": %s (0x%04x)", re_name, sc->sc_hwrev >> 16);
899 :
900 0 : printf(", %s, address %s\n", intrstr,
901 0 : ether_sprintf(sc->sc_arpcom.ac_enaddr));
902 :
903 : /* Allocate DMA'able memory for the TX ring */
904 0 : if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_TX_LIST_SZ(sc),
905 : RL_RING_ALIGN, 0, &sc->rl_ldata.rl_tx_listseg, 1,
906 : &sc->rl_ldata.rl_tx_listnseg, BUS_DMA_NOWAIT |
907 0 : BUS_DMA_ZERO)) != 0) {
908 0 : printf("%s: can't allocate tx listseg, error = %d\n",
909 0 : sc->sc_dev.dv_xname, error);
910 0 : goto fail_0;
911 : }
912 :
913 : /* Load the map for the TX ring. */
914 0 : if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_tx_listseg,
915 : sc->rl_ldata.rl_tx_listnseg, RL_TX_LIST_SZ(sc),
916 : (caddr_t *)&sc->rl_ldata.rl_tx_list,
917 0 : BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
918 0 : printf("%s: can't map tx list, error = %d\n",
919 0 : sc->sc_dev.dv_xname, error);
920 0 : goto fail_1;
921 : }
922 :
923 0 : if ((error = bus_dmamap_create(sc->sc_dmat, RL_TX_LIST_SZ(sc), 1,
924 : RL_TX_LIST_SZ(sc), 0, 0,
925 0 : &sc->rl_ldata.rl_tx_list_map)) != 0) {
926 0 : printf("%s: can't create tx list map, error = %d\n",
927 0 : sc->sc_dev.dv_xname, error);
928 0 : goto fail_2;
929 : }
930 :
931 0 : if ((error = bus_dmamap_load(sc->sc_dmat,
932 : sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
933 0 : RL_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
934 0 : printf("%s: can't load tx list, error = %d\n",
935 0 : sc->sc_dev.dv_xname, error);
936 0 : goto fail_3;
937 : }
938 :
939 : /* Create DMA maps for TX buffers */
940 0 : for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
941 0 : error = bus_dmamap_create(sc->sc_dmat,
942 : RL_JUMBO_FRAMELEN, sc->rl_ldata.rl_tx_ndescs,
943 : RL_JUMBO_FRAMELEN, 0, 0,
944 : &sc->rl_ldata.rl_txq[i].txq_dmamap);
945 0 : if (error) {
946 0 : printf("%s: can't create DMA map for TX\n",
947 0 : sc->sc_dev.dv_xname);
948 0 : goto fail_4;
949 : }
950 : }
951 :
952 : /* Allocate DMA'able memory for the RX ring */
953 0 : if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_RX_DMAMEM_SZ(sc),
954 : RL_RING_ALIGN, 0, &sc->rl_ldata.rl_rx_listseg, 1,
955 : &sc->rl_ldata.rl_rx_listnseg, BUS_DMA_NOWAIT |
956 0 : BUS_DMA_ZERO)) != 0) {
957 0 : printf("%s: can't allocate rx listnseg, error = %d\n",
958 0 : sc->sc_dev.dv_xname, error);
959 0 : goto fail_4;
960 : }
961 :
962 : /* Load the map for the RX ring. */
963 0 : if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_rx_listseg,
964 : sc->rl_ldata.rl_rx_listnseg, RL_RX_DMAMEM_SZ(sc),
965 : (caddr_t *)&sc->rl_ldata.rl_rx_list,
966 0 : BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
967 0 : printf("%s: can't map rx list, error = %d\n",
968 0 : sc->sc_dev.dv_xname, error);
969 0 : goto fail_5;
970 :
971 : }
972 :
973 0 : if ((error = bus_dmamap_create(sc->sc_dmat, RL_RX_DMAMEM_SZ(sc), 1,
974 : RL_RX_DMAMEM_SZ(sc), 0, 0,
975 0 : &sc->rl_ldata.rl_rx_list_map)) != 0) {
976 0 : printf("%s: can't create rx list map, error = %d\n",
977 0 : sc->sc_dev.dv_xname, error);
978 0 : goto fail_6;
979 : }
980 :
981 0 : if ((error = bus_dmamap_load(sc->sc_dmat,
982 : sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
983 0 : RL_RX_DMAMEM_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
984 0 : printf("%s: can't load rx list, error = %d\n",
985 0 : sc->sc_dev.dv_xname, error);
986 0 : goto fail_7;
987 : }
988 :
989 : /* Create DMA maps for RX buffers */
990 0 : for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
991 0 : error = bus_dmamap_create(sc->sc_dmat,
992 : RL_FRAMELEN(sc->rl_max_mtu), 1,
993 : RL_FRAMELEN(sc->rl_max_mtu), 0, 0,
994 : &sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
995 0 : if (error) {
996 0 : printf("%s: can't create DMA map for RX\n",
997 0 : sc->sc_dev.dv_xname);
998 : goto fail_8;
999 : }
1000 : }
1001 :
1002 0 : ifp = &sc->sc_arpcom.ac_if;
1003 0 : ifp->if_softc = sc;
1004 0 : strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
1005 0 : ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1006 0 : ifp->if_xflags = IFXF_MPSAFE;
1007 0 : ifp->if_ioctl = re_ioctl;
1008 0 : ifp->if_qstart = re_start;
1009 0 : ifp->if_watchdog = re_watchdog;
1010 0 : ifp->if_hardmtu = sc->rl_max_mtu;
1011 0 : IFQ_SET_MAXLEN(&ifp->if_snd, sc->rl_ldata.rl_tx_desc_cnt);
1012 :
1013 0 : ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 |
1014 : IFCAP_CSUM_UDPv4;
1015 :
1016 : /*
1017 : * RTL8168/8111C generates wrong IP checksummed frame if the
1018 : * packet has IP options so disable TX IP checksum offloading.
1019 : */
1020 0 : switch (sc->sc_hwrev) {
1021 : case RL_HWREV_8168C:
1022 : case RL_HWREV_8168C_SPIN2:
1023 : case RL_HWREV_8168CP:
1024 : break;
1025 : default:
1026 0 : ifp->if_capabilities |= IFCAP_CSUM_IPv4;
1027 0 : }
1028 :
1029 : #if NVLAN > 0
1030 0 : ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1031 : #endif
1032 :
1033 : #ifndef SMALL_KERNEL
1034 0 : ifp->if_capabilities |= IFCAP_WOL;
1035 0 : ifp->if_wol = re_wol;
1036 0 : re_wol(ifp, 0);
1037 : #endif
1038 0 : timeout_set(&sc->timer_handle, re_tick, sc);
1039 0 : task_set(&sc->rl_start, re_txstart, sc);
1040 :
1041 : /* Take PHY out of power down mode. */
1042 0 : if (sc->rl_flags & RL_FLAG_PHYWAKE_PM) {
1043 0 : CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
1044 0 : if (sc->sc_hwrev == RL_HWREV_8401E)
1045 0 : CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
1046 : }
1047 0 : if (sc->rl_flags & RL_FLAG_PHYWAKE) {
1048 0 : re_gmii_writereg((struct device *)sc, 1, 0x1f, 0);
1049 0 : re_gmii_writereg((struct device *)sc, 1, 0x0e, 0);
1050 0 : }
1051 :
1052 : /* Do MII setup */
1053 0 : sc->sc_mii.mii_ifp = ifp;
1054 0 : sc->sc_mii.mii_readreg = re_miibus_readreg;
1055 0 : sc->sc_mii.mii_writereg = re_miibus_writereg;
1056 0 : sc->sc_mii.mii_statchg = re_miibus_statchg;
1057 0 : ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, re_ifmedia_upd,
1058 : re_ifmedia_sts);
1059 0 : mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1060 : MII_OFFSET_ANY, MIIF_DOPAUSE);
1061 0 : if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1062 0 : printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
1063 0 : ifmedia_add(&sc->sc_mii.mii_media,
1064 : IFM_ETHER|IFM_NONE, 0, NULL);
1065 0 : ifmedia_set(&sc->sc_mii.mii_media,
1066 : IFM_ETHER|IFM_NONE);
1067 0 : } else
1068 0 : ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1069 :
1070 : /*
1071 : * Call MI attach routine.
1072 : */
1073 0 : if_attach(ifp);
1074 0 : ether_ifattach(ifp);
1075 :
1076 0 : return (0);
1077 :
1078 : fail_8:
1079 : /* Destroy DMA maps for RX buffers. */
1080 0 : for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1081 0 : if (sc->rl_ldata.rl_rxsoft[i].rxs_dmamap != NULL)
1082 0 : bus_dmamap_destroy(sc->sc_dmat,
1083 : sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
1084 : }
1085 :
1086 : /* Free DMA'able memory for the RX ring. */
1087 0 : bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
1088 : fail_7:
1089 0 : bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
1090 : fail_6:
1091 0 : bus_dmamem_unmap(sc->sc_dmat,
1092 : (caddr_t)sc->rl_ldata.rl_rx_list, RL_RX_DMAMEM_SZ(sc));
1093 : fail_5:
1094 0 : bus_dmamem_free(sc->sc_dmat,
1095 : &sc->rl_ldata.rl_rx_listseg, sc->rl_ldata.rl_rx_listnseg);
1096 :
1097 : fail_4:
1098 : /* Destroy DMA maps for TX buffers. */
1099 0 : for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1100 0 : if (sc->rl_ldata.rl_txq[i].txq_dmamap != NULL)
1101 0 : bus_dmamap_destroy(sc->sc_dmat,
1102 : sc->rl_ldata.rl_txq[i].txq_dmamap);
1103 : }
1104 :
1105 : /* Free DMA'able memory for the TX ring. */
1106 0 : bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
1107 : fail_3:
1108 0 : bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
1109 : fail_2:
1110 0 : bus_dmamem_unmap(sc->sc_dmat,
1111 : (caddr_t)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ(sc));
1112 : fail_1:
1113 0 : bus_dmamem_free(sc->sc_dmat,
1114 : &sc->rl_ldata.rl_tx_listseg, sc->rl_ldata.rl_tx_listnseg);
1115 : fail_0:
1116 0 : return (1);
1117 0 : }
1118 :
1119 :
/*
 * Allocate and map one RX mbuf cluster and attach it to the next free
 * RX descriptor (rl_rx_prodidx).  Returns 0 on success, ENOBUFS if no
 * cluster is available, the DMA load fails, or the descriptor is still
 * owned by the chip.
 */
int
re_newbuf(struct rl_softc *sc)
{
	struct mbuf *m;
	bus_dmamap_t map;
	struct rl_desc *d;
	struct rl_rxsoft *rxs;
	u_int32_t cmdstat;
	int error, idx;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, RL_FRAMELEN(sc->rl_max_mtu));
	if (!m)
		return (ENOBUFS);

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned on strict alignment archs.
	 */
	m->m_len = m->m_pkthdr.len = RL_FRAMELEN(sc->rl_max_mtu);
	m->m_data += RE_ETHER_ALIGN;

	idx = sc->rl_ldata.rl_rx_prodidx;
	rxs = &sc->rl_ldata.rl_rxsoft[idx];
	map = rxs->rxs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	d = &sc->rl_ldata.rl_rx_list[idx];
	/* Sanity check: the chip must have handed this descriptor back. */
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cmdstat = letoh32(d->rl_cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (cmdstat & RL_RDESC_STAT_OWN) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return (ENOBUFS);
	}

	rxs->rxs_mbuf = m;

	d->rl_vlanctl = 0;
	cmdstat = map->dm_segs[0].ds_len;
	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
		cmdstat |= RL_RDESC_CMD_EOR;
	re_set_bufaddr(d, map->dm_segs[0].ds_addr);
	/*
	 * Two-step update: write the buffer address/length first and sync
	 * it out, then set the OWN bit in a second write+sync, so the chip
	 * can never see OWN before the rest of the descriptor is valid.
	 */
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	cmdstat |= RL_RDESC_CMD_OWN;
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->rl_ldata.rl_rx_prodidx = RL_NEXT_RX_DESC(sc, idx);

	return (0);
}
1183 :
1184 :
1185 : int
1186 0 : re_tx_list_init(struct rl_softc *sc)
1187 : {
1188 : int i;
1189 :
1190 0 : memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc));
1191 0 : for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1192 0 : sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
1193 : }
1194 :
1195 0 : bus_dmamap_sync(sc->sc_dmat,
1196 : sc->rl_ldata.rl_tx_list_map, 0,
1197 : sc->rl_ldata.rl_tx_list_map->dm_mapsize,
1198 : BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1199 0 : sc->rl_ldata.rl_txq_prodidx = 0;
1200 0 : sc->rl_ldata.rl_txq_considx = 0;
1201 0 : sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
1202 0 : sc->rl_ldata.rl_tx_nextfree = 0;
1203 :
1204 0 : return (0);
1205 : }
1206 :
1207 : int
1208 0 : re_rx_list_init(struct rl_softc *sc)
1209 : {
1210 0 : bzero(sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ(sc));
1211 :
1212 0 : sc->rl_ldata.rl_rx_prodidx = 0;
1213 0 : sc->rl_ldata.rl_rx_considx = 0;
1214 0 : sc->rl_head = sc->rl_tail = NULL;
1215 :
1216 0 : if_rxr_init(&sc->rl_ldata.rl_rx_ring, 2,
1217 0 : sc->rl_ldata.rl_rx_desc_cnt - 1);
1218 0 : re_rx_list_fill(sc);
1219 :
1220 0 : return (0);
1221 : }
1222 :
1223 : void
1224 0 : re_rx_list_fill(struct rl_softc *sc)
1225 : {
1226 : u_int slots;
1227 :
1228 0 : for (slots = if_rxr_get(&sc->rl_ldata.rl_rx_ring,
1229 0 : sc->rl_ldata.rl_rx_desc_cnt);
1230 0 : slots > 0; slots--) {
1231 0 : if (re_newbuf(sc) == ENOBUFS)
1232 : break;
1233 : }
1234 0 : if_rxr_put(&sc->rl_ldata.rl_rx_ring, slots);
1235 0 : }
1236 :
1237 : /*
1238 : * RX handler for C+ and 8169. For the gigE chips, we support
1239 : * the reception of jumbo frames that have been fragmented
1240 : * across multiple 2K mbuf cluster buffers.
1241 : */
1242 : int
1243 0 : re_rxeof(struct rl_softc *sc)
1244 : {
1245 0 : struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1246 : struct mbuf *m;
1247 : struct ifnet *ifp;
1248 : int i, total_len, rx = 0;
1249 : struct rl_desc *cur_rx;
1250 : struct rl_rxsoft *rxs;
1251 : u_int32_t rxstat, rxvlan;
1252 :
1253 0 : ifp = &sc->sc_arpcom.ac_if;
1254 :
1255 0 : for (i = sc->rl_ldata.rl_rx_considx;
1256 0 : if_rxr_inuse(&sc->rl_ldata.rl_rx_ring) > 0;
1257 0 : i = RL_NEXT_RX_DESC(sc, i)) {
1258 0 : cur_rx = &sc->rl_ldata.rl_rx_list[i];
1259 0 : RL_RXDESCSYNC(sc, i,
1260 : BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1261 0 : rxstat = letoh32(cur_rx->rl_cmdstat);
1262 0 : rxvlan = letoh32(cur_rx->rl_vlanctl);
1263 0 : RL_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD);
1264 0 : if ((rxstat & RL_RDESC_STAT_OWN) != 0)
1265 : break;
1266 0 : total_len = rxstat & sc->rl_rxlenmask;
1267 0 : rxs = &sc->rl_ldata.rl_rxsoft[i];
1268 0 : m = rxs->rxs_mbuf;
1269 0 : rxs->rxs_mbuf = NULL;
1270 0 : if_rxr_put(&sc->rl_ldata.rl_rx_ring, 1);
1271 : rx = 1;
1272 :
1273 : /* Invalidate the RX mbuf and unload its map */
1274 :
1275 0 : bus_dmamap_sync(sc->sc_dmat,
1276 : rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize,
1277 : BUS_DMASYNC_POSTREAD);
1278 0 : bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1279 :
1280 0 : if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
1281 0 : (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
1282 : (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
1283 : continue;
1284 0 : } else if (!(rxstat & RL_RDESC_STAT_EOF)) {
1285 0 : m->m_len = RL_FRAMELEN(sc->rl_max_mtu);
1286 0 : if (sc->rl_head == NULL)
1287 0 : sc->rl_head = sc->rl_tail = m;
1288 : else {
1289 0 : m->m_flags &= ~M_PKTHDR;
1290 0 : sc->rl_tail->m_next = m;
1291 0 : sc->rl_tail = m;
1292 : }
1293 : continue;
1294 : }
1295 :
1296 : /*
1297 : * NOTE: for the 8139C+, the frame length field
1298 : * is always 12 bits in size, but for the gigE chips,
1299 : * it is 13 bits (since the max RX frame length is 16K).
1300 : * Unfortunately, all 32 bits in the status word
1301 : * were already used, so to make room for the extra
1302 : * length bit, Realtek took out the 'frame alignment
1303 : * error' bit and shifted the other status bits
1304 : * over one slot. The OWN, EOR, FS and LS bits are
1305 : * still in the same places. We have already extracted
1306 : * the frame length and checked the OWN bit, so rather
1307 : * than using an alternate bit mapping, we shift the
1308 : * status bits one space to the right so we can evaluate
1309 : * them using the 8169 status as though it was in the
1310 : * same format as that of the 8139C+.
1311 : */
1312 0 : if (sc->sc_hwrev != RL_HWREV_8139CPLUS)
1313 0 : rxstat >>= 1;
1314 :
1315 : /*
1316 : * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
1317 : * set, but if CRC is clear, it will still be a valid frame.
1318 : */
1319 0 : if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0 &&
1320 0 : !(rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 &&
1321 0 : (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT))) {
1322 0 : ifp->if_ierrors++;
1323 : /*
1324 : * If this is part of a multi-fragment packet,
1325 : * discard all the pieces.
1326 : */
1327 0 : if (sc->rl_head != NULL) {
1328 0 : m_freem(sc->rl_head);
1329 0 : sc->rl_head = sc->rl_tail = NULL;
1330 0 : }
1331 : continue;
1332 : }
1333 :
1334 0 : if (sc->rl_head != NULL) {
1335 0 : m->m_len = total_len % RL_FRAMELEN(sc->rl_max_mtu);
1336 0 : if (m->m_len == 0)
1337 0 : m->m_len = RL_FRAMELEN(sc->rl_max_mtu);
1338 : /*
1339 : * Special case: if there's 4 bytes or less
1340 : * in this buffer, the mbuf can be discarded:
1341 : * the last 4 bytes is the CRC, which we don't
1342 : * care about anyway.
1343 : */
1344 0 : if (m->m_len <= ETHER_CRC_LEN) {
1345 0 : sc->rl_tail->m_len -=
1346 0 : (ETHER_CRC_LEN - m->m_len);
1347 0 : m_freem(m);
1348 0 : } else {
1349 0 : m->m_len -= ETHER_CRC_LEN;
1350 0 : m->m_flags &= ~M_PKTHDR;
1351 0 : sc->rl_tail->m_next = m;
1352 : }
1353 0 : m = sc->rl_head;
1354 0 : sc->rl_head = sc->rl_tail = NULL;
1355 0 : m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1356 0 : } else
1357 0 : m->m_pkthdr.len = m->m_len =
1358 0 : (total_len - ETHER_CRC_LEN);
1359 :
1360 : /* Do RX checksumming */
1361 :
1362 0 : if (sc->rl_flags & RL_FLAG_DESCV2) {
1363 : /* Check IP header checksum */
1364 0 : if ((rxvlan & RL_RDESC_IPV4) &&
1365 0 : !(rxstat & RL_RDESC_STAT_IPSUMBAD))
1366 0 : m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1367 :
1368 : /* Check TCP/UDP checksum */
1369 0 : if ((rxvlan & (RL_RDESC_IPV4|RL_RDESC_IPV6)) &&
1370 0 : (((rxstat & RL_RDESC_STAT_TCP) &&
1371 0 : !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
1372 0 : ((rxstat & RL_RDESC_STAT_UDP) &&
1373 0 : !(rxstat & RL_RDESC_STAT_UDPSUMBAD))))
1374 0 : m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
1375 : M_UDP_CSUM_IN_OK;
1376 : } else {
1377 : /* Check IP header checksum */
1378 0 : if ((rxstat & RL_RDESC_STAT_PROTOID) &&
1379 0 : !(rxstat & RL_RDESC_STAT_IPSUMBAD))
1380 0 : m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1381 :
1382 : /* Check TCP/UDP checksum */
1383 0 : if ((RL_TCPPKT(rxstat) &&
1384 0 : !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
1385 0 : (RL_UDPPKT(rxstat) &&
1386 0 : !(rxstat & RL_RDESC_STAT_UDPSUMBAD)))
1387 0 : m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
1388 : M_UDP_CSUM_IN_OK;
1389 : }
1390 : #if NVLAN > 0
1391 0 : if (rxvlan & RL_RDESC_VLANCTL_TAG) {
1392 0 : m->m_pkthdr.ether_vtag =
1393 0 : ntohs((rxvlan & RL_RDESC_VLANCTL_DATA));
1394 0 : m->m_flags |= M_VLANTAG;
1395 0 : }
1396 : #endif
1397 :
1398 0 : ml_enqueue(&ml, m);
1399 0 : }
1400 :
1401 0 : sc->rl_ldata.rl_rx_considx = i;
1402 0 : re_rx_list_fill(sc);
1403 :
1404 0 : if_input(ifp, &ml);
1405 :
1406 0 : return (rx);
1407 0 : }
1408 :
1409 : int
1410 0 : re_txeof(struct rl_softc *sc)
1411 : {
1412 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1413 : struct rl_txq *txq;
1414 : uint32_t txstat;
1415 : unsigned int prod, cons;
1416 : unsigned int idx;
1417 : int free = 0;
1418 :
1419 0 : ifp = &sc->sc_arpcom.ac_if;
1420 :
1421 0 : prod = sc->rl_ldata.rl_txq_prodidx;
1422 0 : cons = sc->rl_ldata.rl_txq_considx;
1423 :
1424 0 : while (prod != cons) {
1425 0 : txq = &sc->rl_ldata.rl_txq[cons];
1426 :
1427 0 : idx = txq->txq_descidx;
1428 0 : RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD);
1429 0 : txstat = letoh32(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat);
1430 0 : RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1431 0 : if (ISSET(txstat, RL_TDESC_CMD_OWN)) {
1432 : free = 2;
1433 0 : break;
1434 : }
1435 :
1436 0 : bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
1437 : 0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1438 0 : bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
1439 0 : m_freem(txq->txq_mbuf);
1440 0 : txq->txq_mbuf = NULL;
1441 :
1442 0 : if (txstat & (RL_TDESC_STAT_EXCESSCOL | RL_TDESC_STAT_COLCNT))
1443 0 : ifp->if_collisions++;
1444 0 : if (txstat & RL_TDESC_STAT_TXERRSUM)
1445 0 : ifp->if_oerrors++;
1446 :
1447 0 : cons = RL_NEXT_TX_DESC(sc, idx);
1448 : free = 1;
1449 : }
1450 :
1451 0 : if (free == 0)
1452 0 : return (0);
1453 :
1454 0 : sc->rl_ldata.rl_txq_considx = cons;
1455 :
1456 : /*
1457 : * Some chips will ignore a second TX request issued while an
1458 : * existing transmission is in progress. If the transmitter goes
1459 : * idle but there are still packets waiting to be sent, we need
1460 : * to restart the channel here to flush them out. This only
1461 : * seems to be required with the PCIe devices.
1462 : */
1463 0 : if (ifq_is_oactive(&ifp->if_snd))
1464 0 : ifq_restart(&ifp->if_snd);
1465 0 : else if (free == 2)
1466 0 : ifq_serialize(&ifp->if_snd, &sc->rl_start);
1467 : else
1468 0 : ifp->if_timer = 0;
1469 :
1470 0 : return (1);
1471 0 : }
1472 :
1473 : void
1474 0 : re_tick(void *xsc)
1475 : {
1476 0 : struct rl_softc *sc = xsc;
1477 : struct mii_data *mii;
1478 : int s;
1479 :
1480 0 : mii = &sc->sc_mii;
1481 :
1482 0 : s = splnet();
1483 :
1484 0 : mii_tick(mii);
1485 :
1486 0 : if ((sc->rl_flags & RL_FLAG_LINK) == 0)
1487 0 : re_miibus_statchg(&sc->sc_dev);
1488 :
1489 0 : splx(s);
1490 :
1491 0 : timeout_add_sec(&sc->timer_handle, 1);
1492 0 : }
1493 :
/*
 * Interrupt handler.  Masks interrupts, acks and dispatches the pending
 * status bits (RX, TX, system error, timer), manages the simulated
 * interrupt-moderation mode, then re-enables interrupts.  Returns
 * nonzero if the interrupt was ours.
 */
int
re_intr(void *arg)
{
	struct rl_softc *sc = arg;
	struct ifnet *ifp;
	u_int16_t status;
	int claimed = 0, rx, tx;

	ifp = &sc->sc_arpcom.ac_if;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, 0);

	rx = tx = 0;
	status = CSR_READ_2(sc, RL_ISR);
	/* If the card has gone away the read returns 0xffff. */
	if (status == 0xffff)
		return (0);
	/* Writing the status bits back acks them. */
	if (status)
		CSR_WRITE_2(sc, RL_ISR, status);

	if (status & RL_ISR_TIMEOUT_EXPIRED)
		claimed = 1;

	if (status & RL_INTRS_CPLUS) {
		if (status &
		    (sc->rl_rx_ack | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW)) {
			rx |= re_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rl_tx_ack | RL_ISR_TX_ERR)) {
			tx |= re_txeof(sc);
			claimed = 1;
		}

		/* Fatal error: reinitialize the whole chip. */
		if (status & RL_ISR_SYSTEM_ERR) {
			KERNEL_LOCK();
			re_init(ifp);
			KERNEL_UNLOCK();
			claimed = 1;
		}
	}

	if (sc->rl_imtype == RL_IMTYPE_SIM) {
		if (sc->rl_timerintr) {
			if ((tx | rx) == 0) {
				/*
				 * Nothing needs to be processed, fallback
				 * to use TX/RX interrupts.
				 */
				re_setup_intr(sc, 1, RL_IMTYPE_NONE);

				/*
				 * Recollect, mainly to avoid the possible
				 * race introduced by changing interrupt
				 * masks.
				 */
				re_rxeof(sc);
				re_txeof(sc);
			} else
				CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
		} else if (tx | rx) {
			/*
			 * Assume that using simulated interrupt moderation
			 * (hardware timer based) could reduce the interrupt
			 * rate.
			 */
			re_setup_intr(sc, 1, RL_IMTYPE_SIM);
		}
	}

	/* Re-enable the interrupts we care about. */
	CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);

	return (claimed);
}
1573 :
1574 : int
1575 0 : re_encap(struct rl_softc *sc, unsigned int idx, struct mbuf *m)
1576 : {
1577 : struct rl_txq *txq;
1578 : bus_dmamap_t map;
1579 : int error, seg, nsegs, curidx, lastidx, pad;
1580 0 : int off;
1581 : struct ip *ip;
1582 : struct rl_desc *d;
1583 : u_int32_t cmdstat, vlanctl = 0, csum_flags = 0;
1584 :
1585 : /*
1586 : * Set up checksum offload. Note: checksum offload bits must
1587 : * appear in all descriptors of a multi-descriptor transmit
1588 : * attempt. This is according to testing done with an 8169
1589 : * chip. This is a requirement.
1590 : */
1591 :
1592 : /*
1593 : * Set RL_TDESC_CMD_IPCSUM if any checksum offloading
1594 : * is requested. Otherwise, RL_TDESC_CMD_TCPCSUM/
1595 : * RL_TDESC_CMD_UDPCSUM does not take affect.
1596 : */
1597 :
1598 0 : if ((sc->rl_flags & RL_FLAG_JUMBOV2) &&
1599 0 : m->m_pkthdr.len > RL_MTU &&
1600 0 : (m->m_pkthdr.csum_flags &
1601 0 : (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) {
1602 0 : struct mbuf mh, *mp;
1603 :
1604 0 : mp = m_getptr(m, ETHER_HDR_LEN, &off);
1605 0 : mh.m_flags = 0;
1606 0 : mh.m_data = mtod(mp, caddr_t) + off;
1607 0 : mh.m_next = mp->m_next;
1608 0 : mh.m_pkthdr.len = mp->m_pkthdr.len - ETHER_HDR_LEN;
1609 0 : mh.m_len = mp->m_len - off;
1610 0 : ip = (struct ip *)mh.m_data;
1611 :
1612 0 : if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1613 0 : ip->ip_sum = in_cksum(&mh, sizeof(struct ip));
1614 0 : if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT|M_UDP_CSUM_OUT))
1615 0 : in_delayed_cksum(&mh);
1616 :
1617 0 : m->m_pkthdr.csum_flags &=
1618 : ~(M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT);
1619 0 : }
1620 :
1621 0 : if ((m->m_pkthdr.csum_flags &
1622 0 : (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) {
1623 0 : if (sc->rl_flags & RL_FLAG_DESCV2) {
1624 : vlanctl |= RL_TDESC_CMD_IPCSUMV2;
1625 0 : if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
1626 0 : vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
1627 0 : if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
1628 0 : vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
1629 : } else {
1630 : csum_flags |= RL_TDESC_CMD_IPCSUM;
1631 0 : if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
1632 0 : csum_flags |= RL_TDESC_CMD_TCPCSUM;
1633 0 : if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
1634 0 : csum_flags |= RL_TDESC_CMD_UDPCSUM;
1635 : }
1636 : }
1637 :
1638 0 : txq = &sc->rl_ldata.rl_txq[idx];
1639 0 : map = txq->txq_dmamap;
1640 :
1641 0 : error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1642 : BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1643 0 : switch (error) {
1644 : case 0:
1645 : break;
1646 :
1647 : case EFBIG:
1648 0 : if (m_defrag(m, M_DONTWAIT) == 0 &&
1649 0 : bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1650 0 : BUS_DMA_WRITE|BUS_DMA_NOWAIT) == 0)
1651 : break;
1652 :
1653 : /* FALLTHROUGH */
1654 : default:
1655 0 : return (0);
1656 : }
1657 :
1658 0 : bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1659 : BUS_DMASYNC_PREWRITE);
1660 :
1661 0 : nsegs = map->dm_nsegs;
1662 : pad = 0;
1663 :
1664 : /*
1665 : * With some of the RealTek chips, using the checksum offload
1666 : * support in conjunction with the autopadding feature results
1667 : * in the transmission of corrupt frames. For example, if we
1668 : * need to send a really small IP fragment that's less than 60
1669 : * bytes in size, and IP header checksumming is enabled, the
1670 : * resulting ethernet frame that appears on the wire will
1671 : * have garbled payload. To work around this, if TX IP checksum
1672 : * offload is enabled, we always manually pad short frames out
1673 : * to the minimum ethernet frame size.
1674 : */
1675 0 : if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
1676 0 : m->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
1677 0 : (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) != 0) {
1678 : pad = 1;
1679 0 : nsegs++;
1680 0 : }
1681 :
1682 : /*
1683 : * Set up hardware VLAN tagging. Note: vlan tag info must
1684 : * appear in all descriptors of a multi-descriptor
1685 : * transmission attempt.
1686 : */
1687 : #if NVLAN > 0
1688 0 : if (m->m_flags & M_VLANTAG)
1689 0 : vlanctl |= swap16(m->m_pkthdr.ether_vtag) |
1690 : RL_TDESC_VLANCTL_TAG;
1691 : #endif
1692 :
1693 : /*
1694 : * Map the segment array into descriptors. Note that we set the
1695 : * start-of-frame and end-of-frame markers for either TX or RX, but
1696 : * they really only have meaning in the TX case. (In the RX case,
1697 : * it's the chip that tells us where packets begin and end.)
1698 : * We also keep track of the end of the ring and set the
1699 : * end-of-ring bits as needed, and we set the ownership bits
1700 : * in all except the very first descriptor. (The caller will
1701 : * set this descriptor later when it start transmission or
1702 : * reception.)
1703 : */
1704 : curidx = idx;
1705 : cmdstat = RL_TDESC_CMD_SOF;
1706 :
1707 0 : for (seg = 0; seg < map->dm_nsegs; seg++) {
1708 0 : d = &sc->rl_ldata.rl_tx_list[curidx];
1709 :
1710 0 : RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE);
1711 :
1712 0 : d->rl_vlanctl = htole32(vlanctl);
1713 0 : re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
1714 0 : cmdstat |= csum_flags | map->dm_segs[seg].ds_len;
1715 :
1716 0 : if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1)
1717 0 : cmdstat |= RL_TDESC_CMD_EOR;
1718 :
1719 0 : d->rl_cmdstat = htole32(cmdstat);
1720 :
1721 0 : RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE);
1722 :
1723 : lastidx = curidx;
1724 : cmdstat = RL_TDESC_CMD_OWN;
1725 0 : curidx = RL_NEXT_TX_DESC(sc, curidx);
1726 : }
1727 :
1728 0 : if (pad) {
1729 0 : d = &sc->rl_ldata.rl_tx_list[curidx];
1730 :
1731 0 : RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE);
1732 :
1733 0 : d->rl_vlanctl = htole32(vlanctl);
1734 0 : re_set_bufaddr(d, RL_TXPADDADDR(sc));
1735 0 : cmdstat = csum_flags |
1736 0 : RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOF |
1737 0 : (RL_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);
1738 :
1739 0 : if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1)
1740 0 : cmdstat |= RL_TDESC_CMD_EOR;
1741 :
1742 0 : d->rl_cmdstat = htole32(cmdstat);
1743 :
1744 0 : RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE);
1745 :
1746 : lastidx = curidx;
1747 0 : }
1748 :
1749 : /* d is already pointing at the last descriptor */
1750 0 : d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
1751 :
1752 : /* Transfer ownership of packet to the chip. */
1753 0 : d = &sc->rl_ldata.rl_tx_list[idx];
1754 :
1755 0 : RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE);
1756 0 : d->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN);
1757 0 : RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE);
1758 :
1759 : /* update info of TX queue and descriptors */
1760 0 : txq->txq_mbuf = m;
1761 0 : txq->txq_descidx = lastidx;
1762 :
1763 0 : return (nsegs);
1764 0 : }
1765 :
1766 : void
1767 0 : re_txstart(void *xsc)
1768 : {
1769 0 : struct rl_softc *sc = xsc;
1770 :
1771 0 : CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
1772 0 : }
1773 :
1774 : /*
1775 : * Main transmit routine for C+ and gigE NICs.
1776 : */
1777 :
/*
 * ifq start routine: dequeue packets and encapsulate them into the TX
 * ring until the queue is empty or the ring lacks room, then kick the
 * transmitter via the rl_start task.
 */
void
re_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct rl_softc *sc = ifp->if_softc;
	struct mbuf *m;
	unsigned int idx;
	unsigned int free, used;
	int post = 0;

	/* No link: transmitting is pointless, drain the queue. */
	if (!ISSET(sc->rl_flags, RL_FLAG_LINK)) {
		ifq_purge(ifq);
		return;
	}

	/* Free descriptors = consumer index minus producer index (mod ring). */
	free = sc->rl_ldata.rl_txq_considx;
	idx = sc->rl_ldata.rl_txq_prodidx;
	if (free <= idx)
		free += sc->rl_ldata.rl_tx_desc_cnt;
	free -= idx;

	for (;;) {
		/*
		 * Stop when there may not be room for a maximally
		 * fragmented packet (rl_tx_ndescs segments) plus slack.
		 */
		if (sc->rl_ldata.rl_tx_ndescs >= free + 2) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		/* re_encap() returns descriptors used; 0 means failure. */
		used = re_encap(sc, idx, m);
		if (used == 0) {
			m_freem(m);
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		KASSERT(used <= free);
		free -= used;

		idx += used;
		if (idx >= sc->rl_ldata.rl_tx_desc_cnt)
			idx -= sc->rl_ldata.rl_tx_desc_cnt;

		post = 1;
	}

	if (post == 0)
		return;

	/* Arm the watchdog and poke the chip (serialized with re_txeof). */
	ifp->if_timer = 5;
	sc->rl_ldata.rl_txq_prodidx = idx;
	ifq_serialize(ifq, &sc->rl_start);
}
1837 :
1838 : int
1839 0 : re_init(struct ifnet *ifp)
1840 : {
1841 0 : struct rl_softc *sc = ifp->if_softc;
1842 : u_int16_t cfg;
1843 : uint32_t rxcfg;
1844 : int s;
1845 0 : union {
1846 : u_int32_t align_dummy;
1847 : u_char eaddr[ETHER_ADDR_LEN];
1848 : } eaddr;
1849 :
1850 0 : s = splnet();
1851 :
1852 : /*
1853 : * Cancel pending I/O and free all RX/TX buffers.
1854 : */
1855 0 : re_stop(ifp);
1856 :
1857 : /* Put controller into known state. */
1858 0 : re_reset(sc);
1859 :
1860 : /*
1861 : * Enable C+ RX and TX mode, as well as VLAN stripping and
1862 : * RX checksum offload. We must configure the C+ register
1863 : * before all others.
1864 : */
1865 : cfg = RL_CPLUSCMD_TXENB | RL_CPLUSCMD_PCI_MRW |
1866 : RL_CPLUSCMD_RXCSUM_ENB;
1867 :
1868 0 : if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1869 0 : cfg |= RL_CPLUSCMD_VLANSTRIP;
1870 :
1871 0 : if (sc->rl_flags & RL_FLAG_MACSTAT)
1872 0 : cfg |= RL_CPLUSCMD_MACSTAT_DIS;
1873 : else
1874 0 : cfg |= RL_CPLUSCMD_RXENB;
1875 :
1876 0 : CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
1877 :
1878 : /*
1879 : * Init our MAC address. Even though the chipset
1880 : * documentation doesn't mention it, we need to enter "Config
1881 : * register write enable" mode to modify the ID registers.
1882 : */
1883 0 : bcopy(sc->sc_arpcom.ac_enaddr, eaddr.eaddr, ETHER_ADDR_LEN);
1884 0 : CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
1885 0 : CSR_WRITE_4(sc, RL_IDR4,
1886 : htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
1887 0 : CSR_WRITE_4(sc, RL_IDR0,
1888 : htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
1889 : /*
1890 : * Default on PC Engines APU1 is to have all LEDs off unless
1891 : * there is network activity. Override to provide a link status
1892 : * LED.
1893 : */
1894 0 : if (sc->sc_hwrev == RL_HWREV_8168E &&
1895 0 : hw_vendor != NULL && hw_prod != NULL &&
1896 0 : strcmp(hw_vendor, "PC Engines") == 0 &&
1897 0 : strcmp(hw_prod, "APU") == 0) {
1898 0 : CSR_SETBIT_1(sc, RL_CFG4, RL_CFG4_CUSTOM_LED);
1899 0 : CSR_WRITE_1(sc, RL_LEDSEL, RL_LED_LINK | RL_LED_ACT << 4);
1900 0 : }
1901 : /*
1902 : * Protect config register again
1903 : */
1904 0 : CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1905 :
1906 0 : if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
1907 0 : re_set_jumbo(sc);
1908 :
1909 : /*
1910 : * For C+ mode, initialize the RX descriptors and mbufs.
1911 : */
1912 0 : re_rx_list_init(sc);
1913 0 : re_tx_list_init(sc);
1914 :
1915 : /*
1916 : * Load the addresses of the RX and TX lists into the chip.
1917 : */
1918 0 : CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
1919 : RL_ADDR_HI(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr));
1920 0 : CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
1921 : RL_ADDR_LO(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr));
1922 :
1923 0 : CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
1924 : RL_ADDR_HI(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr));
1925 0 : CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
1926 : RL_ADDR_LO(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr));
1927 :
1928 0 : if (sc->rl_flags & RL_FLAG_RXDV_GATED)
1929 0 : CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
1930 : ~0x00080000);
1931 :
1932 : /*
1933 : * Set the initial TX and RX configuration.
1934 : */
1935 0 : CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
1936 :
1937 0 : CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
1938 :
1939 : rxcfg = RL_RXCFG_CONFIG;
1940 0 : if (sc->rl_flags & RL_FLAG_EARLYOFF)
1941 0 : rxcfg |= RL_RXCFG_EARLYOFF;
1942 0 : else if (sc->rl_flags & RL_FLAG_EARLYOFFV2)
1943 0 : rxcfg |= RL_RXCFG_EARLYOFFV2;
1944 0 : CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
1945 :
1946 : /*
1947 : * Enable transmit and receive.
1948 : */
1949 0 : CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);
1950 :
1951 : /* Program promiscuous mode and multicast filters. */
1952 0 : re_iff(sc);
1953 :
1954 : /*
1955 : * Enable interrupts.
1956 : */
1957 0 : re_setup_intr(sc, 1, sc->rl_imtype);
1958 0 : CSR_WRITE_2(sc, RL_ISR, sc->rl_imtype);
1959 :
1960 : /* Start RX/TX process. */
1961 0 : CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
1962 :
1963 : /*
1964 : * For 8169 gigE NICs, set the max allowed RX packet
1965 : * size so we can receive jumbo frames.
1966 : */
1967 0 : if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
1968 0 : if (sc->rl_flags & RL_FLAG_PCIE &&
1969 0 : (sc->rl_flags & RL_FLAG_JUMBOV2) == 0)
1970 0 : CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
1971 : else
1972 0 : CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
1973 : }
1974 :
1975 0 : CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) |
1976 : RL_CFG1_DRVLOAD);
1977 :
1978 0 : ifp->if_flags |= IFF_RUNNING;
1979 0 : ifq_clr_oactive(&ifp->if_snd);
1980 :
1981 0 : splx(s);
1982 :
1983 0 : sc->rl_flags &= ~RL_FLAG_LINK;
1984 0 : mii_mediachg(&sc->sc_mii);
1985 :
1986 0 : timeout_add_sec(&sc->timer_handle, 1);
1987 :
1988 0 : return (0);
1989 0 : }
1990 :
1991 : /*
1992 : * Set media options.
1993 : */
1994 : int
1995 0 : re_ifmedia_upd(struct ifnet *ifp)
1996 : {
1997 : struct rl_softc *sc;
1998 :
1999 0 : sc = ifp->if_softc;
2000 :
2001 0 : return (mii_mediachg(&sc->sc_mii));
2002 : }
2003 :
2004 : /*
2005 : * Report current media status.
2006 : */
2007 : void
2008 0 : re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2009 : {
2010 : struct rl_softc *sc;
2011 :
2012 0 : sc = ifp->if_softc;
2013 :
2014 0 : mii_pollstat(&sc->sc_mii);
2015 0 : ifmr->ifm_active = sc->sc_mii.mii_media_active;
2016 0 : ifmr->ifm_status = sc->sc_mii.mii_media_status;
2017 0 : }
2018 :
2019 : int
2020 0 : re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2021 : {
2022 0 : struct rl_softc *sc = ifp->if_softc;
2023 0 : struct ifreq *ifr = (struct ifreq *) data;
2024 : int s, error = 0;
2025 :
2026 0 : s = splnet();
2027 :
2028 0 : switch(command) {
2029 : case SIOCSIFADDR:
2030 0 : ifp->if_flags |= IFF_UP;
2031 0 : if (!(ifp->if_flags & IFF_RUNNING))
2032 0 : re_init(ifp);
2033 : break;
2034 : case SIOCSIFFLAGS:
2035 0 : if (ifp->if_flags & IFF_UP) {
2036 0 : if (ifp->if_flags & IFF_RUNNING)
2037 0 : error = ENETRESET;
2038 : else
2039 0 : re_init(ifp);
2040 : } else {
2041 0 : if (ifp->if_flags & IFF_RUNNING)
2042 0 : re_stop(ifp);
2043 : }
2044 : break;
2045 : case SIOCGIFMEDIA:
2046 : case SIOCSIFMEDIA:
2047 0 : error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
2048 0 : break;
2049 : case SIOCGIFRXR:
2050 0 : error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
2051 0 : NULL, RL_FRAMELEN(sc->rl_max_mtu), &sc->rl_ldata.rl_rx_ring);
2052 0 : break;
2053 : default:
2054 0 : error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
2055 0 : }
2056 :
2057 0 : if (error == ENETRESET) {
2058 0 : if (ifp->if_flags & IFF_RUNNING)
2059 0 : re_iff(sc);
2060 : error = 0;
2061 0 : }
2062 :
2063 0 : splx(s);
2064 0 : return (error);
2065 : }
2066 :
2067 : void
2068 0 : re_watchdog(struct ifnet *ifp)
2069 : {
2070 : struct rl_softc *sc;
2071 : int s;
2072 :
2073 0 : sc = ifp->if_softc;
2074 0 : s = splnet();
2075 0 : printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2076 :
2077 0 : re_init(ifp);
2078 :
2079 0 : splx(s);
2080 0 : }
2081 :
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
re_stop(struct ifnet *ifp)
{
	struct rl_softc *sc;
	int i;

	sc = ifp->if_softc;

	/* Cancel pending watchdog/timer activity and mark us down. */
	ifp->if_timer = 0;
	sc->rl_flags &= ~RL_FLAG_LINK;
	sc->rl_timerintr = 0;

	timeout_del(&sc->timer_handle);
	ifp->if_flags &= ~IFF_RUNNING;

	/*
	 * Disable accepting frames to put RX MAC into idle state.
	 * Otherwise it's possible to get frames while stop command
	 * execution is in progress and controller can DMA the frame
	 * to already freed RX buffer during that period.
	 */
	CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) &
	    ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | RL_RXCFG_RX_INDIV |
	    RL_RXCFG_RX_MULTI));

	/*
	 * Chip-revision-specific quiesce: some variants must drain the
	 * TX poll request or the TX queue before the stop command is
	 * safe to issue.  The register-write order here is deliberate.
	 */
	if (sc->rl_flags & RL_FLAG_WAIT_TXPOLL) {
		/* Wait for a pending TX poll request to complete. */
		for (i = RL_TIMEOUT; i > 0; i--) {
			if ((CSR_READ_1(sc, sc->rl_txstart) &
			    RL_TXSTART_START) == 0)
				break;
			DELAY(20);
		}
		if (i == 0)
			printf("%s: stopping TX poll timed out!\n",
			    sc->sc_dev.dv_xname);
		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	} else if (sc->rl_flags & RL_FLAG_CMDSTOP) {
		/* Issue the stop request while TX/RX remain enabled. */
		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
		    RL_CMD_RX_ENB);
		if (sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) {
			/* Wait until the TX queue reports empty. */
			for (i = RL_TIMEOUT; i > 0; i--) {
				if ((CSR_READ_4(sc, RL_TXCFG) &
				    RL_TXCFG_QUEUE_EMPTY) != 0)
					break;
				DELAY(100);
			}
			if (i == 0)
				printf("%s: stopping TXQ timed out!\n",
				    sc->sc_dev.dv_xname);
		}
	} else
		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	DELAY(1000);
	/* Mask all interrupts and ack anything still pending. */
	CSR_WRITE_2(sc, RL_IMR, 0x0000);
	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);

	/*
	 * Make sure any in-flight interrupt handler and queued TX work
	 * have finished before we start freeing their buffers below.
	 */
	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	ifq_clr_oactive(&ifp->if_snd);
	mii_down(&sc->sc_mii);

	/* Drop any partially reassembled RX packet chain. */
	if (sc->rl_head != NULL) {
		m_freem(sc->rl_head);
		sc->rl_head = sc->rl_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		if (sc->rl_ldata.rl_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rl_ldata.rl_txq[i].txq_dmamap);
			m_freem(sc->rl_ldata.rl_txq[i].txq_mbuf);
			sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		if (sc->rl_ldata.rl_rxsoft[i].rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
			m_freem(sc->rl_ldata.rl_rxsoft[i].rxs_mbuf);
			sc->rl_ldata.rl_rxsoft[i].rxs_mbuf = NULL;
		}
	}
}
2173 :
2174 : void
2175 0 : re_setup_hw_im(struct rl_softc *sc)
2176 : {
2177 0 : KASSERT(sc->rl_flags & RL_FLAG_HWIM);
2178 :
2179 : /*
2180 : * Interrupt moderation
2181 : *
2182 : * 0xABCD
2183 : * A - unknown (maybe TX related)
2184 : * B - TX timer (unit: 25us)
2185 : * C - unknown (maybe RX related)
2186 : * D - RX timer (unit: 25us)
2187 : *
2188 : *
2189 : * re(4)'s interrupt moderation is actually controlled by
2190 : * two variables, like most other NICs (bge, bnx etc.)
2191 : * o timer
2192 : * o number of packets [P]
2193 : *
2194 : * The logic relationship between these two variables is
2195 : * similar to other NICs too:
2196 : * if (timer expire || packets > [P])
2197 : * Interrupt is delivered
2198 : *
2199 : * Currently we only know how to set 'timer', but not
2200 : * 'number of packets', which should be ~30, as far as I
2201 : * tested (sink ~900Kpps, interrupt rate is 30KHz)
2202 : */
2203 0 : CSR_WRITE_2(sc, RL_IM,
2204 : RL_IM_RXTIME(sc->rl_rx_time) |
2205 : RL_IM_TXTIME(sc->rl_tx_time) |
2206 : RL_IM_MAGIC);
2207 0 : }
2208 :
2209 : void
2210 0 : re_disable_hw_im(struct rl_softc *sc)
2211 : {
2212 0 : if (sc->rl_flags & RL_FLAG_HWIM)
2213 0 : CSR_WRITE_2(sc, RL_IM, 0);
2214 0 : }
2215 :
2216 : void
2217 0 : re_setup_sim_im(struct rl_softc *sc)
2218 : {
2219 0 : if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
2220 0 : CSR_WRITE_4(sc, RL_TIMERINT, 0x400); /* XXX */
2221 : else {
2222 : u_int32_t nticks;
2223 :
2224 : /*
2225 : * Datasheet says tick decreases at bus speed,
2226 : * but it seems the clock runs a little bit
2227 : * faster, so we do some compensation here.
2228 : */
2229 0 : nticks = (sc->rl_sim_time * sc->rl_bus_speed * 8) / 5;
2230 0 : CSR_WRITE_4(sc, RL_TIMERINT_8169, nticks);
2231 : }
2232 0 : CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
2233 0 : sc->rl_timerintr = 1;
2234 0 : }
2235 :
2236 : void
2237 0 : re_disable_sim_im(struct rl_softc *sc)
2238 : {
2239 0 : if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
2240 0 : CSR_WRITE_4(sc, RL_TIMERINT, 0);
2241 : else
2242 0 : CSR_WRITE_4(sc, RL_TIMERINT_8169, 0);
2243 0 : sc->rl_timerintr = 0;
2244 0 : }
2245 :
2246 : void
2247 0 : re_config_imtype(struct rl_softc *sc, int imtype)
2248 : {
2249 0 : switch (imtype) {
2250 : case RL_IMTYPE_HW:
2251 0 : KASSERT(sc->rl_flags & RL_FLAG_HWIM);
2252 : /* FALLTHROUGH */
2253 : case RL_IMTYPE_NONE:
2254 0 : sc->rl_intrs = RL_INTRS_CPLUS;
2255 0 : sc->rl_rx_ack = RL_ISR_RX_OK | RL_ISR_FIFO_OFLOW |
2256 : RL_ISR_RX_OVERRUN;
2257 0 : sc->rl_tx_ack = RL_ISR_TX_OK;
2258 0 : break;
2259 :
2260 : case RL_IMTYPE_SIM:
2261 0 : sc->rl_intrs = RL_INTRS_TIMER;
2262 0 : sc->rl_rx_ack = RL_ISR_TIMEOUT_EXPIRED;
2263 0 : sc->rl_tx_ack = RL_ISR_TIMEOUT_EXPIRED;
2264 0 : break;
2265 :
2266 : default:
2267 0 : panic("%s: unknown imtype %d",
2268 0 : sc->sc_dev.dv_xname, imtype);
2269 : }
2270 0 : }
2271 :
2272 : void
2273 0 : re_set_jumbo(struct rl_softc *sc)
2274 : {
2275 0 : CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
2276 0 : CSR_WRITE_1(sc, RL_CFG3, CSR_READ_1(sc, RL_CFG3) |
2277 : RL_CFG3_JUMBO_EN0);
2278 :
2279 0 : switch (sc->sc_hwrev) {
2280 : case RL_HWREV_8168DP:
2281 : break;
2282 : case RL_HWREV_8168E:
2283 0 : CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) |
2284 : RL_CFG4_8168E_JUMBO_EN1);
2285 0 : break;
2286 : default:
2287 0 : CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) |
2288 : RL_CFG4_JUMBO_EN1);
2289 0 : break;
2290 : }
2291 :
2292 0 : CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2293 0 : }
2294 :
2295 : void
2296 0 : re_setup_intr(struct rl_softc *sc, int enable_intrs, int imtype)
2297 : {
2298 0 : re_config_imtype(sc, imtype);
2299 :
2300 0 : if (enable_intrs)
2301 0 : CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);
2302 : else
2303 0 : CSR_WRITE_2(sc, RL_IMR, 0);
2304 :
2305 0 : switch (imtype) {
2306 : case RL_IMTYPE_NONE:
2307 0 : re_disable_sim_im(sc);
2308 0 : re_disable_hw_im(sc);
2309 0 : break;
2310 :
2311 : case RL_IMTYPE_HW:
2312 0 : KASSERT(sc->rl_flags & RL_FLAG_HWIM);
2313 0 : re_disable_sim_im(sc);
2314 0 : re_setup_hw_im(sc);
2315 0 : break;
2316 :
2317 : case RL_IMTYPE_SIM:
2318 0 : re_disable_hw_im(sc);
2319 0 : re_setup_sim_im(sc);
2320 0 : break;
2321 :
2322 : default:
2323 0 : panic("%s: unknown imtype %d",
2324 0 : sc->sc_dev.dv_xname, imtype);
2325 : }
2326 0 : }
2327 :
2328 : #ifndef SMALL_KERNEL
2329 : int
2330 0 : re_wol(struct ifnet *ifp, int enable)
2331 : {
2332 0 : struct rl_softc *sc = ifp->if_softc;
2333 : u_int8_t val;
2334 :
2335 0 : if (enable) {
2336 0 : if ((CSR_READ_1(sc, sc->rl_cfg1) & RL_CFG1_PME) == 0) {
2337 0 : printf("%s: power management is disabled, "
2338 0 : "cannot do WOL\n", sc->sc_dev.dv_xname);
2339 0 : return (ENOTSUP);
2340 : }
2341 0 : if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_AUXPWR) == 0)
2342 0 : printf("%s: no auxiliary power, cannot do WOL from D3 "
2343 0 : "(power-off) state\n", sc->sc_dev.dv_xname);
2344 : }
2345 :
2346 0 : re_iff(sc);
2347 :
2348 : /* Temporarily enable write to configuration registers. */
2349 0 : CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
2350 :
2351 : /* Always disable all wake events except magic packet. */
2352 0 : if (enable) {
2353 : val = CSR_READ_1(sc, sc->rl_cfg5);
2354 : val &= ~(RL_CFG5_WOL_UCAST | RL_CFG5_WOL_MCAST |
2355 : RL_CFG5_WOL_BCAST);
2356 : CSR_WRITE_1(sc, sc->rl_cfg5, val);
2357 :
2358 : val = CSR_READ_1(sc, sc->rl_cfg3);
2359 0 : val |= RL_CFG3_WOL_MAGIC;
2360 0 : val &= ~RL_CFG3_WOL_LINK;
2361 0 : CSR_WRITE_1(sc, sc->rl_cfg3, val);
2362 0 : } else {
2363 : val = CSR_READ_1(sc, sc->rl_cfg5);
2364 : val &= ~(RL_CFG5_WOL_UCAST | RL_CFG5_WOL_MCAST |
2365 : RL_CFG5_WOL_BCAST);
2366 : CSR_WRITE_1(sc, sc->rl_cfg5, val);
2367 :
2368 : val = CSR_READ_1(sc, sc->rl_cfg3);
2369 0 : val &= ~(RL_CFG3_WOL_MAGIC | RL_CFG3_WOL_LINK);
2370 0 : CSR_WRITE_1(sc, sc->rl_cfg3, val);
2371 : }
2372 :
2373 0 : CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2374 :
2375 0 : return (0);
2376 0 : }
2377 : #endif
|