/*	$OpenBSD: if_se.c,v 1.20 2017/01/22 10:17:38 dlg Exp $	*/

/*-
 * Copyright (c) 2009, 2010 Christopher Zimmermann <madroach@zakweb.de>
 * Copyright (c) 2008, 2009, 2010 Nikolay Denev <ndenev@gmail.com>
 * Copyright (c) 2007, 2008 Alexander Pohoyda <alexander.pohoyda@gmx.net>
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHORS OR
 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * SiS 190/191 PCI Ethernet NIC driver.
 *
 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original
 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by
 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu
 * <kmliu@sis.com>. Thanks to Pyun YongHyeon <pyunyh@gmail.com> for
 * review and very useful comments.
 *
 * Ported to OpenBSD by Christopher Zimmermann 2009/10
 *
 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the
 * Linux and Solaris drivers.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/if_sereg.h>

#define SE_RX_RING_CNT		256	/* [8, 1024] */
#define SE_TX_RING_CNT		256	/* [8, 8192] */
#define SE_RX_BUF_ALIGN		sizeof(uint64_t)

#define SE_RX_RING_SZ		(SE_RX_RING_CNT * sizeof(struct se_desc))
#define SE_TX_RING_SZ		(SE_TX_RING_CNT * sizeof(struct se_desc))

struct se_list_data {
	struct se_desc		*se_rx_ring;
	struct se_desc		*se_tx_ring;
	bus_dmamap_t		se_rx_dmamap;
	bus_dmamap_t		se_tx_dmamap;
};

struct se_chain_data {
	struct mbuf		*se_rx_mbuf[SE_RX_RING_CNT];
	struct mbuf		*se_tx_mbuf[SE_TX_RING_CNT];
	bus_dmamap_t		se_rx_map[SE_RX_RING_CNT];
	bus_dmamap_t		se_tx_map[SE_TX_RING_CNT];
	uint			se_rx_prod;
	uint			se_tx_prod;
	uint			se_tx_cons;
	uint			se_tx_cnt;
};

struct se_softc {
	struct device		 sc_dev;
	void			*sc_ih;
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_ioh;
	bus_dma_tag_t		 sc_dmat;

	struct mii_data		 sc_mii;
	struct arpcom		 sc_ac;

	struct se_list_data	 se_ldata;
	struct se_chain_data	 se_cdata;

	struct timeout		 sc_tick_tmo;

	int			 sc_flags;
#define SE_FLAG_FASTETHER	0x0001
#define SE_FLAG_RGMII		0x0010
#define SE_FLAG_LINK		0x8000
};

/*
 * Various supported device vendors/types and their names.
 */
const struct pci_matchid se_devices[] = {
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190 },
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_191 }
};

int	se_match(struct device *, void *, void *);
void	se_attach(struct device *, struct device *, void *);
int	se_activate(struct device *, int);

const struct cfattach se_ca = {
	sizeof(struct se_softc),
	se_match, se_attach, NULL, se_activate
};

struct cfdriver se_cd = {
	0, "se", DV_IFNET
};

uint32_t
	se_miibus_cmd(struct se_softc *, uint32_t);
int	se_miibus_readreg(struct device *, int, int);
void	se_miibus_writereg(struct device *, int, int, int);
void	se_miibus_statchg(struct device *);

int	se_newbuf(struct se_softc *, uint);
void	se_discard_rxbuf(struct se_softc *, uint);
int	se_encap(struct se_softc *, struct mbuf *, uint *);
void	se_rxeof(struct se_softc *);
void	se_txeof(struct se_softc *);
int	se_intr(void *);
void	se_tick(void *);
void	se_start(struct ifnet *);
int	se_ioctl(struct ifnet *, u_long, caddr_t);
int	se_init(struct ifnet *);
void	se_stop(struct se_softc *);
void	se_watchdog(struct ifnet *);
int	se_ifmedia_upd(struct ifnet *);
void	se_ifmedia_sts(struct ifnet *, struct ifmediareq *);

int	se_pcib_match(struct pci_attach_args *);
int	se_get_mac_addr_apc(struct se_softc *, uint8_t *);
int	se_get_mac_addr_eeprom(struct se_softc *, uint8_t *);
uint16_t
	se_read_eeprom(struct se_softc *, int);

void	se_iff(struct se_softc *);
void	se_reset(struct se_softc *);
int	se_list_rx_init(struct se_softc *);
int	se_list_rx_free(struct se_softc *);
int	se_list_tx_init(struct se_softc *);
int	se_list_tx_free(struct se_softc *);

/*
 * Register space access macros.
 */

#define CSR_WRITE_4(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, val)
#define CSR_WRITE_2(sc, reg, val) \
	bus_space_write_2((sc)->sc_iot, (sc)->sc_ioh, reg, val)
#define CSR_WRITE_1(sc, reg, val) \
	bus_space_write_1((sc)->sc_iot, (sc)->sc_ioh, reg, val)

#define CSR_READ_4(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg)
#define CSR_READ_2(sc, reg) \
	bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, reg)
#define CSR_READ_1(sc, reg) \
	bus_space_read_1((sc)->sc_iot, (sc)->sc_ioh, reg)

/*
 * Read a 16-bit word from the EEPROM at the given offset.
 */
uint16_t
se_read_eeprom(struct se_softc *sc, int offset)
{
	uint32_t val;
	int i;

	KASSERT(offset <= EI_OFFSET);

	CSR_WRITE_4(sc, ROMInterface,
	    EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT));
	DELAY(500);
	for (i = 0; i < SE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, ROMInterface);
		if ((val & EI_REQ) == 0)
			break;
		DELAY(100);
	}
	if (i == SE_TIMEOUT) {
		printf("%s: EEPROM read timeout: 0x%08x\n",
		    sc->sc_dev.dv_xname, val);
		return 0xffff;
	}

	return (val & EI_DATA) >> EI_DATA_SHIFT;
}

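/*
 * Assemble the station address from EEPROM words after validating the
 * EEPROM signature, and pick up the RGMII flag from the info word.
 */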
int
se_get_mac_addr_eeprom(struct se_softc *sc, uint8_t *dest)
{
	uint16_t val;
	int i;

	val = se_read_eeprom(sc, EEPROMSignature);
	if (val == 0xffff || val == 0x0000) {
		printf("%s: invalid EEPROM signature: 0x%04x\n",
		    sc->sc_dev.dv_xname, val);
		return (EINVAL);
	}

	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
		val = se_read_eeprom(sc, EEPROMMACAddr + i / 2);
		dest[i + 0] = (uint8_t)val;
		dest[i + 1] = (uint8_t)(val >> 8);
	}

	if ((se_read_eeprom(sc, EEPROMInfo) & 0x80) != 0)
		sc->sc_flags |= SE_FLAG_RGMII;
	return (0);
}

/*
 * For SiS96x, APC CMOS RAM is used to store the Ethernet address.
 * APC CMOS RAM is accessed through the ISA bridge.
 */
#if defined(__amd64__) || defined(__i386__)
int
se_pcib_match(struct pci_attach_args *pa)
{
	const struct pci_matchid apc_devices[] = {
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_965 },
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_966 },
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_968 }
	};

	return pci_matchbyid(pa, apc_devices, nitems(apc_devices));
}
#endif

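/*
 * Read the station address (and RGMII flag) out of APC CMOS RAM,
 * reached through I/O ports 0x78/0x79 on the PCI-ISA bridge.
 */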
int
se_get_mac_addr_apc(struct se_softc *sc, uint8_t *dest)
{
#if defined(__amd64__) || defined(__i386__)
	struct pci_attach_args pa;
	pcireg_t reg;
	bus_space_handle_t ioh;
	int rc, i;

	if (pci_find_device(&pa, se_pcib_match) == 0) {
		printf("\n%s: couldn't find PCI-ISA bridge\n",
		    sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/* Enable ports 0x78 and 0x79 to access the APC registers. */
	reg = pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48);
	pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg & ~0x02);
	DELAY(50);
	(void)pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48);

	/* XXX this abuses bus_space implementation knowledge */
	rc = _bus_space_map(pa.pa_iot, 0x78, 2, 0, &ioh);
	if (rc == 0) {
		/* Read the stored Ethernet address. */
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			bus_space_write_1(pa.pa_iot, ioh, 0, 0x09 + i);
			dest[i] = bus_space_read_1(pa.pa_iot, ioh, 1);
		}
		bus_space_write_1(pa.pa_iot, ioh, 0, 0x12);
		if ((bus_space_read_1(pa.pa_iot, ioh, 1) & 0x80) != 0)
			sc->sc_flags |= SE_FLAG_RGMII;
		_bus_space_unmap(pa.pa_iot, ioh, 2, NULL);
	} else
		rc = EINVAL;

	/* Restore access to the APC registers. */
	pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg);

	return rc;
#endif
	return EINVAL;
}

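/*
 * Issue a command on the GMII interface and busy-wait for the chip to
 * clear the request bit, returning the final control register value.
 */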
uint32_t
se_miibus_cmd(struct se_softc *sc, uint32_t ctrl)
{
	int i;
	uint32_t val;

	CSR_WRITE_4(sc, GMIIControl, ctrl);
	DELAY(10);
	for (i = 0; i < SE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			return val;
		DELAY(10);
	}

	return GMI_REQ;
}

int
se_miibus_readreg(struct device *self, int phy, int reg)
{
	struct se_softc *sc = (struct se_softc *)self;
	uint32_t ctrl, val;

	ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) |
	    GMI_OP_RD | GMI_REQ;
	val = se_miibus_cmd(sc, ctrl);
	if ((val & GMI_REQ) != 0) {
		printf("%s: PHY read timeout: %d\n",
		    sc->sc_dev.dv_xname, reg);
		return 0;
	}
	return (val & GMI_DATA) >> GMI_DATA_SHIFT;
}

void
se_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct se_softc *sc = (struct se_softc *)self;
	uint32_t ctrl, val;

	ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) |
	    GMI_OP_WR | (data << GMI_DATA_SHIFT) | GMI_REQ;
	val = se_miibus_cmd(sc, ctrl);
	if ((val & GMI_REQ) != 0) {
		printf("%s: PHY write timeout: %d\n",
		    sc->sc_dev.dv_xname, reg);
	}
}

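/*
 * MII status change callback: record the new link state and reprogram
 * the MAC for the negotiated speed, duplex and flow control settings.
 */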
void
se_miibus_statchg(struct device *self)
{
	struct se_softc *sc = (struct se_softc *)self;
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct mii_data *mii = &sc->sc_mii;
	uint32_t ctl, speed;

	speed = 0;
	sc->sc_flags &= ~SE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 10baseT link\n", ifp->if_xname);
#endif
			sc->sc_flags |= SE_FLAG_LINK;
			speed = SC_SPEED_10;
			break;
		case IFM_100_TX:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 100baseTX link\n", ifp->if_xname);
#endif
			sc->sc_flags |= SE_FLAG_LINK;
			speed = SC_SPEED_100;
			break;
		case IFM_1000_T:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 1000baseT link\n", ifp->if_xname);
#endif
			if ((sc->sc_flags & SE_FLAG_FASTETHER) == 0) {
				sc->sc_flags |= SE_FLAG_LINK;
				speed = SC_SPEED_1000;
			}
			break;
		default:
			break;
		}
	}
	if ((sc->sc_flags & SE_FLAG_LINK) == 0) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: no link\n", ifp->if_xname);
#endif
		return;
	}
	/* Reprogram MAC to the resolved speed/duplex/flow-control parameters. */
	ctl = CSR_READ_4(sc, StationControl);
	ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
	if (speed == SC_SPEED_1000)
		ctl |= 0x07000000;
	else
		ctl |= 0x04000000;
#ifdef notyet
	if ((sc->sc_flags & SE_FLAG_GMII) != 0)
		ctl |= 0x03000000;
#endif
	ctl |= speed;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		ctl |= SC_FDX;
	CSR_WRITE_4(sc, StationControl, ctl);
	if ((sc->sc_flags & SE_FLAG_RGMII) != 0) {
		CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
		CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
	}
}

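/*
 * Program the receive filter: station address, broadcast, promiscuous
 * mode and the 64-bit multicast hash table.
 */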
void
se_iff(struct se_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hashes[2];
	uint16_t rxfilt;

	rxfilt = CSR_READ_2(sc, RxMacControl);
	rxfilt &= ~(AcceptAllPhys | AcceptBroadcast | AcceptMulticast);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= AcceptBroadcast | AcceptMyPhys;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= AcceptAllPhys;
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

			hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_2(sc, RxMacControl, rxfilt);
	CSR_WRITE_4(sc, RxHashTable, hashes[0]);
	CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
}

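/*
 * Soft-reset the chip, leaving the MAC stopped and interrupts masked.
 */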
void
se_reset(struct se_softc *sc)
{
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	/* Soft reset. */
	CSR_WRITE_4(sc, IntrControl, 0x8000);
	CSR_READ_4(sc, IntrControl);
	DELAY(100);
	CSR_WRITE_4(sc, IntrControl, 0);
	/* Stop MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	CSR_WRITE_4(sc, GMIIControl, 0);
}

/*
 * Probe for an SiS chip. Check the PCI vendor and device
 * IDs against our list and return nonzero if we find a match.
 */
int
se_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	return pci_matchbyid(pa, se_devices, nitems(se_devices));
}

/*
 * Attach the interface. Do ifmedia setup and ethernet/BPF attach.
 */
void
se_attach(struct device *parent, struct device *self, void *aux)
{
	struct se_softc *sc = (struct se_softc *)self;
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	uint8_t eaddr[ETHER_ADDR_LEN];
	const char *intrstr;
	pci_intr_handle_t ih;
	bus_size_t iosize;
	bus_dma_segment_t seg;
	struct se_list_data *ld;
	struct se_chain_data *cd;
	int nseg;
	uint i;
	int rc;

	printf(": ");

	/*
	 * Map control/status registers.
	 */

	rc = pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &iosize, 0);
	if (rc != 0) {
		printf("can't map i/o space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		printf("can't map interrupt\n");
		goto fail1;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, se_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf("can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail1;
	}

	printf("%s", intrstr);

	if (pa->pa_id == PCI_ID_CODE(PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190))
		sc->sc_flags |= SE_FLAG_FASTETHER;

	/* Reset the adapter. */
	se_reset(sc);

	/* Get the MAC address from APC CMOS RAM or the EEPROM. */
	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, 0x70) & (0x01 << 24)) != 0)
		se_get_mac_addr_apc(sc, eaddr);
	else
		se_get_mac_addr_eeprom(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));
	bcopy(eaddr, ac->ac_enaddr, ETHER_ADDR_LEN);

	/*
	 * Now do all the DMA mapping stuff
	 */

	sc->sc_dmat = pa->pa_dmat;
	ld = &sc->se_ldata;
	cd = &sc->se_cdata;

	/* First create TX/RX busdma maps. */
	for (i = 0; i < SE_RX_RING_CNT; i++) {
		rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &cd->se_rx_map[i]);
		if (rc != 0) {
			printf("%s: cannot init the RX map array\n",
			    self->dv_xname);
			goto fail2;
		}
	}

	for (i = 0; i < SE_TX_RING_CNT; i++) {
		rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &cd->se_tx_map[i]);
		if (rc != 0) {
			printf("%s: cannot init the TX map array\n",
			    self->dv_xname);
			goto fail2;
		}
	}

	/*
	 * Now allocate a chunk of DMA-able memory for RX and TX ring
	 * descriptors, as a contiguous block of memory.
	 * XXX fix deallocation upon error
	 */

	/* RX */
	rc = bus_dmamem_alloc(sc->sc_dmat, SE_RX_RING_SZ, PAGE_SIZE, 0,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: no memory for RX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_RX_RING_SZ,
	    (caddr_t *)&ld->se_rx_ring, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't map RX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_create(sc->sc_dmat, SE_RX_RING_SZ, 1,
	    SE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_rx_dmamap);
	if (rc != 0) {
		printf("%s: can't alloc RX DMA map\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_load(sc->sc_dmat, ld->se_rx_dmamap,
	    (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't load RX DMA map\n", self->dv_xname);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ);
		bus_dmamap_destroy(sc->sc_dmat, ld->se_rx_dmamap);
		bus_dmamem_free(sc->sc_dmat, &seg, nseg);
		goto fail2;
	}

	/* TX */
	rc = bus_dmamem_alloc(sc->sc_dmat, SE_TX_RING_SZ, PAGE_SIZE, 0,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: no memory for TX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_TX_RING_SZ,
	    (caddr_t *)&ld->se_tx_ring, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't map TX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_create(sc->sc_dmat, SE_TX_RING_SZ, 1,
	    SE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_tx_dmamap);
	if (rc != 0) {
		printf("%s: can't alloc TX DMA map\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_load(sc->sc_dmat, ld->se_tx_dmamap,
	    (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't load TX DMA map\n", self->dv_xname);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ);
		bus_dmamap_destroy(sc->sc_dmat, ld->se_tx_dmamap);
		bus_dmamem_free(sc->sc_dmat, &seg, nseg);
		goto fail2;
	}

	timeout_set(&sc->sc_tick_tmo, se_tick, sc);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = se_ioctl;
	ifp->if_start = se_start;
	ifp->if_watchdog = se_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, SE_TX_RING_CNT - 1);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Do MII setup.
	 */

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = se_miibus_readreg;
	sc->sc_mii.mii_writereg = se_miibus_writereg;
	sc->sc_mii.mii_statchg = se_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, se_ifmedia_upd,
	    se_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	return;

fail2:
	pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
fail1:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
}

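/*
 * Handle device activation requests from the autoconf framework,
 * stopping the chip on suspend and reinitializing it on resume.
 */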
int
se_activate(struct device *self, int act)
{
	struct se_softc *sc = (struct se_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			se_stop(sc);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			(void)se_init(ifp);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}

	return (rv);
}

/*
 * Initialize the TX descriptors.
 */
int
se_list_tx_init(struct se_softc *sc)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;

	bzero(ld->se_tx_ring, SE_TX_RING_SZ);
	ld->se_tx_ring[SE_TX_RING_CNT - 1].se_flags = htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	cd->se_tx_prod = 0;
	cd->se_tx_cons = 0;
	cd->se_tx_cnt = 0;

	return 0;
}

int
se_list_tx_free(struct se_softc *sc)
{
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	for (i = 0; i < SE_TX_RING_CNT; i++) {
		if (cd->se_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]);
			m_free(cd->se_tx_mbuf[i]);
			cd->se_tx_mbuf[i] = NULL;
		}
	}

	return 0;
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.
 */
int
se_list_rx_init(struct se_softc *sc)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	bzero(ld->se_rx_ring, SE_RX_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	for (i = 0; i < SE_RX_RING_CNT; i++) {
		if (se_newbuf(sc, i) != 0)
			return ENOBUFS;
	}

	cd->se_rx_prod = 0;

	return 0;
}

int
se_list_rx_free(struct se_softc *sc)
{
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	for (i = 0; i < SE_RX_RING_CNT; i++) {
		if (cd->se_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, cd->se_rx_map[i]);
			m_free(cd->se_rx_mbuf[i]);
			cd->se_rx_mbuf[i] = NULL;
		}
	}

	return 0;
}

/*
 * Initialize an RX descriptor and attach an mbuf cluster.
 */
int
se_newbuf(struct se_softc *sc, uint i)
{
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *desc;
	struct mbuf *m;
	int rc;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (m == NULL) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: MCLGETI failed\n", ifp->if_xname);
#endif
		return ENOBUFS;
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, SE_RX_BUF_ALIGN);

	rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_rx_map[i],
	    m, BUS_DMA_NOWAIT);
	if (rc != 0) {
		m_freem(m);
		return ENOBUFS;
	}
	/* Only assert the segment count once the map is known to be loaded. */
	KASSERT(cd->se_rx_map[i]->dm_nsegs == 1);
	bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0,
	    cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	cd->se_rx_mbuf[i] = m;
	desc = &ld->se_rx_ring[i];
	desc->se_sts_size = 0;
	desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR);
	desc->se_ptr = htole32((uint32_t)cd->se_rx_map[i]->dm_segs[0].ds_addr);
	desc->se_flags = htole32(cd->se_rx_map[i]->dm_segs[0].ds_len);
	if (i == SE_RX_RING_CNT - 1)
		desc->se_flags |= htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc),
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}

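/*
 * Return an RX descriptor to the hardware without replacing its mbuf,
 * so the existing cluster is re-used for the next received frame.
 */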
void
se_discard_rxbuf(struct se_softc *sc, uint i)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_desc *desc;

	desc = &ld->se_rx_ring[i];
	desc->se_sts_size = 0;
	desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR);
	desc->se_flags = htole32(MCLBYTES - SE_RX_BUF_ALIGN);
	if (i == SE_RX_RING_CNT - 1)
		desc->se_flags |= htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc),
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
se_rxeof(struct se_softc *sc)
{
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *cur_rx;
	uint32_t rxinfo, rxstat;
	uint i;

	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (i = cd->se_rx_prod; ; SE_INC(i, SE_RX_RING_CNT)) {
		cur_rx = &ld->se_rx_ring[i];
		rxinfo = letoh32(cur_rx->se_cmdsts);
		if ((rxinfo & RDC_OWN) != 0)
			break;
		rxstat = letoh32(cur_rx->se_sts_size);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if ((rxstat & RDS_CRCOK) == 0 || SE_RX_ERROR(rxstat) != 0 ||
		    SE_RX_NSEGS(rxstat) != 1) {
			/* XXX We don't support multi-segment frames yet. */
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: rx error %b\n",
				    ifp->if_xname, rxstat, RX_ERR_BITS);
			se_discard_rxbuf(sc, i);
			ifp->if_ierrors++;
			continue;
		}

		/* No errors; receive the packet. */
		bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0,
		    cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);
		m = cd->se_rx_mbuf[i];
		if (se_newbuf(sc, i) != 0) {
			se_discard_rxbuf(sc, i);
			ifp->if_iqdrops++;
			continue;
		}
		/*
		 * Account for the 10 bytes of auto padding used to align
		 * the IP header on a 32-bit boundary. Also note that the
		 * CRC bytes are automatically removed by the hardware.
		 */
		m->m_data += SE_RX_PAD_BYTES;
		m->m_pkthdr.len = m->m_len =
		    SE_RX_BYTES(rxstat) - SE_RX_PAD_BYTES;

		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	cd->se_rx_prod = i;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
se_txeof(struct se_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *cur_tx;
	uint32_t txstat;
	uint i;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (i = cd->se_tx_cons; cd->se_tx_cnt > 0;
	    cd->se_tx_cnt--, SE_INC(i, SE_TX_RING_CNT)) {
		cur_tx = &ld->se_tx_ring[i];
		txstat = letoh32(cur_tx->se_cmdsts);
		if ((txstat & TDC_OWN) != 0)
			break;

		ifq_clr_oactive(&ifp->if_snd);

		if (SE_TX_ERROR(txstat) != 0) {
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: tx error %b\n",
				    ifp->if_xname, txstat, TX_ERR_BITS);
			ifp->if_oerrors++;
			/* TODO: better error differentiation */
		}

		if (cd->se_tx_mbuf[i] != NULL) {
			bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0,
			    cd->se_tx_map[i]->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]);
			m_free(cd->se_tx_mbuf[i]);
			cd->se_tx_mbuf[i] = NULL;
		}

		cur_tx->se_sts_size = 0;
		cur_tx->se_cmdsts = 0;
		cur_tx->se_ptr = 0;
		cur_tx->se_flags &= htole32(RING_END);
		bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap,
		    i * sizeof(*cur_tx), sizeof(*cur_tx),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	cd->se_tx_cons = i;
	if (cd->se_tx_cnt == 0)
		ifp->if_timer = 0;
}

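/*
 * One second timer: drive the MII state machine and restart the
 * transmit queue once the link comes up.
 */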
void
se_tick(void *xsc)
{
	struct se_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int s;

	s = splnet();
	mii = &sc->sc_mii;
	mii_tick(mii);
	if ((sc->sc_flags & SE_FLAG_LINK) == 0) {
		se_miibus_statchg(&sc->sc_dev);
		if ((sc->sc_flags & SE_FLAG_LINK) != 0 &&
		    !IFQ_IS_EMPTY(&ifp->if_snd))
			se_start(ifp);
	}
	splx(s);

	timeout_add_sec(&sc->sc_tick_tmo, 1);
}

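/*
 * Interrupt handler: acknowledge and service RX and TX events until
 * the chip reports no further pending interrupts.
 */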
int
se_intr(void *arg)
{
	struct se_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t status;

	status = CSR_READ_4(sc, IntrStatus);
	if (status == 0xffffffff || (status & SE_INTRS) == 0) {
		/* Not ours. */
		return 0;
	}
	/* Ack interrupts. */
	CSR_WRITE_4(sc, IntrStatus, status);
	/* Disable further interrupts. */
	CSR_WRITE_4(sc, IntrMask, 0);

	for (;;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
			se_rxeof(sc);
			/* Wakeup Rx MAC. */
			if ((status & INTR_RX_IDLE) != 0)
				CSR_WRITE_4(sc, RX_CTL,
				    0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
		}
		if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0)
			se_txeof(sc);
		status = CSR_READ_4(sc, IntrStatus);
		if ((status & SE_INTRS) == 0)
			break;
		/* Ack interrupts. */
		CSR_WRITE_4(sc, IntrStatus, status);
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/* Re-enable interrupts */
		CSR_WRITE_4(sc, IntrMask, SE_INTRS);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			se_start(ifp);
	}

	return 1;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
se_encap(struct se_softc *sc, struct mbuf *m_head, uint *txidx)
{
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct mbuf *m;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *desc;
	uint i, cnt = 0;
	int rc;

	/*
	 * If there's no way we can send any packets, return now.
	 */
	if (SE_TX_RING_CNT - cd->se_tx_cnt < 2) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: encap failed, not enough TX desc\n",
			    ifp->if_xname);
#endif
		return ENOBUFS;
	}

	if (m_defrag(m_head, M_DONTWAIT) != 0) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: m_defrag failed\n", ifp->if_xname);
#endif
		return ENOBUFS;	/* XXX should not be fatal */
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	i = *txidx;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		if ((SE_TX_RING_CNT - (cd->se_tx_cnt + cnt)) < 2) {
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: encap failed, not enough TX desc\n",
				    ifp->if_xname);
#endif
			return ENOBUFS;
		}
		cd->se_tx_mbuf[i] = m;
		rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_tx_map[i],
		    m, BUS_DMA_NOWAIT);
		if (rc != 0)
			return ENOBUFS;
		KASSERT(cd->se_tx_map[i]->dm_nsegs == 1);
		bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0,
		    cd->se_tx_map[i]->dm_mapsize, BUS_DMASYNC_PREWRITE);

		desc = &ld->se_tx_ring[i];
		desc->se_sts_size = htole32(cd->se_tx_map[i]->dm_segs->ds_len);
		desc->se_ptr =
		    htole32((uint32_t)cd->se_tx_map[i]->dm_segs->ds_addr);
		desc->se_flags = htole32(cd->se_tx_map[i]->dm_segs->ds_len);
		if (i == SE_TX_RING_CNT - 1)
			desc->se_flags |= htole32(RING_END);
		desc->se_cmdsts = htole32(TDC_OWN | TDC_INTR | TDC_DEF |
		    TDC_CRC | TDC_PAD | TDC_BST);
		bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap,
		    i * sizeof(*desc), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		SE_INC(i, SE_TX_RING_CNT);
		cnt++;
	}

	/* can't happen */
	if (m != NULL)
		return ENOBUFS;

	cd->se_tx_cnt += cnt;
	*txidx = i;

	return 0;
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
void
se_start(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	struct se_chain_data *cd = &sc->se_cdata;
	uint i, queued = 0;

	if ((sc->sc_flags & SE_FLAG_LINK) == 0 ||
	    !(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd)) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: can't tx, flags 0x%x 0x%04x\n",
			    ifp->if_xname, sc->sc_flags, (uint)ifp->if_flags);
#endif
		return;
	}

	i = cd->se_tx_prod;

	while (cd->se_tx_mbuf[i] == NULL) {
		m_head = ifq_deq_begin(&ifp->if_snd);
		if (m_head == NULL)
			break;

		if (se_encap(sc, m_head, &i) != 0) {
			ifq_deq_rollback(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* now we are committed to transmit the packet */
		ifq_deq_commit(&ifp->if_snd, m_head);
		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (queued > 0) {
		/* Transmit */
		cd->se_tx_prod = i;
		CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL);
		ifp->if_timer = 5;
	}
}

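/*
 * Bring the interface up: reset the chip, initialize the rings,
 * program the MAC and RX filter, then enable interrupts and start
 * the receiver and transmitter.
 */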
int
se_init(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	uint16_t rxfilt;
	int i;

	splassert(IPL_NET);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	se_stop(sc);
	se_reset(sc);

	/* Init circular RX list. */
	if (se_list_rx_init(sc) == ENOBUFS) {
		se_stop(sc);	/* XXX necessary? */
		return ENOBUFS;
	}

	/* Init TX descriptors. */
	se_list_tx_init(sc);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, TX_DESC,
	    (uint32_t)sc->se_ldata.se_tx_dmamap->dm_segs[0].ds_addr);
	CSR_WRITE_4(sc, RX_DESC,
	    (uint32_t)sc->se_ldata.se_rx_dmamap->dm_segs[0].ds_addr);

	CSR_WRITE_4(sc, TxMacControl, 0x60);
	CSR_WRITE_4(sc, RxWakeOnLan, 0);
	CSR_WRITE_4(sc, RxWakeOnLanData, 0);
	CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN +
	    SE_RX_PAD_BYTES);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, RxMacAddr + i, sc->sc_ac.ac_enaddr[i]);
	/* Configure RX MAC. */
	rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);

	/* Program promiscuous mode and multicast filters. */
	se_iff(sc);

	/*
	 * Clear and enable interrupts.
	 */
	CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF);
	CSR_WRITE_4(sc, IntrMask, SE_INTRS);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	sc->sc_flags &= ~SE_FLAG_LINK;
	mii_mediachg(&sc->sc_mii);
	timeout_add_sec(&sc->sc_tick_tmo, 1);

	return 0;
}

/*
 * Set media options.
 */
int
se_ifmedia_upd(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = &sc->sc_mii;
	sc->sc_flags &= ~SE_FLAG_LINK;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

/*
 * Report current media status.
 */
void
se_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct se_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = &sc->sc_mii;
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

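/*
 * Handle socket ioctls: interface address and flag changes, media
 * selection and multicast filter updates.
 */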
int
se_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct se_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, rc = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			rc = se_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				rc = ENETRESET;
			else
				rc = se_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				se_stop(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		rc = ether_ioctl(ifp, &sc->sc_ac, command, data);
		break;
	}

	if (rc == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			se_iff(sc);
		rc = 0;
	}

	splx(s);
	return rc;
}

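/*
 * Watchdog: a transmission has not completed in time, so reset and
 * restart the chip.
 */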
void
se_watchdog(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	int s;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	s = splnet();
	se_init(ifp);
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		se_start(ifp);
	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
se_stop(struct se_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	timeout_del(&sc->sc_tick_tmo);
	mii_down(&sc->sc_mii);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_READ_4(sc, IntrMask);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
	/* Stop TX/RX MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);
	/* XXX Can we assume active DMA cycles gone? */
	DELAY(2000);
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	sc->sc_flags &= ~SE_FLAG_LINK;
	se_list_rx_free(sc);
	se_list_tx_free(sc);
}