Line data Source code
1 : /* $OpenBSD: if_pcn.c,v 1.43 2017/01/22 10:17:38 dlg Exp $ */
2 : /* $NetBSD: if_pcn.c,v 1.26 2005/05/07 09:15:44 is Exp $ */
3 :
4 : /*
5 : * Copyright (c) 2001 Wasabi Systems, Inc.
6 : * All rights reserved.
7 : *
8 : * Written by Jason R. Thorpe for Wasabi Systems, Inc.
9 : *
10 : * Redistribution and use in source and binary forms, with or without
11 : * modification, are permitted provided that the following conditions
12 : * are met:
13 : * 1. Redistributions of source code must retain the above copyright
14 : * notice, this list of conditions and the following disclaimer.
15 : * 2. Redistributions in binary form must reproduce the above copyright
16 : * notice, this list of conditions and the following disclaimer in the
17 : * documentation and/or other materials provided with the distribution.
18 : * 3. All advertising materials mentioning features or use of this software
19 : * must display the following acknowledgement:
20 : * This product includes software developed for the NetBSD Project by
21 : * Wasabi Systems, Inc.
22 : * 4. The name of Wasabi Systems, Inc. may not be used to endorse
23 : * or promote products derived from this software without specific prior
24 : * written permission.
25 : *
26 : * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
27 : * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 : * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 : * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
30 : * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 : * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 : * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 : * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 : * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 : * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 : * POSSIBILITY OF SUCH DAMAGE.
37 : */
38 :
39 : /*
40 : * Device driver for the AMD PCnet-PCI series of Ethernet
41 : * chips:
42 : *
43 : * * Am79c970 PCnet-PCI Single-Chip Ethernet Controller for PCI
44 : * Local Bus
45 : *
46 : * * Am79c970A PCnet-PCI II Single-Chip Full-Duplex Ethernet Controller
47 : * for PCI Local Bus
48 : *
49 : * * Am79c971 PCnet-FAST Single-Chip Full-Duplex 10/100Mbps
50 : * Ethernet Controller for PCI Local Bus
51 : *
52 : * * Am79c972 PCnet-FAST+ Enhanced 10/100Mbps PCI Ethernet Controller
53 : * with OnNow Support
54 : *
55 : * * Am79c973/Am79c975 PCnet-FAST III Single-Chip 10/100Mbps PCI
56 : * Ethernet Controller with Integrated PHY
57 : *
58 : * This also supports the virtual PCnet-PCI Ethernet interface found
59 : * in VMware.
60 : *
61 : * TODO:
62 : *
63 : * * Split this into bus-specific and bus-independent portions.
64 : * The core could also be used for the ILACC (Am79900) 32-bit
65 : * Ethernet chip (XXX only if we use an ILACC-compatible SWSTYLE).
66 : */
67 :
68 : #include "bpfilter.h"
69 :
70 : #include <sys/param.h>
71 : #include <sys/systm.h>
72 : #include <sys/timeout.h>
73 : #include <sys/mbuf.h>
74 : #include <sys/malloc.h>
75 : #include <sys/kernel.h>
76 : #include <sys/socket.h>
77 : #include <sys/ioctl.h>
78 : #include <sys/errno.h>
79 : #include <sys/device.h>
80 : #include <sys/queue.h>
81 : #include <sys/endian.h>
82 :
83 : #include <net/if.h>
84 : #include <net/if_dl.h>
85 :
86 : #include <netinet/in.h>
87 : #include <netinet/if_ether.h>
88 :
89 : #include <net/if_media.h>
90 :
91 : #if NBPFILTER > 0
92 : #include <net/bpf.h>
93 : #endif
94 :
95 : #include <machine/bus.h>
96 : #include <machine/intr.h>
97 :
98 : #include <dev/mii/miivar.h>
99 :
100 : #include <dev/ic/am79900reg.h>
101 : #include <dev/ic/lancereg.h>
102 :
103 : #include <dev/pci/pcireg.h>
104 : #include <dev/pci/pcivar.h>
105 : #include <dev/pci/pcidevs.h>
106 :
107 : /*
108 : * Register definitions for the AMD PCnet-PCI series of Ethernet
109 : * chips.
110 : *
111 : * These are only the registers that we access directly from PCI
112 : * space. Everything else (accessed via the RAP + RDP/BDP) is
113 : * defined in <dev/ic/lancereg.h>.
114 : */
115 :
116 : /*
117 : * PCI configuration space.
118 : */
119 :
120 : #define PCN_PCI_CBIO (PCI_MAPREG_START + 0x00)
121 : #define PCN_PCI_CBMEM (PCI_MAPREG_START + 0x04)
122 :
123 : /*
124 : * I/O map in Word I/O mode.
125 : */
126 :
127 : #define PCN16_APROM 0x00
128 : #define PCN16_RDP 0x10
129 : #define PCN16_RAP 0x12
130 : #define PCN16_RESET 0x14
131 : #define PCN16_BDP 0x16
132 :
133 : /*
134 : * I/O map in DWord I/O mode.
135 : */
136 :
137 : #define PCN32_APROM 0x00
138 : #define PCN32_RDP 0x10
139 : #define PCN32_RAP 0x14
140 : #define PCN32_RESET 0x18
141 : #define PCN32_BDP 0x1c
142 :
143 : /*
144 : * Transmit descriptor list size. This is arbitrary, but allocate
145 : * enough descriptors for 128 pending transmissions, and 4 segments
146 : * per packet. This MUST work out to a power of 2.
147 : *
148 : * NOTE: We can't have any more than 512 Tx descriptors, SO BE CAREFUL!
149 : *
150 : * So we play a little trick here. We give each packet up to 16
151 : * DMA segments, but only allocate the maximum of 512 descriptors. The
152 : * transmit logic can deal with this; we're just hoping to sneak by.
153 : */
154 : #define PCN_NTXSEGS 16
155 :
156 : #define PCN_TXQUEUELEN 128
157 : #define PCN_TXQUEUELEN_MASK (PCN_TXQUEUELEN - 1)
158 : #define PCN_NTXDESC 512
159 : #define PCN_NTXDESC_MASK (PCN_NTXDESC - 1)
160 : #define PCN_NEXTTX(x) (((x) + 1) & PCN_NTXDESC_MASK)
161 : #define PCN_NEXTTXS(x) (((x) + 1) & PCN_TXQUEUELEN_MASK)
162 :
163 : /* Tx interrupt every N + 1 packets. */
164 : #define PCN_TXINTR_MASK 7
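     : /* With the mask of 7, pcn_start() requests an LE_T1_LTINT Tx
     :  * interrupt on every 8th packet it queues. */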
165 :
166 : /*
167 : * Receive descriptor list size. We have one Rx buffer per incoming
168 : * packet, so this logic is a little simpler.
169 : */
170 : #define PCN_NRXDESC 128
171 : #define PCN_NRXDESC_MASK (PCN_NRXDESC - 1)
172 : #define PCN_NEXTRX(x) (((x) + 1) & PCN_NRXDESC_MASK)
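     : /*
     :  * All of the ring sizes above are powers of two, so PCN_NEXTTX(),
     :  * PCN_NEXTTXS() and PCN_NEXTRX() can wrap an index with a cheap
     :  * AND of the mask instead of a modulo.
     :  */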
173 :
174 : /*
175 : * Control structures are DMA'd to the PCnet chip. We allocate them in
176 : * a single clump that maps to a single DMA segment to make several things
177 : * easier.
178 : */
179 : struct pcn_control_data {
180 : /* The transmit descriptors. */
181 : struct letmd pcd_txdescs[PCN_NTXDESC];
182 :
183 : /* The receive descriptors. */
184 : struct lermd pcd_rxdescs[PCN_NRXDESC];
185 :
186 : /* The init block. */
187 : struct leinit pcd_initblock;
188 : };
189 :
190 : #define PCN_CDOFF(x) offsetof(struct pcn_control_data, x)
191 : #define PCN_CDTXOFF(x) PCN_CDOFF(pcd_txdescs[(x)])
192 : #define PCN_CDRXOFF(x) PCN_CDOFF(pcd_rxdescs[(x)])
193 : #define PCN_CDINITOFF PCN_CDOFF(pcd_initblock)
194 :
195 : /*
196 : * Software state for transmit jobs.
197 : */
198 : struct pcn_txsoft {
199 : struct mbuf *txs_mbuf; /* head of our mbuf chain */
200 : bus_dmamap_t txs_dmamap; /* our DMA map */
201 : int txs_firstdesc; /* first descriptor in packet */
202 : int txs_lastdesc; /* last descriptor in packet */
203 : };
204 :
205 : /*
206 : * Software state for receive jobs.
207 : */
208 : struct pcn_rxsoft {
209 : struct mbuf *rxs_mbuf; /* head of our mbuf chain */
210 : bus_dmamap_t rxs_dmamap; /* our DMA map */
211 : };
212 :
213 : /*
214 : * Description of Rx FIFO watermarks for various revisions.
215 : */
216 : static const char * const pcn_79c970_rcvfw[] = {
217 : "16 bytes",
218 : "64 bytes",
219 : "128 bytes",
220 : NULL,
221 : };
222 :
223 : static const char * const pcn_79c971_rcvfw[] = {
224 : "16 bytes",
225 : "64 bytes",
226 : "112 bytes",
227 : NULL,
228 : };
229 :
230 : /*
231 : * Description of Tx start points for various revisions.
232 : */
233 : static const char * const pcn_79c970_xmtsp[] = {
234 : "8 bytes",
235 : "64 bytes",
236 : "128 bytes",
237 : "248 bytes",
238 : };
239 :
240 : static const char * const pcn_79c971_xmtsp[] = {
241 : "20 bytes",
242 : "64 bytes",
243 : "128 bytes",
244 : "248 bytes",
245 : };
246 :
247 : static const char * const pcn_79c971_xmtsp_sram[] = {
248 : "44 bytes",
249 : "64 bytes",
250 : "128 bytes",
251 : "store-and-forward",
252 : };
253 :
254 : /*
255 : * Description of Tx FIFO watermarks for various revisions.
256 : */
257 : static const char * const pcn_79c970_xmtfw[] = {
258 : "16 bytes",
259 : "64 bytes",
260 : "128 bytes",
261 : NULL,
262 : };
263 :
264 : static const char * const pcn_79c971_xmtfw[] = {
265 : "16 bytes",
266 : "64 bytes",
267 : "108 bytes",
268 : NULL,
269 : };
270 :
271 : /*
272 : * Software state per device.
273 : */
274 : struct pcn_softc {
275 : struct device sc_dev; /* generic device information */
276 : bus_space_tag_t sc_st; /* bus space tag */
277 : bus_space_handle_t sc_sh; /* bus space handle */
278 : bus_dma_tag_t sc_dmat; /* bus DMA tag */
279 : struct arpcom sc_arpcom; /* Ethernet common data */
280 :
281 : /* Points to our media routines, etc. */
282 : const struct pcn_variant *sc_variant;
283 :
284 : void *sc_ih; /* interrupt cookie */
285 :
286 : struct mii_data sc_mii; /* MII/media information */
287 :
288 : struct timeout sc_tick_timeout; /* tick timeout */
289 :
290 : bus_dmamap_t sc_cddmamap; /* control data DMA map */
291 : #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
292 :
293 : /* Software state for transmit and receive descriptors. */
294 : struct pcn_txsoft sc_txsoft[PCN_TXQUEUELEN];
295 : struct pcn_rxsoft sc_rxsoft[PCN_NRXDESC];
296 :
297 : /* Control data structures */
298 : struct pcn_control_data *sc_control_data;
299 : #define sc_txdescs sc_control_data->pcd_txdescs
300 : #define sc_rxdescs sc_control_data->pcd_rxdescs
301 : #define sc_initblock sc_control_data->pcd_initblock
302 :
303 : const char * const *sc_rcvfw_desc; /* Rx FIFO watermark info */
304 : int sc_rcvfw;
305 :
306 : const char * const *sc_xmtsp_desc; /* Tx start point info */
307 : int sc_xmtsp;
308 :
309 : const char * const *sc_xmtfw_desc; /* Tx FIFO watermark info */
310 : int sc_xmtfw;
311 :
312 : int sc_flags; /* misc. flags; see below */
313 : int sc_swstyle; /* the software style in use */
314 :
315 : int sc_txfree; /* number of free Tx descriptors */
316 : int sc_txnext; /* next ready Tx descriptor */
317 :
318 : int sc_txsfree; /* number of free Tx jobs */
319 : int sc_txsnext; /* next free Tx job */
320 : int sc_txsdirty; /* dirty Tx jobs */
321 :
322 : int sc_rxptr; /* next ready Rx descriptor/job */
323 :
324 : uint32_t sc_csr5; /* prototype CSR5 register */
325 : uint32_t sc_mode; /* prototype MODE register */
326 : };
327 :
328 : /* sc_flags */
329 : #define PCN_F_HAS_MII 0x0001 /* has MII */
330 :
331 : #define PCN_CDTXADDR(sc, x) ((sc)->sc_cddma + PCN_CDTXOFF((x)))
332 : #define PCN_CDRXADDR(sc, x) ((sc)->sc_cddma + PCN_CDRXOFF((x)))
333 : #define PCN_CDINITADDR(sc) ((sc)->sc_cddma + PCN_CDINITOFF)
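     : /*
     :  * These give the bus (DMA) address of a Tx/Rx descriptor or of the
     :  * init block within the single control-data clump mapped by
     :  * sc_cddmamap.
     :  */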
334 :
335 : #define PCN_CDTXSYNC(sc, x, n, ops) \
336 : do { \
337 : int __x, __n; \
338 : \
339 : __x = (x); \
340 : __n = (n); \
341 : \
342 : /* If it will wrap around, sync to the end of the ring. */ \
343 : if ((__x + __n) > PCN_NTXDESC) { \
344 : bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
345 : PCN_CDTXOFF(__x), sizeof(struct letmd) * \
346 : (PCN_NTXDESC - __x), (ops)); \
347 : __n -= (PCN_NTXDESC - __x); \
348 : __x = 0; \
349 : } \
350 : \
351 : /* Now sync whatever is left. */ \
352 : bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
353 : PCN_CDTXOFF(__x), sizeof(struct letmd) * __n, (ops)); \
354 : } while (/*CONSTCOND*/0)
355 :
356 : #define PCN_CDRXSYNC(sc, x, ops) \
357 : bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
358 : PCN_CDRXOFF((x)), sizeof(struct lermd), (ops))
359 :
360 : #define PCN_CDINITSYNC(sc, ops) \
361 : bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
362 : PCN_CDINITOFF, sizeof(struct leinit), (ops))
363 :
364 : #define PCN_INIT_RXDESC(sc, x) \
365 : do { \
366 : struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
367 : struct lermd *__rmd = &(sc)->sc_rxdescs[(x)]; \
368 : struct mbuf *__m = __rxs->rxs_mbuf; \
369 : \
370 : /* \
371 : * Note: We scoot the packet forward 2 bytes in the buffer \
372 : * so that the payload after the Ethernet header is aligned \
373 : * to a 4-byte boundary. \
374 : */ \
375 : __m->m_data = __m->m_ext.ext_buf + 2; \
376 : \
377 : if ((sc)->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) { \
378 : __rmd->rmd2 = \
379 : htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
380 : __rmd->rmd0 = 0; \
381 : } else { \
382 : __rmd->rmd2 = 0; \
383 : __rmd->rmd0 = \
384 : htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
385 : } \
386 : __rmd->rmd1 = htole32(LE_R1_OWN|LE_R1_ONES| \
387 : (LE_BCNT(MCLBYTES - 2) & LE_R1_BCNT_MASK)); \
388 : PCN_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);\
389 : } while(/*CONSTCOND*/0)
390 :
391 : void pcn_start(struct ifnet *);
392 : void pcn_watchdog(struct ifnet *);
393 : int pcn_ioctl(struct ifnet *, u_long, caddr_t);
394 : int pcn_init(struct ifnet *);
395 : void pcn_stop(struct ifnet *, int);
396 :
397 : void pcn_reset(struct pcn_softc *);
398 : void pcn_rxdrain(struct pcn_softc *);
399 : int pcn_add_rxbuf(struct pcn_softc *, int);
400 : void pcn_tick(void *);
401 :
402 : void pcn_spnd(struct pcn_softc *);
403 :
404 : void pcn_set_filter(struct pcn_softc *);
405 :
406 : int pcn_intr(void *);
407 : void pcn_txintr(struct pcn_softc *);
408 : int pcn_rxintr(struct pcn_softc *);
409 :
410 : int pcn_mii_readreg(struct device *, int, int);
411 : void pcn_mii_writereg(struct device *, int, int, int);
412 : void pcn_mii_statchg(struct device *);
413 :
414 : void pcn_79c970_mediainit(struct pcn_softc *);
415 : int pcn_79c970_mediachange(struct ifnet *);
416 : void pcn_79c970_mediastatus(struct ifnet *, struct ifmediareq *);
417 :
418 : void pcn_79c971_mediainit(struct pcn_softc *);
419 : int pcn_79c971_mediachange(struct ifnet *);
420 : void pcn_79c971_mediastatus(struct ifnet *, struct ifmediareq *);
421 :
422 : /*
423 : * Description of a PCnet-PCI variant. Used to select media access
424 : * method, mostly, and to print a nice description of the chip.
425 : */
426 : static const struct pcn_variant {
427 : const char *pcv_desc;
428 : void (*pcv_mediainit)(struct pcn_softc *);
429 : uint16_t pcv_chipid;
430 : } pcn_variants[] = {
431 : { "Am79c970",
432 : pcn_79c970_mediainit,
433 : PARTID_Am79c970 },
434 :
435 : { "Am79c970A",
436 : pcn_79c970_mediainit,
437 : PARTID_Am79c970A },
438 :
439 : { "Am79c971",
440 : pcn_79c971_mediainit,
441 : PARTID_Am79c971 },
442 :
443 : { "Am79c972",
444 : pcn_79c971_mediainit,
445 : PARTID_Am79c972 },
446 :
447 : { "Am79c973",
448 : pcn_79c971_mediainit,
449 : PARTID_Am79c973 },
450 :
451 : { "Am79c975",
452 : pcn_79c971_mediainit,
453 : PARTID_Am79c975 },
454 :
455 : { "Am79c976",
456 : pcn_79c971_mediainit,
457 : PARTID_Am79c976 },
458 :
459 : { "Am79c978",
460 : pcn_79c971_mediainit,
461 : PARTID_Am79c978 },
462 :
463 : { "Unknown",
464 : pcn_79c971_mediainit,
465 : 0 },
466 : };
467 :
468 : int pcn_copy_small = 0;
469 :
470 : int pcn_match(struct device *, void *, void *);
471 : void pcn_attach(struct device *, struct device *, void *);
472 :
473 : struct cfattach pcn_ca = {
474 : sizeof(struct pcn_softc), pcn_match, pcn_attach,
475 : };
476 :
477 : const struct pci_matchid pcn_devices[] = {
478 : { PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI },
479 : { PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCHOME_PCI }
480 : };
481 :
482 : struct cfdriver pcn_cd = {
483 : NULL, "pcn", DV_IFNET
484 : };
485 :
486 : /*
487 : * Routines to read and write the PCnet-PCI CSR/BCR space.
488 : */
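     : /*
     :  * Access is indirect: write the register number to the RAP, then
     :  * read or write the data through the RDP (for CSRs) or the BDP
     :  * (for BCRs).
     :  */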
489 :
490 : static __inline uint32_t
491 0 : pcn_csr_read(struct pcn_softc *sc, int reg)
492 : {
493 :
494 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
495 0 : return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RDP));
496 : }
497 :
498 : static __inline void
499 0 : pcn_csr_write(struct pcn_softc *sc, int reg, uint32_t val)
500 : {
501 :
502 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
503 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, val);
504 0 : }
505 :
506 : static __inline uint32_t
507 0 : pcn_bcr_read(struct pcn_softc *sc, int reg)
508 : {
509 :
510 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
511 0 : return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_BDP));
512 : }
513 :
514 : static __inline void
515 0 : pcn_bcr_write(struct pcn_softc *sc, int reg, uint32_t val)
516 : {
517 :
518 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
519 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_BDP, val);
520 0 : }
521 :
522 : static const struct pcn_variant *
523 0 : pcn_lookup_variant(uint16_t chipid)
524 : {
525 : const struct pcn_variant *pcv;
526 :
527 0 : for (pcv = pcn_variants; pcv->pcv_chipid != 0; pcv++) {
528 0 : if (chipid == pcv->pcv_chipid)
529 0 : return (pcv);
530 : }
531 :
532 : /*
533 : * This covers unknown chips, which we simply treat like
534 : * a generic PCnet-FAST.
535 : */
536 0 : return (pcv);
537 0 : }
538 :
539 : int
540 0 : pcn_match(struct device *parent, void *match, void *aux)
541 : {
542 0 : struct pci_attach_args *pa = aux;
543 :
544 : /*
545 : * IBM makes a PCI variant of this card which shows up as a
546 : * Trident Microsystems 4DWAVE DX (ethernet network, revision 0x25).
547 : * This card is truly a pcn card, so we have a special-case match
548 : * for it.
549 : */
550 0 : if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TRIDENT &&
551 0 : PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_TRIDENT_4DWAVE_DX &&
552 0 : PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
553 0 : return(1);
554 :
555 0 : return (pci_matchbyid((struct pci_attach_args *)aux, pcn_devices,
556 : nitems(pcn_devices)));
557 0 : }
558 :
559 : void
560 0 : pcn_attach(struct device *parent, struct device *self, void *aux)
561 : {
562 0 : struct pcn_softc *sc = (struct pcn_softc *) self;
563 0 : struct pci_attach_args *pa = aux;
564 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
565 0 : pci_chipset_tag_t pc = pa->pa_pc;
566 0 : pci_intr_handle_t ih;
567 : const char *intrstr = NULL;
568 0 : bus_space_tag_t iot, memt;
569 0 : bus_space_handle_t ioh, memh;
570 0 : bus_dma_segment_t seg;
571 : int ioh_valid, memh_valid;
572 0 : int i, rseg, error;
573 : uint32_t chipid, reg;
574 0 : uint8_t enaddr[ETHER_ADDR_LEN];
575 :
576 0 : timeout_set(&sc->sc_tick_timeout, pcn_tick, sc);
577 :
578 : /*
579 : * Map the device.
580 : */
581 0 : ioh_valid = (pci_mapreg_map(pa, PCN_PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
582 0 : &iot, &ioh, NULL, NULL, 0) == 0);
583 0 : memh_valid = (pci_mapreg_map(pa, PCN_PCI_CBMEM,
584 : PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
585 0 : &memt, &memh, NULL, NULL, 0) == 0);
586 :
587 0 : if (memh_valid) {
588 0 : sc->sc_st = memt;
589 0 : sc->sc_sh = memh;
590 0 : } else if (ioh_valid) {
591 0 : sc->sc_st = iot;
592 0 : sc->sc_sh = ioh;
593 : } else {
594 0 : printf(": unable to map device registers\n");
595 0 : return;
596 : }
597 :
598 0 : sc->sc_dmat = pa->pa_dmat;
599 :
600 : /* Get it out of power save mode, if needed. */
601 0 : pci_set_powerstate(pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
602 :
603 : /*
604 : * Reset the chip to a known state. This also puts the
605 : * chip into 32-bit mode.
606 : */
607 0 : pcn_reset(sc);
608 :
609 : #if !defined(PCN_NO_PROM)
610 :
611 : /*
612 : * Read the Ethernet address from the EEPROM.
613 : */
614 0 : for (i = 0; i < ETHER_ADDR_LEN; i++)
615 0 : enaddr[i] = bus_space_read_1(sc->sc_st, sc->sc_sh,
616 : PCN32_APROM + i);
617 : #else
618 : /*
619 : * The PROM is not used; instead we assume that the MAC address
620 : * has been programmed into the device's physical address
621 : * registers by the boot firmware.
622 : */
623 :
624 : for (i = 0; i < 3; i++) {
625 : uint32_t val;
626 : val = pcn_csr_read(sc, LE_CSR12 + i);
627 : enaddr[2*i] = val & 0x0ff;
628 : enaddr[2*i+1] = (val >> 8) & 0x0ff;
629 : }
630 : #endif
631 :
632 : /*
633 : * Now that the device is mapped, attempt to figure out what
634 : * kind of chip we have. Note that IDL has all 32 bits of
635 : * the chip ID when we're in 32-bit mode.
636 : */
637 0 : chipid = pcn_csr_read(sc, LE_CSR88);
638 0 : sc->sc_variant = pcn_lookup_variant(CHIPID_PARTID(chipid));
639 :
640 : /*
641 : * Map and establish our interrupt.
642 : */
643 0 : if (pci_intr_map(pa, &ih)) {
644 0 : printf(": unable to map interrupt\n");
645 0 : return;
646 : }
647 0 : intrstr = pci_intr_string(pc, ih);
648 0 : sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, pcn_intr, sc,
649 0 : self->dv_xname);
650 0 : if (sc->sc_ih == NULL) {
651 0 : printf(": unable to establish interrupt");
652 0 : if (intrstr != NULL)
653 0 : printf(" at %s", intrstr);
654 0 : printf("\n");
655 0 : return;
656 : }
657 :
658 : /*
659 : * Allocate the control data structures, and create and load the
660 : * DMA map for it.
661 : */
662 0 : if ((error = bus_dmamem_alloc(sc->sc_dmat,
663 : sizeof(struct pcn_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
664 0 : 0)) != 0) {
665 0 : printf(": unable to allocate control data, error = %d\n",
666 : error);
667 0 : return;
668 : }
669 :
670 0 : if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
671 : sizeof(struct pcn_control_data), (caddr_t *)&sc->sc_control_data,
672 0 : BUS_DMA_COHERENT)) != 0) {
673 0 : printf(": unable to map control data, error = %d\n",
674 : error);
675 0 : goto fail_1;
676 : }
677 :
678 0 : if ((error = bus_dmamap_create(sc->sc_dmat,
679 : sizeof(struct pcn_control_data), 1,
680 0 : sizeof(struct pcn_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
681 0 : printf(": unable to create control data DMA map, "
682 : "error = %d\n", error);
683 0 : goto fail_2;
684 : }
685 :
686 0 : if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
687 : sc->sc_control_data, sizeof(struct pcn_control_data), NULL,
688 0 : 0)) != 0) {
689 0 : printf(": unable to load control data DMA map, error = %d\n",
690 : error);
691 0 : goto fail_3;
692 : }
693 :
694 : /* Create the transmit buffer DMA maps. */
695 0 : for (i = 0; i < PCN_TXQUEUELEN; i++) {
696 0 : if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
697 : PCN_NTXSEGS, MCLBYTES, 0, 0,
698 0 : &sc->sc_txsoft[i].txs_dmamap)) != 0) {
699 0 : printf(": unable to create tx DMA map %d, "
700 : "error = %d\n", i, error);
701 0 : goto fail_4;
702 : }
703 : }
704 :
705 : /* Create the receive buffer DMA maps. */
706 0 : for (i = 0; i < PCN_NRXDESC; i++) {
707 0 : if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
708 0 : MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
709 0 : printf(": unable to create rx DMA map %d, "
710 : "error = %d\n", i, error);
711 : goto fail_5;
712 : }
713 0 : sc->sc_rxsoft[i].rxs_mbuf = NULL;
714 : }
715 :
716 0 : printf(", %s, rev %d: %s, address %s\n", sc->sc_variant->pcv_desc,
717 0 : CHIPID_VER(chipid), intrstr, ether_sprintf(enaddr));
718 :
719 : /* Initialize our media structures. */
720 0 : (*sc->sc_variant->pcv_mediainit)(sc);
721 :
722 : /*
723 : * Initialize FIFO watermark info.
724 : */
725 0 : switch (sc->sc_variant->pcv_chipid) {
726 : case PARTID_Am79c970:
727 : case PARTID_Am79c970A:
728 0 : sc->sc_rcvfw_desc = pcn_79c970_rcvfw;
729 0 : sc->sc_xmtsp_desc = pcn_79c970_xmtsp;
730 0 : sc->sc_xmtfw_desc = pcn_79c970_xmtfw;
731 0 : break;
732 :
733 : default:
734 0 : sc->sc_rcvfw_desc = pcn_79c971_rcvfw;
735 : /*
736 : * Read BCR25 to determine how much SRAM is
737 : * on the board. If > 0, then the chip
738 : * uses different Start Point thresholds.
739 : *
740 : * Note BCR25 and BCR26 are loaded from the
741 : * EEPROM on RST, and unaffected by S_RESET,
742 : * so we don't really have to worry about
743 : * them except for this.
744 : */
745 0 : reg = pcn_bcr_read(sc, LE_BCR25) & 0x00ff;
746 0 : if (reg != 0)
747 0 : sc->sc_xmtsp_desc = pcn_79c971_xmtsp_sram;
748 : else
749 0 : sc->sc_xmtsp_desc = pcn_79c971_xmtsp;
750 0 : sc->sc_xmtfw_desc = pcn_79c971_xmtfw;
751 0 : break;
752 : }
753 :
754 : /*
755 : * Set up defaults -- see the tables above for what these
756 : * values mean.
757 : *
758 : * XXX How should we tune RCVFW and XMTFW?
759 : */
760 0 : sc->sc_rcvfw = 1; /* minimum for full-duplex */
761 0 : sc->sc_xmtsp = 1;
762 0 : sc->sc_xmtfw = 0;
763 :
764 0 : ifp = &sc->sc_arpcom.ac_if;
765 0 : bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
766 0 : bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
767 0 : ifp->if_softc = sc;
768 0 : ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
769 0 : ifp->if_ioctl = pcn_ioctl;
770 0 : ifp->if_start = pcn_start;
771 0 : ifp->if_watchdog = pcn_watchdog;
772 0 : IFQ_SET_MAXLEN(&ifp->if_snd, PCN_NTXDESC - 1);
773 :
774 : /* Attach the interface. */
775 0 : if_attach(ifp);
776 0 : ether_ifattach(ifp);
777 0 : return;
778 :
779 : /*
780 : * Free any resources we've allocated during the failed attach
781 : * attempt. Do this in reverse order and fall through.
782 : */
783 : fail_5:
784 0 : for (i = 0; i < PCN_NRXDESC; i++) {
785 0 : if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
786 0 : bus_dmamap_destroy(sc->sc_dmat,
787 : sc->sc_rxsoft[i].rxs_dmamap);
788 : }
789 : fail_4:
790 0 : for (i = 0; i < PCN_TXQUEUELEN; i++) {
791 0 : if (sc->sc_txsoft[i].txs_dmamap != NULL)
792 0 : bus_dmamap_destroy(sc->sc_dmat,
793 : sc->sc_txsoft[i].txs_dmamap);
794 : }
795 0 : bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
796 : fail_3:
797 0 : bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
798 : fail_2:
799 0 : bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
800 : sizeof(struct pcn_control_data));
801 : fail_1:
802 0 : bus_dmamem_free(sc->sc_dmat, &seg, rseg);
803 0 : }
804 :
805 : /*
806 : * pcn_start: [ifnet interface function]
807 : *
808 : * Start packet transmission on the interface.
809 : */
810 : void
811 0 : pcn_start(struct ifnet *ifp)
812 : {
813 0 : struct pcn_softc *sc = ifp->if_softc;
814 : struct mbuf *m0, *m;
815 : struct pcn_txsoft *txs;
816 : bus_dmamap_t dmamap;
817 : int error, nexttx, lasttx = -1, ofree, seg;
818 :
819 0 : if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
820 0 : return;
821 :
822 : /*
823 : * Remember the previous number of free descriptors, so we
824 : * can tell below whether we queued anything.
825 : */
826 0 : ofree = sc->sc_txfree;
827 :
828 : /*
829 : * Loop through the send queue, setting up transmit descriptors
830 : * until we drain the queue, or use up all available transmit
831 : * descriptors.
832 : */
833 0 : for (;;) {
834 : /* Grab a packet off the queue. */
835 0 : m0 = ifq_deq_begin(&ifp->if_snd);
836 0 : if (m0 == NULL)
837 : break;
838 : m = NULL;
839 :
840 : /* Get a work queue entry. */
841 0 : if (sc->sc_txsfree == 0) {
842 0 : ifq_deq_rollback(&ifp->if_snd, m0);
843 0 : break;
844 : }
845 :
846 0 : txs = &sc->sc_txsoft[sc->sc_txsnext];
847 0 : dmamap = txs->txs_dmamap;
848 :
849 : /*
850 : * Load the DMA map. If this fails, the packet either
851 : * didn't fit in the allotted number of segments, or we
852 : * were short on resources. In this case, we'll copy
853 : * and try again.
854 : */
855 0 : if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
856 0 : BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
857 0 : MGETHDR(m, M_DONTWAIT, MT_DATA);
858 0 : if (m == NULL) {
859 0 : ifq_deq_rollback(&ifp->if_snd, m0);
860 0 : break;
861 : }
862 0 : if (m0->m_pkthdr.len > MHLEN) {
863 0 : MCLGET(m, M_DONTWAIT);
864 0 : if ((m->m_flags & M_EXT) == 0) {
865 0 : ifq_deq_rollback(&ifp->if_snd, m0);
866 0 : m_freem(m);
867 0 : break;
868 : }
869 : }
870 0 : m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
871 0 : m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
872 0 : error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
873 : m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
874 0 : if (error) {
875 0 : ifq_deq_rollback(&ifp->if_snd, m0);
876 0 : break;
877 : }
878 : }
879 :
880 : /*
881 : * Ensure we have enough descriptors free to describe
882 : * the packet. Note, we always reserve one descriptor
883 : * at the end of the ring as a termination point, to
884 : * prevent wrap-around.
885 : */
886 0 : if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
887 : /*
888 : * Not enough free descriptors to transmit this
889 : * packet. We haven't committed anything yet,
890 : * so just unload the DMA map, put the packet
891 : * back on the queue, and punt. Notify the upper
892 : * layer that there are no more slots left.
893 : *
894 : * XXX We could allocate an mbuf and copy, but
895 : * XXX is it worth it?
896 : */
897 0 : ifq_set_oactive(&ifp->if_snd);
898 0 : bus_dmamap_unload(sc->sc_dmat, dmamap);
899 0 : m_freem(m);
900 0 : ifq_deq_rollback(&ifp->if_snd, m0);
901 0 : break;
902 : }
903 :
904 0 : ifq_deq_commit(&ifp->if_snd, m0);
905 0 : if (m != NULL) {
906 0 : m_freem(m0);
907 : m0 = m;
908 0 : }
909 :
910 : /*
911 : * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
912 : */
913 :
914 : /* Sync the DMA map. */
915 0 : bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
916 : BUS_DMASYNC_PREWRITE);
917 :
918 : /*
919 : * Initialize the transmit descriptors.
920 : */
921 0 : if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) {
922 0 : for (nexttx = sc->sc_txnext, seg = 0;
923 0 : seg < dmamap->dm_nsegs;
924 0 : seg++, nexttx = PCN_NEXTTX(nexttx)) {
925 : /*
926 : * If this is the first descriptor we're
927 : * enqueueing, don't set the OWN bit just
928 : * yet. That could cause a race condition.
929 : * We'll do it below.
930 : */
931 0 : sc->sc_txdescs[nexttx].tmd0 = 0;
932 0 : sc->sc_txdescs[nexttx].tmd2 =
933 0 : htole32(dmamap->dm_segs[seg].ds_addr);
934 0 : sc->sc_txdescs[nexttx].tmd1 =
935 0 : htole32(LE_T1_ONES |
936 : (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
937 : (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
938 : LE_T1_BCNT_MASK));
939 : lasttx = nexttx;
940 : }
941 : } else {
942 0 : for (nexttx = sc->sc_txnext, seg = 0;
943 0 : seg < dmamap->dm_nsegs;
944 0 : seg++, nexttx = PCN_NEXTTX(nexttx)) {
945 : /*
946 : * If this is the first descriptor we're
947 : * enqueueing, don't set the OWN bit just
948 : * yet. That could cause a race condition.
949 : * We'll do it below.
950 : */
951 0 : sc->sc_txdescs[nexttx].tmd0 =
952 0 : htole32(dmamap->dm_segs[seg].ds_addr);
953 0 : sc->sc_txdescs[nexttx].tmd2 = 0;
954 0 : sc->sc_txdescs[nexttx].tmd1 =
955 0 : htole32(LE_T1_ONES |
956 : (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
957 : (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
958 : LE_T1_BCNT_MASK));
959 : lasttx = nexttx;
960 : }
961 : }
962 :
963 0 : KASSERT(lasttx != -1);
964 : /* Interrupt on the packet, if appropriate. */
965 0 : if ((sc->sc_txsnext & PCN_TXINTR_MASK) == 0)
966 0 : sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_LTINT);
967 :
968 : /* Set `start of packet' and `end of packet' appropriately. */
969 0 : sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_ENP);
970 0 : sc->sc_txdescs[sc->sc_txnext].tmd1 |=
971 : htole32(LE_T1_OWN|LE_T1_STP);
972 :
973 : /* Sync the descriptors we're using. */
974 0 : PCN_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
975 : BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
976 :
977 : /* Kick the transmitter. */
978 0 : pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_TDMD);
979 :
980 : /*
981 : * Store a pointer to the packet so we can free it later,
982 : * and remember what txdirty will be once the packet is
983 : * done.
984 : */
985 0 : txs->txs_mbuf = m0;
986 0 : txs->txs_firstdesc = sc->sc_txnext;
987 0 : txs->txs_lastdesc = lasttx;
988 :
989 : /* Advance the tx pointer. */
990 0 : sc->sc_txfree -= dmamap->dm_nsegs;
991 0 : sc->sc_txnext = nexttx;
992 :
993 0 : sc->sc_txsfree--;
994 0 : sc->sc_txsnext = PCN_NEXTTXS(sc->sc_txsnext);
995 :
996 : #if NBPFILTER > 0
997 : /* Pass the packet to any BPF listeners. */
998 0 : if (ifp->if_bpf)
999 0 : bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
1000 : #endif /* NBPFILTER > 0 */
1001 : }
1002 :
1003 0 : if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
1004 : /* No more slots left; notify upper layer. */
1005 0 : ifq_set_oactive(&ifp->if_snd);
1006 0 : }
1007 :
1008 0 : if (sc->sc_txfree != ofree) {
1009 : /* Set a watchdog timer in case the chip flakes out. */
1010 0 : ifp->if_timer = 5;
1011 0 : }
1012 0 : }
1013 :
1014 : /*
1015 : * pcn_watchdog: [ifnet interface function]
1016 : *
1017 : * Watchdog timer handler.
1018 : */
1019 : void
1020 0 : pcn_watchdog(struct ifnet *ifp)
1021 : {
1022 0 : struct pcn_softc *sc = ifp->if_softc;
1023 :
1024 : /*
1025 : * Since we're not interrupting every packet, sweep
1026 : * up before we report an error.
1027 : */
1028 0 : pcn_txintr(sc);
1029 :
1030 0 : if (sc->sc_txfree != PCN_NTXDESC) {
1031 0 : printf("%s: device timeout (txfree %d txsfree %d)\n",
1032 0 : sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree);
1033 0 : ifp->if_oerrors++;
1034 :
1035 : /* Reset the interface. */
1036 0 : (void) pcn_init(ifp);
1037 0 : }
1038 :
1039 : /* Try to get more packets going. */
1040 0 : pcn_start(ifp);
1041 0 : }
1042 :
1043 : /*
1044 : * pcn_ioctl: [ifnet interface function]
1045 : *
1046 : * Handle control requests from the operator.
1047 : */
1048 : int
1049 0 : pcn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1050 : {
1051 0 : struct pcn_softc *sc = ifp->if_softc;
1052 0 : struct ifreq *ifr = (struct ifreq *) data;
1053 : int s, error = 0;
1054 :
1055 0 : s = splnet();
1056 :
1057 0 : switch (cmd) {
1058 : case SIOCSIFADDR:
1059 0 : ifp->if_flags |= IFF_UP;
1060 0 : if (!(ifp->if_flags & IFF_RUNNING))
1061 0 : pcn_init(ifp);
1062 : break;
1063 :
1064 : case SIOCSIFFLAGS:
1065 0 : if (ifp->if_flags & IFF_UP) {
1066 0 : if (ifp->if_flags & IFF_RUNNING)
1067 0 : error = ENETRESET;
1068 : else
1069 0 : pcn_init(ifp);
1070 : } else {
1071 0 : if (ifp->if_flags & IFF_RUNNING)
1072 0 : pcn_stop(ifp, 1);
1073 : }
1074 : break;
1075 :
1076 : case SIOCSIFMEDIA:
1077 : case SIOCGIFMEDIA:
1078 0 : error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1079 0 : break;
1080 :
1081 : default:
1082 0 : error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1083 0 : }
1084 :
1085 0 : if (error == ENETRESET) {
1086 0 : if (ifp->if_flags & IFF_RUNNING)
1087 0 : error = pcn_init(ifp);
1088 : else
1089 : error = 0;
1090 : }
1091 :
1092 0 : splx(s);
1093 0 : return (error);
1094 : }
1095 :
1096 : /*
1097 : * pcn_intr:
1098 : *
1099 : * Interrupt service routine.
1100 : */
1101 : int
1102 0 : pcn_intr(void *arg)
1103 : {
1104 0 : struct pcn_softc *sc = arg;
1105 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1106 : uint32_t csr0;
1107 : int wantinit, handled = 0;
1108 :
1109 0 : for (wantinit = 0; wantinit == 0;) {
1110 0 : csr0 = pcn_csr_read(sc, LE_CSR0);
1111 0 : if ((csr0 & LE_C0_INTR) == 0)
1112 : break;
1113 :
1114 : /* ACK the bits and re-enable interrupts. */
1115 0 : pcn_csr_write(sc, LE_CSR0, csr0 &
1116 : (LE_C0_INEA|LE_C0_BABL|LE_C0_MISS|LE_C0_MERR|LE_C0_RINT|
1117 : LE_C0_TINT|LE_C0_IDON));
1118 :
1119 : handled = 1;
1120 :
1121 0 : if (csr0 & LE_C0_RINT)
1122 0 : wantinit = pcn_rxintr(sc);
1123 :
1124 0 : if (csr0 & LE_C0_TINT)
1125 0 : pcn_txintr(sc);
1126 :
1127 0 : if (csr0 & LE_C0_ERR) {
1128 0 : if (csr0 & LE_C0_BABL)
1129 0 : ifp->if_oerrors++;
1130 0 : if (csr0 & LE_C0_MISS)
1131 0 : ifp->if_ierrors++;
1132 0 : if (csr0 & LE_C0_MERR) {
1133 0 : printf("%s: memory error\n",
1134 0 : sc->sc_dev.dv_xname);
1135 : wantinit = 1;
1136 0 : break;
1137 : }
1138 : }
1139 :
1140 0 : if ((csr0 & LE_C0_RXON) == 0) {
1141 0 : printf("%s: receiver disabled\n",
1142 0 : sc->sc_dev.dv_xname);
1143 0 : ifp->if_ierrors++;
1144 : wantinit = 1;
1145 0 : }
1146 :
1147 0 : if ((csr0 & LE_C0_TXON) == 0) {
1148 0 : printf("%s: transmitter disabled\n",
1149 0 : sc->sc_dev.dv_xname);
1150 0 : ifp->if_oerrors++;
1151 : wantinit = 1;
1152 0 : }
1153 : }
1154 :
1155 0 : if (handled) {
1156 0 : if (wantinit)
1157 0 : pcn_init(ifp);
1158 :
1159 : /* Try to get more packets going. */
1160 0 : pcn_start(ifp);
1161 0 : }
1162 :
1163 0 : return (handled);
1164 : }
1165 :
1166 : /*
1167 : * pcn_spnd:
1168 : *
1169 : * Suspend the chip.
1170 : */
1171 : void
1172 0 : pcn_spnd(struct pcn_softc *sc)
1173 : {
1174 : int i;
1175 :
1176 0 : pcn_csr_write(sc, LE_CSR5, sc->sc_csr5 | LE_C5_SPND);
1177 :
1178 0 : for (i = 0; i < 10000; i++) {
1179 0 : if (pcn_csr_read(sc, LE_CSR5) & LE_C5_SPND)
1180 0 : return;
1181 0 : delay(5);
1182 : }
1183 :
1184 0 : printf("%s: WARNING: chip failed to enter suspended state\n",
1185 0 : sc->sc_dev.dv_xname);
1186 0 : }
1187 :
1188 : /*
1189 : * pcn_txintr:
1190 : *
1191 : * Helper; handle transmit interrupts.
1192 : */
1193 : void
1194 0 : pcn_txintr(struct pcn_softc *sc)
1195 : {
1196 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1197 : struct pcn_txsoft *txs;
1198 : uint32_t tmd1, tmd2, tmd;
1199 : int i, j;
1200 :
1201 0 : ifq_clr_oactive(&ifp->if_snd);
1202 :
1203 : /*
1204 : * Go through our Tx list and free mbufs for those
1205 : * frames which have been transmitted.
1206 : */
1207 0 : for (i = sc->sc_txsdirty; sc->sc_txsfree != PCN_TXQUEUELEN;
1208 0 : i = PCN_NEXTTXS(i), sc->sc_txsfree++) {
1209 0 : txs = &sc->sc_txsoft[i];
1210 :
1211 0 : PCN_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1212 : BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1213 :
1214 0 : tmd1 = letoh32(sc->sc_txdescs[txs->txs_lastdesc].tmd1);
1215 0 : if (tmd1 & LE_T1_OWN)
1216 : break;
1217 :
1218 : /*
1219 : * Slightly annoying -- we have to loop through the
1220 : * descriptors we've used looking for ERR, since it
1221 : * can appear on any descriptor in the chain.
1222 : */
1223 0 : for (j = txs->txs_firstdesc;; j = PCN_NEXTTX(j)) {
1224 0 : tmd = letoh32(sc->sc_txdescs[j].tmd1);
1225 0 : if (tmd & LE_T1_ERR) {
1226 0 : ifp->if_oerrors++;
1227 0 : if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
1228 0 : tmd2 = letoh32(sc->sc_txdescs[j].tmd0);
1229 : else
1230 0 : tmd2 = letoh32(sc->sc_txdescs[j].tmd2);
1231 0 : if (tmd2 & LE_T2_UFLO) {
1232 0 : if (sc->sc_xmtsp < LE_C80_XMTSP_MAX) {
1233 0 : sc->sc_xmtsp++;
1234 0 : printf("%s: transmit "
1235 : "underrun; new threshold: "
1236 : "%s\n",
1237 0 : sc->sc_dev.dv_xname,
1238 0 : sc->sc_xmtsp_desc[
1239 : sc->sc_xmtsp]);
1240 0 : pcn_spnd(sc);
1241 0 : pcn_csr_write(sc, LE_CSR80,
1242 0 : LE_C80_RCVFW(sc->sc_rcvfw) |
1243 0 : LE_C80_XMTSP(sc->sc_xmtsp) |
1244 0 : LE_C80_XMTFW(sc->sc_xmtfw));
1245 0 : pcn_csr_write(sc, LE_CSR5,
1246 0 : sc->sc_csr5);
1247 0 : } else {
1248 0 : printf("%s: transmit "
1249 : "underrun\n",
1250 0 : sc->sc_dev.dv_xname);
1251 : }
1252 0 : } else if (tmd2 & LE_T2_BUFF) {
1253 0 : printf("%s: transmit buffer error\n",
1254 0 : sc->sc_dev.dv_xname);
1255 0 : }
1256 0 : if (tmd2 & LE_T2_LCOL)
1257 0 : ifp->if_collisions++;
1258 0 : if (tmd2 & LE_T2_RTRY)
1259 0 : ifp->if_collisions += 16;
1260 : goto next_packet;
1261 : }
1262 0 : if (j == txs->txs_lastdesc)
1263 : break;
1264 : }
1265 0 : if (tmd1 & LE_T1_ONE)
1266 0 : ifp->if_collisions++;
1267 0 : else if (tmd & LE_T1_MORE) {
1268 : /* Real number is unknown. */
1269 0 : ifp->if_collisions += 2;
1270 0 : }
1271 : next_packet:
1272 0 : sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
1273 0 : bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1274 : 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1275 0 : bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1276 0 : m_freem(txs->txs_mbuf);
1277 0 : txs->txs_mbuf = NULL;
1278 : }
1279 :
1280 : /* Update the dirty transmit buffer pointer. */
1281 0 : sc->sc_txsdirty = i;
1282 :
1283 : /*
1284 : * If there are no more pending transmissions, cancel the watchdog
1285 : * timer.
1286 : */
1287 0 : if (sc->sc_txsfree == PCN_TXQUEUELEN)
1288 0 : ifp->if_timer = 0;
1289 0 : }
1290 :
1291 : /*
1292 : * pcn_rxintr:
1293 : *
1294 : * Helper; handle receive interrupts.
1295 : */
1296 : int
1297 0 : pcn_rxintr(struct pcn_softc *sc)
1298 : {
1299 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1300 : struct pcn_rxsoft *rxs;
1301 : struct mbuf *m;
1302 0 : struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1303 : uint32_t rmd1;
1304 : int i, len;
1305 : int rv = 0;
1306 :
1307 0 : for (i = sc->sc_rxptr;; i = PCN_NEXTRX(i)) {
1308 0 : rxs = &sc->sc_rxsoft[i];
1309 :
1310 0 : PCN_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1311 :
1312 0 : rmd1 = letoh32(sc->sc_rxdescs[i].rmd1);
1313 :
1314 0 : if (rmd1 & LE_R1_OWN)
1315 : break;
1316 :
1317 : /*
1318 : * Check for errors and make sure the packet fit into
1319 : * a single buffer. We have structured this block of
1320 : * code the way it is in order to compress it into
1321 : * one test in the common case (no error).
1322 : */
1323 0 : if (__predict_false((rmd1 & (LE_R1_STP|LE_R1_ENP|LE_R1_ERR)) !=
1324 : (LE_R1_STP|LE_R1_ENP))) {
1325 : /* Make sure the packet is in a single buffer. */
1326 0 : if ((rmd1 & (LE_R1_STP|LE_R1_ENP)) !=
1327 : (LE_R1_STP|LE_R1_ENP)) {
1328 0 : printf("%s: packet spilled into next buffer\n",
1329 0 : sc->sc_dev.dv_xname);
1330 : rv = 1; /* pcn_intr() will re-init */
1331 0 : goto done;
1332 : }
1333 :
1334 : /*
1335 : * If the packet had an error, simply recycle the
1336 : * buffer.
1337 : */
1338 0 : if (rmd1 & LE_R1_ERR) {
1339 0 : ifp->if_ierrors++;
1340 : /*
1341 : * If we got an overflow error, chances
1342 : * are there will be a CRC error. In
1343 : * this case, just print the overflow
1344 : * error, and skip the others.
1345 : */
1346 0 : if (rmd1 & LE_R1_OFLO)
1347 0 : printf("%s: overflow error\n",
1348 0 : sc->sc_dev.dv_xname);
1349 : else {
1350 : #define PRINTIT(x, str) \
1351 : if (rmd1 & (x)) \
1352 : printf("%s: %s\n", \
1353 : sc->sc_dev.dv_xname, str);
1354 0 : PRINTIT(LE_R1_FRAM, "framing error");
1355 0 : PRINTIT(LE_R1_CRC, "CRC error");
1356 0 : PRINTIT(LE_R1_BUFF, "buffer error");
1357 : }
1358 : #undef PRINTIT
1359 0 : PCN_INIT_RXDESC(sc, i);
1360 0 : continue;
1361 : }
1362 : }
1363 :
1364 0 : bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1365 : rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1366 :
1367 : /*
1368 : * No errors; receive the packet.
1369 : */
1370 0 : if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
1371 0 : len = letoh32(sc->sc_rxdescs[i].rmd0) & LE_R1_BCNT_MASK;
1372 : else
1373 0 : len = letoh32(sc->sc_rxdescs[i].rmd2) & LE_R1_BCNT_MASK;
1374 :
1375 : /*
1376 : * The LANCE family includes the CRC with every packet;
1377 : * trim it off here.
1378 : */
1379 0 : len -= ETHER_CRC_LEN;
1380 :
1381 : /*
1382 : * If the packet is small enough to fit in a
1383 : * single header mbuf, allocate one and copy
1384 : * the data into it. This greatly reduces
1385 : * memory consumption when we receive lots
1386 : * of small packets.
1387 : *
1388 : * Otherwise, we add a new buffer to the receive
1389 : * chain. If this fails, we drop the packet and
1390 : * recycle the old buffer.
1391 : */
1392 0 : if (pcn_copy_small != 0 && len <= (MHLEN - 2)) {
1393 0 : MGETHDR(m, M_DONTWAIT, MT_DATA);
1394 0 : if (m == NULL)
1395 : goto dropit;
1396 0 : m->m_data += 2;
1397 0 : memcpy(mtod(m, caddr_t),
1398 : mtod(rxs->rxs_mbuf, caddr_t), len);
1399 0 : PCN_INIT_RXDESC(sc, i);
1400 0 : bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1401 : rxs->rxs_dmamap->dm_mapsize,
1402 : BUS_DMASYNC_PREREAD);
1403 0 : } else {
1404 0 : m = rxs->rxs_mbuf;
1405 0 : if (pcn_add_rxbuf(sc, i) != 0) {
1406 : dropit:
1407 0 : ifp->if_ierrors++;
1408 0 : PCN_INIT_RXDESC(sc, i);
1409 0 : bus_dmamap_sync(sc->sc_dmat,
1410 : rxs->rxs_dmamap, 0,
1411 : rxs->rxs_dmamap->dm_mapsize,
1412 : BUS_DMASYNC_PREREAD);
1413 0 : continue;
1414 : }
1415 : }
1416 :
1417 0 : m->m_pkthdr.len = m->m_len = len;
1418 :
1419 0 : ml_enqueue(&ml, m);
1420 0 : }
1421 :
1422 : /* Update the receive pointer. */
1423 0 : sc->sc_rxptr = i;
1424 : done:
1425 0 : if_input(ifp, &ml);
1426 0 : return (rv);
1427 0 : }
1428 :
1429 : /*
1430 : * pcn_tick:
1431 : *
1432 : * One second timer, used to tick the MII.
1433 : */
1434 : void
1435 0 : pcn_tick(void *arg)
1436 : {
1437 0 : struct pcn_softc *sc = arg;
1438 : int s;
1439 :
1440 0 : s = splnet();
1441 0 : mii_tick(&sc->sc_mii);
1442 0 : splx(s);
1443 :
1444 0 : timeout_add_sec(&sc->sc_tick_timeout, 1);
1445 0 : }
1446 :
1447 : /*
1448 : * pcn_reset:
1449 : *
1450 : * Perform a soft reset on the PCnet-PCI.
1451 : */
1452 : void
1453 0 : pcn_reset(struct pcn_softc *sc)
1454 : {
1455 :
1456 : /*
1457 : * The PCnet-PCI chip is reset by reading from the
1458 : * RESET register. Note that while the NE2100 LANCE
1459 : * boards require a write after the read, the PCnet-PCI
1460 : * chips do not require this.
1461 : *
1462 : * Since we don't know if we're in 16-bit or 32-bit
1463 : * mode right now, issue both (it's safe) in the
1464 : * hopes that one will succeed.
1465 : */
1466 0 : (void) bus_space_read_2(sc->sc_st, sc->sc_sh, PCN16_RESET);
1467 0 : (void) bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RESET);
1468 :
1469 : /* Wait 1ms for it to finish. */
1470 0 : delay(1000);
1471 :
1472 : /*
1473 : * Select 32-bit I/O mode by issuing a 32-bit write to the
1474 : * RDP. Since the RAP is 0 after a reset, writing a 0
1475 : * to RDP is safe (since it simply clears CSR0).
1476 : */
1477 0 : bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, 0);
1478 0 : }
1479 :
1480 : /*
1481 : * pcn_init: [ifnet interface function]
1482 : *
1483 : * Initialize the interface. Must be called at splnet().
1484 : */
1485 : int
1486 0 : pcn_init(struct ifnet *ifp)
1487 : {
1488 0 : struct pcn_softc *sc = ifp->if_softc;
1489 : struct pcn_rxsoft *rxs;
1490 0 : uint8_t *enaddr = LLADDR(ifp->if_sadl);
1491 : int i, error = 0;
1492 : uint32_t reg;
1493 :
1494 : /* Cancel any pending I/O. */
1495 0 : pcn_stop(ifp, 0);
1496 :
1497 : /* Reset the chip to a known state. */
1498 0 : pcn_reset(sc);
1499 :
1500 : /*
1501 : * On the Am79c970, select SSTYLE 2, and SSTYLE 3 on everything
1502 : * else.
1503 : *
1504 : * XXX It'd be really nice to use SSTYLE 2 on all the chips,
1505 : * because the structure layout is compatible with ILACC,
1506 : * but the burst mode is only available in SSTYLE 3, and
1507 : * burst mode should provide some performance enhancement.
1508 : */
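     : /*
     :  * The style also changes the descriptor layout: with SSTYLE 3 the
     :  * buffer address lives in rmd2/tmd2 (see PCN_INIT_RXDESC() and
     :  * pcn_start()), while SSTYLE 2 keeps it in rmd0/tmd0.
     :  */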
1509 0 : if (sc->sc_variant->pcv_chipid == PARTID_Am79c970)
1510 0 : sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI2;
1511 : else
1512 0 : sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI3;
1513 0 : pcn_bcr_write(sc, LE_BCR20, sc->sc_swstyle);
1514 :
1515 : /* Initialize the transmit descriptor ring. */
1516 0 : memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1517 0 : PCN_CDTXSYNC(sc, 0, PCN_NTXDESC,
1518 : BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1519 0 : sc->sc_txfree = PCN_NTXDESC;
1520 0 : sc->sc_txnext = 0;
1521 :
1522 : /* Initialize the transmit job descriptors. */
1523 0 : for (i = 0; i < PCN_TXQUEUELEN; i++)
1524 0 : sc->sc_txsoft[i].txs_mbuf = NULL;
1525 0 : sc->sc_txsfree = PCN_TXQUEUELEN;
1526 0 : sc->sc_txsnext = 0;
1527 0 : sc->sc_txsdirty = 0;
1528 :
1529 : /*
1530 : * Initialize the receive descriptor and receive job
1531 : * descriptor rings.
1532 : */
1533 0 : for (i = 0; i < PCN_NRXDESC; i++) {
1534 0 : rxs = &sc->sc_rxsoft[i];
1535 0 : if (rxs->rxs_mbuf == NULL) {
1536 0 : if ((error = pcn_add_rxbuf(sc, i)) != 0) {
1537 0 : printf("%s: unable to allocate or map rx "
1538 : "buffer %d, error = %d\n",
1539 0 : sc->sc_dev.dv_xname, i, error);
1540 : /*
1541 : * XXX Should attempt to run with fewer receive
1542 : * XXX buffers instead of just failing.
1543 : */
1544 0 : pcn_rxdrain(sc);
1545 0 : goto out;
1546 : }
1547 : } else
1548 0 : PCN_INIT_RXDESC(sc, i);
1549 : }
1550 0 : sc->sc_rxptr = 0;
1551 :
1552 : /* Initialize MODE for the initialization block. */
1553 0 : sc->sc_mode = 0;
1554 :
1555 : /*
1556 : * If we have MII, simply select MII in the MODE register,
1557 : * and clear ASEL. Otherwise, let ASEL stand (for now),
1558 : * and leave PORTSEL alone (it is ignored when ASEL is set).
1559 : */
1560 0 : if (sc->sc_flags & PCN_F_HAS_MII) {
1561 0 : pcn_bcr_write(sc, LE_BCR2,
1562 0 : pcn_bcr_read(sc, LE_BCR2) & ~LE_B2_ASEL);
1563 0 : sc->sc_mode |= LE_C15_PORTSEL(PORTSEL_MII);
1564 :
1565 : /*
1566 : * Disable MII auto-negotiation. We handle that in
1567 : * our own MII layer.
1568 : */
1569 0 : pcn_bcr_write(sc, LE_BCR32,
1570 0 : pcn_bcr_read(sc, LE_BCR32) | LE_B32_DANAS);
1571 0 : }
1572 :
1573 : /* Set the multicast filter in the init block. */
1574 0 : pcn_set_filter(sc);
1575 :
1576 : /*
1577 : * Set the Tx and Rx descriptor ring addresses in the init
1578 : * block, along with the TLEN and RLEN fields of the init block
1579 : * MODE register.
1580 : */
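     : /*
     :  * TLEN and RLEN are the log2 of the ring sizes; for a power of
     :  * two, ffs(x) - 1 == log2(x).
     :  */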
1581 0 : sc->sc_initblock.init_rdra = htole32(PCN_CDRXADDR(sc, 0));
1582 0 : sc->sc_initblock.init_tdra = htole32(PCN_CDTXADDR(sc, 0));
1583 0 : sc->sc_initblock.init_mode = htole32(sc->sc_mode |
1584 : ((ffs(PCN_NTXDESC) - 1) << 28) |
1585 : ((ffs(PCN_NRXDESC) - 1) << 20));
1586 :
1587 : /* Set the station address in the init block. */
1588 0 : sc->sc_initblock.init_padr[0] = htole32(enaddr[0] |
1589 : (enaddr[1] << 8) | (enaddr[2] << 16) | (enaddr[3] << 24));
1590 0 : sc->sc_initblock.init_padr[1] = htole32(enaddr[4] |
1591 : (enaddr[5] << 8));
1592 :
1593 : /* Initialize CSR3. */
1594 0 : pcn_csr_write(sc, LE_CSR3, LE_C3_MISSM|LE_C3_IDONM|LE_C3_DXSUFLO);
1595 :
1596 : /* Initialize CSR4. */
1597 0 : pcn_csr_write(sc, LE_CSR4, LE_C4_DMAPLUS|LE_C4_APAD_XMT|
1598 : LE_C4_MFCOM|LE_C4_RCVCCOM|LE_C4_TXSTRTM);
1599 :
1600 : /* Initialize CSR5. */
1601 0 : sc->sc_csr5 = LE_C5_LTINTEN|LE_C5_SINTE;
1602 0 : pcn_csr_write(sc, LE_CSR5, sc->sc_csr5);
1603 :
1604 : /*
1605 : * If we have an Am79c971 or greater, initialize CSR7.
1606 : *
1607 : * XXX Might be nice to use the MII auto-poll interrupt someday.
1608 : */
1609 0 : switch (sc->sc_variant->pcv_chipid) {
1610 : case PARTID_Am79c970:
1611 : case PARTID_Am79c970A:
1612 : /* Not available on these chips. */
1613 : break;
1614 :
1615 : default:
1616 0 : pcn_csr_write(sc, LE_CSR7, LE_C7_FASTSPNDE);
1617 0 : break;
1618 : }
1619 :
1620 : /*
1621 : * On the Am79c970A and greater, initialize BCR18 to
1622 : * enable burst mode.
1623 : *
1624 : * Also enable the "no underflow" option on the Am79c971 and
1625 : * higher, which prevents the chip from generating transmit
1626 : * underflows, yet still provides decent performance. Note that if
1627 : * the chip is not connected to external SRAM, then we still have
1628 : * to handle underflow errors (the NOUFLO bit is ignored in
1629 : * that case).
1630 : */
1631 0 : reg = pcn_bcr_read(sc, LE_BCR18);
1632 0 : switch (sc->sc_variant->pcv_chipid) {
1633 : case PARTID_Am79c970:
1634 : break;
1635 :
1636 : case PARTID_Am79c970A:
1637 0 : reg |= LE_B18_BREADE|LE_B18_BWRITE;
1638 0 : break;
1639 :
1640 : default:
1641 0 : reg |= LE_B18_BREADE|LE_B18_BWRITE|LE_B18_NOUFLO;
1642 0 : break;
1643 : }
1644 0 : pcn_bcr_write(sc, LE_BCR18, reg);
1645 :
1646 : /*
1647 : * Initialize CSR80 (FIFO thresholds for Tx and Rx).
1648 : */
1649 0 : pcn_csr_write(sc, LE_CSR80, LE_C80_RCVFW(sc->sc_rcvfw) |
1650 0 : LE_C80_XMTSP(sc->sc_xmtsp) | LE_C80_XMTFW(sc->sc_xmtfw));
1651 :
1652 : /*
1653 : * Send the init block to the chip, and wait for it
1654 : * to be processed.
1655 : */
1656 0 : PCN_CDINITSYNC(sc, BUS_DMASYNC_PREWRITE);
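     : /*
     :  * The init block address is handed to the chip 16 bits at a
     :  * time: the low half in CSR1, the high half in CSR2.
     :  */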
1657 0 : pcn_csr_write(sc, LE_CSR1, PCN_CDINITADDR(sc) & 0xffff);
1658 0 : pcn_csr_write(sc, LE_CSR2, (PCN_CDINITADDR(sc) >> 16) & 0xffff);
1659 0 : pcn_csr_write(sc, LE_CSR0, LE_C0_INIT);
1660 0 : delay(100);
1661 0 : for (i = 0; i < 10000; i++) {
1662 0 : if (pcn_csr_read(sc, LE_CSR0) & LE_C0_IDON)
1663 : break;
1664 0 : delay(10);
1665 : }
1666 0 : PCN_CDINITSYNC(sc, BUS_DMASYNC_POSTWRITE);
1667 0 : if (i == 10000) {
1668 0 : printf("%s: timeout processing init block\n",
1669 0 : sc->sc_dev.dv_xname);
1670 : error = EIO;
1671 0 : goto out;
1672 : }
1673 :
1674 : /* Set the media. */
1675 0 : (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
1676 :
1677 : /* Enable interrupts and external activity (and ACK IDON). */
1678 0 : pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_STRT|LE_C0_IDON);
1679 :
1680 0 : if (sc->sc_flags & PCN_F_HAS_MII) {
1681 : /* Start the one second MII clock. */
1682 0 : timeout_add_sec(&sc->sc_tick_timeout, 1);
1683 0 : }
1684 :
1685 : /* ...all done! */
1686 0 : ifp->if_flags |= IFF_RUNNING;
1687 0 : ifq_clr_oactive(&ifp->if_snd);
1688 :
1689 : out:
1690 0 : if (error)
1691 0 : printf("%s: interface not running\n", sc->sc_dev.dv_xname);
1692 0 : return (error);
1693 : }
1694 :
1695 : /*
1696 : * pcn_rxdrain:
1697 : *
1698 : * Drain the receive queue.
1699 : */
1700 : void
1701 0 : pcn_rxdrain(struct pcn_softc *sc)
1702 : {
1703 : struct pcn_rxsoft *rxs;
1704 : int i;
1705 :
1706 0 : for (i = 0; i < PCN_NRXDESC; i++) {
1707 0 : rxs = &sc->sc_rxsoft[i];
1708 0 : if (rxs->rxs_mbuf != NULL) {
1709 0 : bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1710 0 : m_freem(rxs->rxs_mbuf);
1711 0 : rxs->rxs_mbuf = NULL;
1712 0 : }
1713 : }
1714 0 : }
1715 :
1716 : /*
1717 : * pcn_stop: [ifnet interface function]
1718 : *
1719 : * Stop transmission on the interface.
1720 : */
1721 : void
1722 0 : pcn_stop(struct ifnet *ifp, int disable)
1723 : {
1724 0 : struct pcn_softc *sc = ifp->if_softc;
1725 : struct pcn_txsoft *txs;
1726 : int i;
1727 :
1728 0 : if (sc->sc_flags & PCN_F_HAS_MII) {
1729 : /* Stop the one second clock. */
1730 0 : timeout_del(&sc->sc_tick_timeout);
1731 :
1732 : /* Down the MII. */
1733 0 : mii_down(&sc->sc_mii);
1734 0 : }
1735 :
1736 : /* Mark the interface as down and cancel the watchdog timer. */
1737 0 : ifp->if_flags &= ~IFF_RUNNING;
1738 0 : ifq_clr_oactive(&ifp->if_snd);
1739 0 : ifp->if_timer = 0;
1740 :
1741 : /* Stop the chip. */
1742 0 : pcn_csr_write(sc, LE_CSR0, LE_C0_STOP);
1743 :
1744 : /* Release any queued transmit buffers. */
1745 0 : for (i = 0; i < PCN_TXQUEUELEN; i++) {
1746 0 : txs = &sc->sc_txsoft[i];
1747 0 : if (txs->txs_mbuf != NULL) {
1748 0 : bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1749 0 : m_freem(txs->txs_mbuf);
1750 0 : txs->txs_mbuf = NULL;
1751 0 : }
1752 : }
1753 :
1754 0 : if (disable)
1755 0 : pcn_rxdrain(sc);
1756 0 : }
1757 :
1758 : /*
1759 : * pcn_add_rxbuf:
1760 : *
1761 : * Add a receive buffer to the indicated descriptor.
1762 : */
1763 : int
1764 0 : pcn_add_rxbuf(struct pcn_softc *sc, int idx)
1765 : {
1766 0 : struct pcn_rxsoft *rxs = &sc->sc_rxsoft[idx];
1767 : struct mbuf *m;
1768 : int error;
1769 :
1770 0 : MGETHDR(m, M_DONTWAIT, MT_DATA);
1771 0 : if (m == NULL)
1772 0 : return (ENOBUFS);
1773 :
1774 0 : MCLGET(m, M_DONTWAIT);
1775 0 : if ((m->m_flags & M_EXT) == 0) {
1776 0 : m_freem(m);
1777 0 : return (ENOBUFS);
1778 : }
1779 :
1780 0 : if (rxs->rxs_mbuf != NULL)
1781 0 : bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1782 :
1783 0 : rxs->rxs_mbuf = m;
1784 :
1785 0 : error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1786 : m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1787 : BUS_DMA_READ|BUS_DMA_NOWAIT);
1788 0 : if (error) {
1789 0 : printf("%s: can't load rx DMA map %d, error = %d\n",
1790 0 : sc->sc_dev.dv_xname, idx, error);
1791 0 : panic("pcn_add_rxbuf");
1792 : }
1793 :
1794 0 : bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1795 : rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1796 :
1797 0 : PCN_INIT_RXDESC(sc, idx);
1798 :
1799 0 : return (0);
1800 0 : }
1801 :
1802 : /*
1803 : * pcn_set_filter:
1804 : *
1805 : * Set up the receive filter.
1806 : */
1807 : void
1808 0 : pcn_set_filter(struct pcn_softc *sc)
1809 : {
1810 0 : struct arpcom *ac = &sc->sc_arpcom;
1811 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1812 : struct ether_multi *enm;
1813 : struct ether_multistep step;
1814 : uint32_t crc;
1815 :
1816 0 : ifp->if_flags &= ~IFF_ALLMULTI;
1817 :
1818 0 : if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1819 0 : ifp->if_flags |= IFF_ALLMULTI;
1820 0 : if (ifp->if_flags & IFF_PROMISC)
1821 0 : sc->sc_mode |= LE_C15_PROM;
1822 0 : sc->sc_initblock.init_ladrf[0] =
1823 0 : sc->sc_initblock.init_ladrf[1] =
1824 0 : sc->sc_initblock.init_ladrf[2] =
1825 0 : sc->sc_initblock.init_ladrf[3] = 0xffff;
1826 0 : } else {
1827 0 : sc->sc_initblock.init_ladrf[0] =
1828 0 : sc->sc_initblock.init_ladrf[1] =
1829 0 : sc->sc_initblock.init_ladrf[2] =
1830 0 : sc->sc_initblock.init_ladrf[3] = 0;
1831 :
1832 : /*
1833 : * Set up the multicast address filter by passing all multicast
1834 : * addresses through a CRC generator, and then using the high
1835 : * order 6 bits as an index into the 64-bit logical address
1836 : * filter. The high order bits select the word, while the rest
1837 : * of the bits select the bit within the word.
1838 : */
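     : /*
     :  * For example, if the 6 most significant CRC bits are 0x2d, the
     :  * code below sets bit 13 (0x2d & 0xf) of ladrf word 2 (0x2d >> 4).
     :  */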
1839 0 : ETHER_FIRST_MULTI(step, ac, enm);
1840 0 : while (enm != NULL) {
1841 0 : crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1842 :
1843 : /* Just want the 6 most significant bits. */
1844 0 : crc >>= 26;
1845 :
1846 : /* Set the corresponding bit in the filter. */
1847 0 : sc->sc_initblock.init_ladrf[crc >> 4] |=
1848 0 : htole16(1 << (crc & 0xf));
1849 :
1850 0 : ETHER_NEXT_MULTI(step, enm);
1851 : }
1852 : }
1853 0 : }
1854 :
1855 : /*
1856 : * pcn_79c970_mediainit:
1857 : *
1858 : * Initialize media for the Am79c970.
1859 : */
1860 : void
1861 0 : pcn_79c970_mediainit(struct pcn_softc *sc)
1862 : {
1863 0 : ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, pcn_79c970_mediachange,
1864 : pcn_79c970_mediastatus);
1865 :
1866 0 : ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_5,
1867 : PORTSEL_AUI, NULL);
1868 0 : if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
1869 0 : ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_5|IFM_FDX,
1870 : PORTSEL_AUI, NULL);
1871 :
1872 0 : ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T,
1873 : PORTSEL_10T, NULL);
1874 0 : if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
1875 0 : ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T|IFM_FDX,
1876 : PORTSEL_10T, NULL);
1877 :
1878 0 : ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO,
1879 : 0, NULL);
1880 0 : if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
1881 0 : ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO|IFM_FDX,
1882 : 0, NULL);
1883 :
1884 0 : ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1885 0 : }
1886 :
1887 : /*
1888 : * pcn_79c970_mediastatus: [ifmedia interface function]
1889 : *
1890 : * Get the current interface media status (Am79c970 version).
1891 : */
1892 : void
1893 0 : pcn_79c970_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1894 : {
1895 0 : struct pcn_softc *sc = ifp->if_softc;
1896 :
1897 : /*
1898 : * The currently selected media is always the active media.
1899 : * Note: We have no way to determine what media the AUTO
1900 : * process picked.
1901 : */
1902 0 : ifmr->ifm_active = sc->sc_mii.mii_media.ifm_media;
1903 0 : }
1904 :
1905 : /*
1906 : * pcn_79c970_mediachange: [ifmedia interface function]
1907 : *
1908 : * Set hardware to newly-selected media (Am79c970 version).
1909 : */
1910 : int
1911 0 : pcn_79c970_mediachange(struct ifnet *ifp)
1912 : {
1913 0 : struct pcn_softc *sc = ifp->if_softc;
1914 : uint32_t reg;
1915 :
1916 0 : if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_AUTO) {
1917 : /*
1918 : * CSR15:PORTSEL doesn't matter. Just set BCR2:ASEL.
1919 : */
1920 : reg = pcn_bcr_read(sc, LE_BCR2);
1921 0 : reg |= LE_B2_ASEL;
1922 0 : pcn_bcr_write(sc, LE_BCR2, reg);
1923 0 : } else {
1924 : /*
1925 : * Clear BCR2:ASEL and set the new CSR15:PORTSEL value.
1926 : */
1927 : reg = pcn_bcr_read(sc, LE_BCR2);
1928 0 : reg &= ~LE_B2_ASEL;
1929 0 : pcn_bcr_write(sc, LE_BCR2, reg);
1930 :
1931 0 : reg = pcn_csr_read(sc, LE_CSR15);
1932 0 : reg = (reg & ~LE_C15_PORTSEL(PORTSEL_MASK)) |
1933 0 : LE_C15_PORTSEL(sc->sc_mii.mii_media.ifm_cur->ifm_data);
1934 0 : pcn_csr_write(sc, LE_CSR15, reg);
1935 : }
1936 :
1937 0 : if ((sc->sc_mii.mii_media.ifm_media & IFM_FDX) != 0) {
1938 : reg = LE_B9_FDEN;
1939 0 : if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_10_5)
1940 0 : reg |= LE_B9_AUIFD;
1941 0 : pcn_bcr_write(sc, LE_BCR9, reg);
1942 0 : } else
1943 0 : pcn_bcr_write(sc, LE_BCR9, 0);
1944 :
1945 0 : return (0);
1946 : }
1947 :
1948 : /*
1949 : * pcn_79c971_mediainit:
1950 : *
1951 : * Initialize media for the Am79c971.
1952 : */
1953 : void
1954 0 : pcn_79c971_mediainit(struct pcn_softc *sc)
1955 : {
1956 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1957 :
1958 : /* We have MII. */
1959 0 : sc->sc_flags |= PCN_F_HAS_MII;
1960 :
1961 : /*
1962 : * The built-in 10BASE-T interface is mapped to the MII
1963 : * on the PCNet-FAST. Unfortunately, there's no EEPROM
1964 : * word that tells us which PHY to use.
1965 : * This driver used to ignore all but the first PHY to
1966 : * answer, but this code was removed to support multiple
1967 : * external PHYs. As the default instance will be the first
1968 : * one to answer, no harm is done by letting the possibly
1969 : * non-connected internal PHY show up.
1970 : */
1971 :
1972 : /* Initialize our media structures and probe the MII. */
1973 0 : sc->sc_mii.mii_ifp = ifp;
1974 0 : sc->sc_mii.mii_readreg = pcn_mii_readreg;
1975 0 : sc->sc_mii.mii_writereg = pcn_mii_writereg;
1976 0 : sc->sc_mii.mii_statchg = pcn_mii_statchg;
1977 0 : ifmedia_init(&sc->sc_mii.mii_media, 0, pcn_79c971_mediachange,
1978 : pcn_79c971_mediastatus);
1979 :
1980 0 : mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1981 : MII_OFFSET_ANY, 0);
1982 0 : if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1983 0 : ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1984 0 : ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1985 0 : } else
1986 0 : ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1987 0 : }
1988 :
1989 : /*
1990 : * pcn_79c971_mediastatus: [ifmedia interface function]
1991 : *
1992 : * Get the current interface media status (Am79c971 version).
1993 : */
1994 : void
1995 0 : pcn_79c971_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1996 : {
1997 0 : struct pcn_softc *sc = ifp->if_softc;
1998 :
1999 0 : mii_pollstat(&sc->sc_mii);
2000 0 : ifmr->ifm_status = sc->sc_mii.mii_media_status;
2001 0 : ifmr->ifm_active = sc->sc_mii.mii_media_active;
2002 0 : }
2003 :
2004 : /*
2005 : * pcn_79c971_mediachange: [ifmedia interface function]
2006 : *
2007 : * Set hardware to newly-selected media (Am79c971 version).
2008 : */
2009 : int
2010 0 : pcn_79c971_mediachange(struct ifnet *ifp)
2011 : {
2012 0 : struct pcn_softc *sc = ifp->if_softc;
2013 :
2014 0 : if (ifp->if_flags & IFF_UP)
2015 0 : mii_mediachg(&sc->sc_mii);
2016 0 : return (0);
2017 : }
2018 :
2019 : /*
2020 : * pcn_mii_readreg: [mii interface function]
2021 : *
2022 : * Read a PHY register on the MII.
2023 : */
2024 : int
2025 0 : pcn_mii_readreg(struct device *self, int phy, int reg)
2026 : {
2027 0 : struct pcn_softc *sc = (void *) self;
2028 : uint32_t rv;
2029 :
2030 0 : pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
2031 0 : rv = pcn_bcr_read(sc, LE_BCR34) & LE_B34_MIIMD;
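     : /* A read of all ones usually means no PHY responded at this address. */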
2032 0 : if (rv == 0xffff)
2033 0 : return (0);
2034 :
2035 0 : return (rv);
2036 0 : }
2037 :
2038 : /*
2039 : * pcn_mii_writereg: [mii interface function]
2040 : *
2041 : * Write a PHY register on the MII.
2042 : */
2043 : void
2044 0 : pcn_mii_writereg(struct device *self, int phy, int reg, int val)
2045 : {
2046 0 : struct pcn_softc *sc = (void *) self;
2047 :
2048 0 : pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
2049 0 : pcn_bcr_write(sc, LE_BCR34, val);
2050 0 : }
2051 :
2052 : /*
2053 : * pcn_mii_statchg: [mii interface function]
2054 : *
2055 : * Callback from MII layer when media changes.
2056 : */
2057 : void
2058 0 : pcn_mii_statchg(struct device *self)
2059 : {
2060 0 : struct pcn_softc *sc = (void *) self;
2061 :
2062 0 : if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2063 0 : pcn_bcr_write(sc, LE_BCR9, LE_B9_FDEN);
2064 : else
2065 0 : pcn_bcr_write(sc, LE_BCR9, 0);
2066 0 : }
|