Line data Source code
1 : /* $OpenBSD: if_jme.c,v 1.50 2017/09/08 05:36:52 deraadt Exp $ */
2 : /*-
3 : * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
4 : * All rights reserved.
5 : *
6 : * Redistribution and use in source and binary forms, with or without
7 : * modification, are permitted provided that the following conditions
8 : * are met:
9 : * 1. Redistributions of source code must retain the above copyright
10 : * notice unmodified, this list of conditions, and the following
11 : * disclaimer.
12 : * 2. Redistributions in binary form must reproduce the above copyright
13 : * notice, this list of conditions and the following disclaimer in the
14 : * documentation and/or other materials provided with the distribution.
15 : *
16 : * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 : * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 : * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 : * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 : * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 : * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 : * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 : * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 : * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 : * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 : * SUCH DAMAGE.
27 : *
28 : * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
29 : * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.7 2008/09/13 04:04:39 sephe Exp $
30 : */
31 :
32 : #include "bpfilter.h"
33 : #include "vlan.h"
34 :
35 : #include <sys/param.h>
36 : #include <sys/endian.h>
37 : #include <sys/systm.h>
38 : #include <sys/sockio.h>
39 : #include <sys/mbuf.h>
40 : #include <sys/queue.h>
41 : #include <sys/kernel.h>
42 : #include <sys/device.h>
43 : #include <sys/timeout.h>
44 : #include <sys/socket.h>
45 :
46 : #include <machine/bus.h>
47 :
48 : #include <net/if.h>
49 : #include <net/if_dl.h>
50 : #include <net/if_media.h>
51 :
52 : #include <netinet/in.h>
53 : #include <netinet/if_ether.h>
54 :
55 : #if NBPFILTER > 0
56 : #include <net/bpf.h>
57 : #endif
58 :
59 : #include <dev/mii/miivar.h>
60 : #include <dev/mii/jmphyreg.h>
61 :
62 : #include <dev/pci/pcireg.h>
63 : #include <dev/pci/pcivar.h>
64 : #include <dev/pci/pcidevs.h>
65 :
66 : #include <dev/pci/if_jmereg.h>
67 : #include <dev/pci/if_jmevar.h>
68 :
69 : /* Define the following to disable printing Rx errors. */
70 : #undef JME_SHOW_ERRORS
71 :
72 : int jme_match(struct device *, void *, void *);
73 : void jme_map_intr_vector(struct jme_softc *);
74 : void jme_attach(struct device *, struct device *, void *);
75 : int jme_detach(struct device *, int);
76 :
77 : int jme_miibus_readreg(struct device *, int, int);
78 : void jme_miibus_writereg(struct device *, int, int, int);
79 : void jme_miibus_statchg(struct device *);
80 :
81 : int jme_init(struct ifnet *);
82 : int jme_ioctl(struct ifnet *, u_long, caddr_t);
83 :
84 : void jme_start(struct ifnet *);
85 : void jme_watchdog(struct ifnet *);
86 : void jme_mediastatus(struct ifnet *, struct ifmediareq *);
87 : int jme_mediachange(struct ifnet *);
88 :
89 : int jme_intr(void *);
90 : void jme_txeof(struct jme_softc *);
91 : void jme_rxeof(struct jme_softc *);
92 :
93 : int jme_dma_alloc(struct jme_softc *);
94 : void jme_dma_free(struct jme_softc *);
95 : int jme_init_rx_ring(struct jme_softc *);
96 : void jme_init_tx_ring(struct jme_softc *);
97 : void jme_init_ssb(struct jme_softc *);
98 : int jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
99 : int jme_encap(struct jme_softc *, struct mbuf *);
100 : void jme_rxpkt(struct jme_softc *);
101 :
102 : void jme_tick(void *);
103 : void jme_stop(struct jme_softc *);
104 : void jme_reset(struct jme_softc *);
105 : void jme_set_vlan(struct jme_softc *);
106 : void jme_iff(struct jme_softc *);
107 : void jme_stop_tx(struct jme_softc *);
108 : void jme_stop_rx(struct jme_softc *);
109 : void jme_mac_config(struct jme_softc *);
110 : void jme_reg_macaddr(struct jme_softc *, uint8_t[]);
111 : int jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
112 : int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
113 : void jme_discard_rxbufs(struct jme_softc *, int, int);
114 : #ifdef notyet
115 : void jme_setwol(struct jme_softc *);
116 : void jme_setlinkspeed(struct jme_softc *);
117 : #endif
118 :
119 : /*
120 : * Devices supported by this driver.
121 : */
/*
 * PCI vendor/product pairs matched by this driver: JMC250 (gigabit)
 * and JMC260 (fast ethernet).
 */
const struct pci_matchid jme_devices[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250 },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260 }
};

/* autoconf(9) glue: softc size and match/attach entry points. */
struct cfattach jme_ca = {
	sizeof (struct jme_softc), jme_match, jme_attach
};

struct cfdriver jme_cd = {
	NULL, "jme", DV_IFNET
};

/* Set non-zero (e.g. via a kernel debugger) to enable DPRINTF output. */
int jmedebug = 0;
#define DPRINTF(x)	do { if (jmedebug) printf x; } while (0)
137 :
138 : /*
139 : * Read a PHY register on the MII of the JMC250.
140 : */
int
jme_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_caps & JME_CAP_FPGA) && phy == 0)
		return (0);

	/*
	 * Start the SMI read cycle; the hardware clears SMI_OP_EXECUTE
	 * in JME_SMI when the cycle has completed.
	 */
	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	/* Busy-wait (1us steps) for completion, bounded by JME_PHY_TIMEOUT. */
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		/* MII framework has no error channel; report 0 on timeout. */
		return (0);
	}

	/* The read result is returned in the data field of JME_SMI. */
	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
168 :
169 : /*
170 : * Write a PHY register on the MII of the JMC250.
171 : */
void
jme_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_caps & JME_CAP_FPGA) && phy == 0)
		return;

	/*
	 * Start the SMI write cycle with the data in-line; the hardware
	 * clears SMI_OP_EXECUTE when the cycle has completed.
	 */
	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	/*
	 * Busy-wait for completion.  Note: `val' is reused here as a
	 * scratch variable for the polled register value.
	 */
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	}
}
196 :
197 : /*
198 : * Callback from MII layer when media changes.
199 : */
void
jme_miibus_statchg(struct device *dev)
{
	struct jme_softc *sc = (struct jme_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	/* Nothing to do if the interface is not up and running. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	/* Recompute the link state from the resolved media. */
	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			/* Fast-ethernet-only parts cannot link at 1000baseT. */
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling Rx/Tx MACs have a side-effect of resetting
	 * JME_TXNDA/JME_RXNDA register to the first address of
	 * Tx/Rx descriptor address. So driver should reset its
	 * internal producer/consumer pointer and reclaim any
	 * allocated resources. Note, just saving the value of
	 * JME_TXNDA and JME_RXNDA registers before stopping MAC
	 * and restoring JME_TXNDA/JME_RXNDA register is not
	 * sufficient to make sure correct MAC state because
	 * stopping MAC operation can take a while and hardware
	 * might have updated JME_TXNDA/JME_RXNDA registers
	 * during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
	timeout_del(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* Drain completed Rx and discard any half-assembled chain. */
	jme_rxeof(sc);
	m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	/* Reclaim completed Tx, then drop anything still queued. */
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				/* These packets were never transmitted. */
				ifp->if_oerrors++;
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;

	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	timeout_add_sec(&sc->jme_tick_ch, 1);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
319 :
320 : /*
321 : * Get the current interface media status.
322 : */
323 : void
324 0 : jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
325 : {
326 0 : struct jme_softc *sc = ifp->if_softc;
327 0 : struct mii_data *mii = &sc->sc_miibus;
328 :
329 0 : mii_pollstat(mii);
330 0 : ifmr->ifm_status = mii->mii_media_status;
331 0 : ifmr->ifm_active = mii->mii_media_active;
332 0 : }
333 :
334 : /*
335 : * Set hardware to newly-selected media.
336 : */
337 : int
338 0 : jme_mediachange(struct ifnet *ifp)
339 : {
340 0 : struct jme_softc *sc = ifp->if_softc;
341 0 : struct mii_data *mii = &sc->sc_miibus;
342 : int error;
343 :
344 0 : if (mii->mii_instance != 0) {
345 : struct mii_softc *miisc;
346 :
347 0 : LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
348 0 : mii_phy_reset(miisc);
349 0 : }
350 0 : error = mii_mediachg(mii);
351 :
352 0 : return (error);
353 : }
354 :
355 : int
356 0 : jme_match(struct device *dev, void *match, void *aux)
357 : {
358 0 : return pci_matchbyid((struct pci_attach_args *)aux, jme_devices,
359 : sizeof (jme_devices) / sizeof (jme_devices[0]));
360 : }
361 :
/*
 * Read one byte from the attached EEPROM via the SMB interface.
 * Returns 0 on success with the byte in *val, or ETIMEDOUT if the
 * SMB hardware does not go idle / complete the command in time.
 */
int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	/* Wait for the SMB hardware to become idle. */
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		printf("%s: EEPROM idle timeout!\n", sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	/* Trigger a read of the requested byte address. */
	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	/* The TRIGGER bit clears when the command has completed. */
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: EEPROM read timeout!\n", sc->sc_dev.dv_xname);
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}
400 :
401 : int
402 0 : jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
403 : {
404 0 : uint8_t fup, reg, val;
405 : uint32_t offset;
406 : int match;
407 :
408 : offset = 0;
409 0 : if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
410 0 : fup != JME_EEPROM_SIG0)
411 0 : return (ENOENT);
412 0 : if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
413 0 : fup != JME_EEPROM_SIG1)
414 0 : return (ENOENT);
415 : match = 0;
416 0 : do {
417 0 : if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
418 : break;
419 0 : if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
420 0 : (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
421 0 : if (jme_eeprom_read_byte(sc, offset + 1, ®) != 0)
422 : break;
423 0 : if (reg >= JME_PAR0 &&
424 0 : reg < JME_PAR0 + ETHER_ADDR_LEN) {
425 0 : if (jme_eeprom_read_byte(sc, offset + 2,
426 0 : &val) != 0)
427 : break;
428 0 : eaddr[reg - JME_PAR0] = val;
429 0 : match++;
430 0 : }
431 : }
432 : /* Check for the end of EEPROM descriptor. */
433 0 : if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
434 : break;
435 : /* Try next eeprom descriptor. */
436 0 : offset += JME_EEPROM_DESC_BYTES;
437 0 : } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
438 :
439 0 : if (match == ETHER_ADDR_LEN)
440 0 : return (0);
441 :
442 0 : return (ENOENT);
443 0 : }
444 :
445 : void
446 0 : jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
447 : {
448 : uint32_t par0, par1;
449 :
450 : /* Read station address. */
451 0 : par0 = CSR_READ_4(sc, JME_PAR0);
452 0 : par1 = CSR_READ_4(sc, JME_PAR1);
453 0 : par1 &= 0xFFFF;
454 :
455 0 : eaddr[0] = (par0 >> 0) & 0xFF;
456 0 : eaddr[1] = (par0 >> 8) & 0xFF;
457 0 : eaddr[2] = (par0 >> 16) & 0xFF;
458 0 : eaddr[3] = (par0 >> 24) & 0xFF;
459 0 : eaddr[4] = (par1 >> 0) & 0xFF;
460 0 : eaddr[5] = (par1 >> 8) & 0xFF;
461 0 : }
462 :
463 : void
464 0 : jme_map_intr_vector(struct jme_softc *sc)
465 : {
466 : uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];
467 :
468 : bzero(map, sizeof(map));
469 :
470 : /* Map Tx interrupts source to MSI/MSIX vector 2. */
471 : map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
472 : MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
473 : map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
474 : MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
475 : map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
476 : MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
477 : map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
478 : MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
479 : map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
480 : MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
481 : map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
482 : MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
483 : map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
484 : MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
485 : map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
486 : MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
487 : map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
488 : MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
489 : map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
490 : MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);
491 :
492 : /* Map Rx interrupts source to MSI/MSIX vector 1. */
493 : map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
494 : MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
495 : map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
496 : MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
497 : map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
498 : MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
499 : map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
500 : MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
501 : map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
502 : MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
503 : map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
504 : MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
505 : map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
506 : MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
507 : map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
508 : MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
509 : map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
510 : MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
511 : map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
512 : MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
513 : map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
514 : MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
515 : map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
516 : MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
517 : map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
518 : MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
519 : map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
520 : MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
521 : map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
522 : MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
523 : map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
524 : MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);
525 :
526 : /* Map all other interrupts source to MSI/MSIX vector 0. */
527 0 : CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
528 0 : CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
529 0 : CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
530 0 : CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
531 0 : }
532 :
/*
 * autoconf(9) attach: map registers, hook the interrupt, reset the
 * chip, determine the station address and PHY address, allocate DMA
 * resources and attach the network interface and MII bus.
 */
void
jme_attach(struct device *parent, struct device *self, void *aux)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	pcireg_t memtype;

	struct ifnet *ifp;
	uint32_t reg;
	int error = 0;

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access. Because I/O register access should use different
	 * BARs to access registers it's waste of time to use I/O
	 * register space access. JMC250 uses 16K to map entire memory
	 * space.
	 */

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, JME_PCIR_BAR);
	if (pci_mapreg_map(pa, JME_PCIR_BAR, memtype, 0, &sc->jme_mem_bt,
	    &sc->jme_mem_bh, NULL, &sc->jme_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	/* Prefer MSI; program the per-vector source map only when using it. */
	if (pci_intr_map_msi(pa, &ih) == 0)
		jme_map_intr_vector(sc);
	else if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, jme_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->jme_pct = pa->pa_pc;
	sc->jme_pcitag = pa->pa_tag;

	/*
	 * Extract FPGA revision
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;

		if (jmedebug) {
			printf("%s: FPGA revision : 0x%04x\n",
			    sc->sc_dev.dv_xname,
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	sc->jme_revfm = (reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT;

	/* JMC250 rev A2 needs CRC-error and packet-loss workarounds. */
	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_JMICRON_JMC250 &&
	    PCI_REVISION(pa->pa_class) == JME_REV_JMC250_A2)
		sc->jme_workaround |= JME_WA_CRCERRORS | JME_WA_PACKETLOSS;

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/*
	 * Get station address: prefer the EEPROM when one is present,
	 * otherwise fall back to the PAR0/PAR1 registers.
	 */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (jmedebug)) {
			printf("%s: ethernet hardware address "
			    "not found in EEPROM.\n", sc->sc_dev.dv_xname);
		}
		jme_reg_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	}

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Save PHY address.
	 * Integrated JR0211 has fixed PHY address whereas FPGA version
	 * requires PHY probing to get correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (jmedebug) {
			printf("%s: PHY is at address %d.\n",
			    sc->sc_dev.dv_xname, sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
	sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	/* Set up the network interface. */
	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
	ifp->if_watchdog = jme_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, JME_TX_RING_CNT - 1);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv6 |
	    IFCAP_CSUM_UDPv6;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = jme_miibus_readreg;
	sc->sc_miibus.mii_writereg = jme_miibus_writereg;
	sc->sc_miibus.mii_statchg = jme_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, jme_mediachange,
	    jme_mediastatus);
	/* FPGA parts need a full PHY probe; others use the saved address. */
	mii_attach(self, &sc->sc_miibus, 0xffffffff,
	    sc->jme_caps & JME_CAP_FPGA ? MII_PHY_ANY : sc->jme_phyaddr,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Save PHYADDR for FPGA mode PHY not handled, not production hw
	 */

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->jme_tick_ch, jme_tick, sc);

	return;
fail:
	/* Partial attach; detach undoes whatever was set up so far. */
	jme_detach(&sc->sc_dev, 0);
}
709 :
/*
 * autoconf(9) detach: stop the chip, tear down the MII bus, the
 * interface, DMA resources and the interrupt handler.  Also used as
 * the error-unwind path by jme_attach().
 */
int
jme_detach(struct device *self, int flags)
{
	struct jme_softc *sc = (struct jme_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	/* Quiesce the hardware at splnet before dismantling anything. */
	s = splnet();
	jme_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	jme_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->jme_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}
737 :
/*
 * Allocate and map all DMA resources: Tx/Rx descriptor rings, the
 * shadow status block, and per-buffer DMA maps for Tx and Rx.
 * Returns 0 on success or an errno.
 *
 * NOTE(review): most failure paths below return without releasing
 * resources allocated by the earlier steps; jme_attach() calls
 * jme_detach() -> jme_dma_free() on error, which appears to be relied
 * upon for cleanup — verify that jme_dma_free() is safe on a
 * partially-constructed state.
 */
int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int error, i, nsegs;

	/*
	 * Create DMA stuffs for TX ring
	 */

	error = bus_dmamap_create(sc->sc_dmat, JME_TX_RING_SIZE, 1,
	    JME_TX_RING_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/*
	 * Allocate DMA'able memory for TX ring.
	 * XXX not allocated with BUS_DMA_ZERO, unlike the Rx ring below.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_TX_RING_SIZE, ETHER_ALIGN, 0,
	    &sc->jme_rdata.jme_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_tx_ring_seg,
	    nsegs, JME_TX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		/*
		 * NOTE(review): this passes the address of the ring KVA
		 * pointer cast to bus_dma_segment_t * rather than
		 * &sc->jme_rdata.jme_tx_ring_seg — confirm intent.
		 */
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->jme_rdata.jme_tx_ring, 1);
		return error;
	}
	sc->jme_rdata.jme_tx_ring_paddr =
	    sc->jme_cdata.jme_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for RX ring
	 */

	error = bus_dmamap_create(sc->sc_dmat, JME_RX_RING_SIZE, 1,
	    JME_RX_RING_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring (zeroed by BUS_DMA_ZERO). */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_RX_RING_SIZE, ETHER_ALIGN, 0,
	    &sc->jme_rdata.jme_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_rx_ring_seg,
	    nsegs, JME_RX_RING_SIZE, (caddr_t *)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		/* NOTE(review): ring KVA cast to segment pointer — confirm. */
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1);
		return error;
	}
	sc->jme_rdata.jme_rx_ring_paddr =
	    sc->jme_cdata.jme_rx_ring_map->dm_segs[0].ds_addr;

#if 0
	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
	tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
	rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
	if ((JME_ADDR_HI(tx_ring_end) !=
	    JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
	    (JME_ADDR_HI(rx_ring_end) !=
	    JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
		printf("%s: 4GB boundary crossed, switching to 32bit "
		    "DMA address mode.\n", sc->sc_dev.dv_xname);
		jme_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}
#endif

	/*
	 * Create DMA stuffs for shadow status block
	 */

	error = bus_dmamap_create(sc->sc_dmat, JME_SSB_SIZE, 1,
	    JME_SSB_SIZE, 0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_ssb_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for shared status block. */
	error = bus_dmamem_alloc(sc->sc_dmat, JME_SSB_SIZE, 1, 0,
	    &sc->jme_rdata.jme_ssb_block_seg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able "
		    "memory for shared status block.\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->jme_rdata.jme_ssb_block_seg,
	    nsegs, JME_SSB_SIZE, (caddr_t *)&sc->jme_rdata.jme_ssb_block,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for shared status block */
	error = bus_dmamap_load(sc->sc_dmat,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory "
		    "for shared status block.\n", sc->sc_dev.dv_xname);
		/* NOTE(review): block KVA cast to segment pointer — confirm. */
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1);
		return error;
	}
	sc->jme_rdata.jme_ssb_block_paddr =
	    sc->jme_cdata.jme_ssb_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->sc_dmat, JME_TSO_MAXSIZE,
		    JME_MAXTXSEGS, JME_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			printf("%s: could not create %dth Tx dmamap.\n",
			    sc->sc_dev.dv_xname, i);

			/* Destroy the maps created so far. */
			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			}
			return error;
		}

	}

	/*
	 * Create DMA stuffs for RX buffers
	 */

	/* Spare map used by jme_newbuf() when replacing an Rx buffer. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
	    0, BUS_DMA_NOWAIT, &sc->jme_cdata.jme_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			int j;

			printf("%s: could not create %dth Rx dmamap.\n",
			    sc->sc_dev.dv_xname, i);

			/* Destroy the maps created so far, plus the spare. */
			for (j = 0; j < i; ++j) {
				rxd = &sc->jme_cdata.jme_rxdesc[j];
				bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->jme_cdata.jme_rx_sparemap);
			/* NOTE(review): jme_rx_tag looks like a FreeBSD
			 * busdma leftover — confirm it is used anywhere. */
			sc->jme_cdata.jme_rx_tag = NULL;
			return error;
		}
	}

	return 0;
}
942 :
/*
 * Release all DMA resources created by jme_dma_alloc(): descriptor
 * rings, per-buffer Tx/Rx maps, the spare Rx map and the shadow
 * status block.
 *
 * NOTE(review): the bus_dmamem_free() calls pass the mapped ring/block
 * KVA cast to bus_dma_segment_t * instead of the saved *_seg — mirrors
 * the usage in jme_dma_alloc()'s error paths; confirm intent.
 */
void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_tx_ring_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_tx_ring, 1);

	/* Rx ring */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_ring_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_rx_ring, 1);

	/* Tx buffers */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
	}

	/* Rx buffers */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_sparemap);

	/* Shadow status block. */
	bus_dmamap_unload(sc->sc_dmat,
	    sc->jme_cdata.jme_ssb_map);
	bus_dmamem_free(sc->sc_dmat,
	    (bus_dma_segment_t *)sc->jme_rdata.jme_ssb_block, 1);
}
982 :
983 : #ifdef notyet
984 : /*
985 : * Unlike other ethernet controllers, JMC250 requires
986 : * explicit resetting link speed to 10/100Mbps as gigabit
 * link will consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with
 * auto-negotiation but we don't know whether that operation
 * would succeed or not as we have no control after powering
 * off. If the renegotiation fails WOL may not work. Running
 * at 1Gbps draws more power than 375mA at 3.3V which is
 * specified in the PCI specification and that would result in
 * completely shutting down power to the ethernet controller.
995 : *
996 : * TODO
997 : * Save current negotiated media speed/duplex/flow-control
998 : * to softc and restore the same link again after resuming.
999 : * PHY handling such as power down/resetting to 100Mbps
1000 : * may be better handled in suspend method in phy driver.
1001 : */
/*
 * Renegotiate the link down to 10/100Mbps before suspend so that the
 * powered-down chip stays within the PCI 375mA budget (see the block
 * comment above).
 * NOTE(review): compiled out under #ifdef notyet and written against
 * FreeBSD primitives (JME_LOCK/JME_UNLOCK, pause()); it would need
 * adaptation before being enabled on OpenBSD.
 */
void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = &sc->sc_miibus;
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch IFM_SUBTYPE(mii->mii_media_active) {
		case IFM_10_T:
		case IFM_100_TX:
			/* Already at an acceptable speed; nothing to do. */
			return;
		case IFM_1000_T:
			/* Gigabit link: remember to poll for the downshift. */
			aneg++;
			/* FALLTHROUGH */
		default:
			break;
		}
	}
	/* Advertise 10/100 only and restart auto-negotiation. */
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) get a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					/* Got one: reprogram the MAC to it. */
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			/* Sleep one tick with the lock dropped. */
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			printf("%s: establishing link failed, "
			    "WOL may not work!\n", sc->sc_dev.dv_xname);
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}
1060 :
/*
 * Program Wake-On-LAN state ahead of a power-down: without PME
 * capability (or with WOL disabled) the PHY is simply powered down,
 * otherwise magic-frame wakeup and PME message delivery are enabled.
 * NOTE(review): compiled out under #ifdef notyet and written against
 * the FreeBSD PCI API (pci_find_extcap/pci_read_config); it would
 * need conversion to OpenBSD's pci(9) interfaces before use.
 */
void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	/* Start from a clean slate: PME message off, all WOL bits off. */
	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->sc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
1103 : #endif
1104 :
/*
 * Encapsulate an mbuf chain into Tx descriptors and post it on the
 * ring.  On any failure the mbuf is freed and an error returned; on
 * success the hardware owns the frame.  The head descriptor carries
 * the checksum/VLAN flags and the total packet length (in addr_hi);
 * one additional descriptor is used per DMA segment.
 */
int
jme_encap(struct jme_softc *sc, struct mbuf *m)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	int error, i, prod;
	uint32_t cflags;

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	/* DMA-load the chain; on EFBIG defragment once and retry. */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
	    m, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG)
		goto drop;
	if (error != 0) {
		if (m_defrag(m, M_DONTWAIT)) {
			error = ENOBUFS;
			goto drop;
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
		    m, BUS_DMA_NOWAIT);
		if (error != 0)
			goto drop;
	}

	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
		cflags |= JME_TD_UDPCSUM;

#if NVLAN > 0
	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}
#endif

	/* Head descriptor: flags plus total length, no buffer of its own. */
	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->buflen = 0;
	desc->addr_hi = htole32(m->m_pkthdr.len);
	desc->addr_lo = 0;
	sc->jme_cdata.jme_tx_cnt++;
	JME_DESC_INC(prod, JME_TX_RING_CNT);
	/* One data descriptor per DMA segment, owned by hardware. */
	for (i = 0; i < txd->tx_dmamap->dm_nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
		desc->buflen = htole32(txd->tx_dmamap->dm_segs[i].ds_len);
		desc->addr_hi =
		    htole32(JME_ADDR_HI(txd->tx_dmamap->dm_segs[i].ds_addr));
		desc->addr_lo =
		    htole32(JME_ADDR_LO(txd->tx_dmamap->dm_segs[i].ds_addr));
		sc->jme_cdata.jme_tx_cnt++;
		JME_DESC_INC(prod, JME_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.  This must happen after the data
	 * descriptors are filled in (see jme_txeof: only the head
	 * descriptor's OWN bit is checked).
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	/* Account for the head descriptor via JME_TXD_RSVD. */
	txd->tx_ndesc = txd->tx_dmamap->dm_nsegs + JME_TXD_RSVD;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);

  drop:
	m_freem(m);
	return (error);
}
1192 :
1193 : void
1194 0 : jme_start(struct ifnet *ifp)
1195 : {
1196 0 : struct jme_softc *sc = ifp->if_softc;
1197 : struct mbuf *m;
1198 : int enq = 0;
1199 :
1200 : /* Reclaim transmitted frames. */
1201 0 : if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
1202 0 : jme_txeof(sc);
1203 :
1204 0 : if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1205 0 : return;
1206 0 : if ((sc->jme_flags & JME_FLAG_LINK) == 0)
1207 0 : return;
1208 0 : if (IFQ_IS_EMPTY(&ifp->if_snd))
1209 0 : return;
1210 :
1211 0 : for (;;) {
1212 : /*
1213 : * Check number of available TX descs, always
1214 : * leave JME_TXD_RSVD free TX descs.
1215 : */
1216 0 : if (sc->jme_cdata.jme_tx_cnt + JME_TXD_RSVD >
1217 : JME_TX_RING_CNT - JME_TXD_RSVD) {
1218 0 : ifq_set_oactive(&ifp->if_snd);
1219 0 : break;
1220 : }
1221 :
1222 0 : IFQ_DEQUEUE(&ifp->if_snd, m);
1223 0 : if (m == NULL)
1224 : break;
1225 :
1226 : /*
1227 : * Pack the data into the transmit ring. If we
1228 : * don't have room, set the OACTIVE flag and wait
1229 : * for the NIC to drain the ring.
1230 : */
1231 0 : if (jme_encap(sc, m) != 0) {
1232 0 : ifp->if_oerrors++;
1233 0 : continue;
1234 : }
1235 :
1236 0 : enq++;
1237 :
1238 : #if NBPFILTER > 0
1239 : /*
1240 : * If there's a BPF listener, bounce a copy of this frame
1241 : * to him.
1242 : */
1243 0 : if (ifp->if_bpf != NULL)
1244 0 : bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1245 : #endif
1246 : }
1247 :
1248 0 : if (enq > 0) {
1249 : /*
1250 : * Reading TXCSR takes very long time under heavy load
1251 : * so cache TXCSR value and writes the ORed value with
1252 : * the kick command to the TXCSR. This saves one register
1253 : * access cycle.
1254 : */
1255 0 : CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1256 : TXCSR_TXQ_N_START(TXCSR_TXQ0));
1257 : /* Set a timeout in case the chip goes out to lunch. */
1258 0 : ifp->if_timer = JME_TX_TIMEOUT;
1259 0 : }
1260 0 : }
1261 :
1262 : void
1263 0 : jme_watchdog(struct ifnet *ifp)
1264 : {
1265 0 : struct jme_softc *sc = ifp->if_softc;
1266 :
1267 0 : if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1268 0 : printf("%s: watchdog timeout (missed link)\n",
1269 0 : sc->sc_dev.dv_xname);
1270 0 : ifp->if_oerrors++;
1271 0 : jme_init(ifp);
1272 0 : return;
1273 : }
1274 :
1275 0 : jme_txeof(sc);
1276 0 : if (sc->jme_cdata.jme_tx_cnt == 0) {
1277 0 : printf("%s: watchdog timeout (missed Tx interrupts) "
1278 : "-- recovering\n", sc->sc_dev.dv_xname);
1279 0 : jme_start(ifp);
1280 0 : return;
1281 : }
1282 :
1283 0 : printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1284 0 : ifp->if_oerrors++;
1285 0 : jme_init(ifp);
1286 0 : jme_start(ifp);
1287 0 : }
1288 :
1289 : int
1290 0 : jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1291 : {
1292 0 : struct jme_softc *sc = ifp->if_softc;
1293 0 : struct mii_data *mii = &sc->sc_miibus;
1294 0 : struct ifreq *ifr = (struct ifreq *)data;
1295 : int error = 0, s;
1296 :
1297 0 : s = splnet();
1298 :
1299 0 : switch (cmd) {
1300 : case SIOCSIFADDR:
1301 0 : ifp->if_flags |= IFF_UP;
1302 0 : if (!(ifp->if_flags & IFF_RUNNING))
1303 0 : jme_init(ifp);
1304 : break;
1305 :
1306 : case SIOCSIFFLAGS:
1307 0 : if (ifp->if_flags & IFF_UP) {
1308 0 : if (ifp->if_flags & IFF_RUNNING)
1309 0 : error = ENETRESET;
1310 : else
1311 0 : jme_init(ifp);
1312 : } else {
1313 0 : if (ifp->if_flags & IFF_RUNNING)
1314 0 : jme_stop(sc);
1315 : }
1316 : break;
1317 :
1318 : case SIOCSIFMEDIA:
1319 : case SIOCGIFMEDIA:
1320 0 : error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1321 0 : break;
1322 :
1323 : default:
1324 0 : error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1325 0 : }
1326 :
1327 0 : if (error == ENETRESET) {
1328 0 : if (ifp->if_flags & IFF_RUNNING)
1329 0 : jme_iff(sc);
1330 : error = 0;
1331 0 : }
1332 :
1333 0 : splx(s);
1334 0 : return (error);
1335 : }
1336 :
/*
 * Reprogram the MAC (GHC, RXMAC, TXMAC, TXPFC, GPREG1) to match the
 * speed, duplex and flow-control settings currently resolved by the
 * MII layer, applying the chip-specific workarounds where enabled.
 */
void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = &sc->sc_miibus;

	/* Reset the MAC before reprogramming it. */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* Full duplex: no collision handling, honor pause frames. */
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		/* Half duplex: CSMA/CD collision detection and backoff. */
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_HALF_PATCH;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10;
		if (hdx)
			gp1 |= GPREG1_HALF_PATCH;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100;
		if (hdx)
			gp1 |= GPREG1_HALF_PATCH;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		/* Fast-ethernet-only parts cannot run at 1000Mbps. */
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}

	if (sc->jme_revfm >= 2) {
		/* set clock sources for tx mac and offload engine */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
			ghc |= GHC_TCPCK_1000 | GHC_TXCK_1000;
		else
			ghc |= GHC_TCPCK_10_100 | GHC_TXCK_10_100;
	}

	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	/* Apply workarounds only on affected revisions. */
	if (sc->jme_workaround & JME_WA_CRCERRORS) {
		jme_miibus_writereg(&sc->sc_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_PACKETLOSS)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
1438 :
/*
 * Interrupt handler.  Returns 1 when the interrupt was ours, 0 to let
 * other handlers on a shared line claim it.  A status of 0xFFFFFFFF is
 * treated as "device gone" and ignored.
 */
int
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0;

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Re-read; bail if nothing we care about remains pending. */
	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		/* Rx coalescing event: harvest received frames. */
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rxeof(sc);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes very long time under heavy
			 * load so cache RXCSR value and writes the ORed
			 * value with the kick command to the RXCSR. This
			 * saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		/* Tx coalescing event: reclaim and refill the Tx ring. */
		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			jme_txeof(sc);
			jme_start(ifp);
		}
	}
	claimed = 1;
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

	return (claimed);
}
1494 :
/*
 * Reclaim Tx descriptors for frames the hardware has finished with:
 * update error/collision counters, free the mbufs, give descriptors
 * back to the ring and clear the OACTIVE/watchdog state when the ring
 * has drained enough.
 */
void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	/* Ring empty: nothing to reclaim. */
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];

		if (txd->tx_m == NULL)
			panic("%s: freeing NULL mbuf!", sc->sc_dev.dv_xname);

		/* Stop at the first frame the hardware still owns. */
		status = letoh32(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			if (status & JME_TD_COLLISION) {
				/* Collision count is reported in buflen. */
				ifp->if_collisions +=
				    letoh32(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of multi-descriptor
		 * transmission is updated so driver have to skip entire
		 * chained buffers for the transmitted frame. In other
		 * words, JME_TD_OWN bit is valid only at the first
		 * descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, JME_TX_RING_CNT);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		if (sc->jme_cdata.jme_tx_cnt < 0)
			panic("%s: Active Tx desc counter was garbled",
			    sc->sc_dev.dv_xname);
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	/* Ring fully drained: disarm the watchdog. */
	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	/* Resume transmission once enough descriptors are free again. */
	if (sc->jme_cdata.jme_tx_cnt + JME_TXD_RSVD <=
	    JME_TX_RING_CNT - JME_TXD_RSVD)
		ifq_clr_oactive(&ifp->if_snd);

	bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
	    sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}
1568 :
1569 : void
1570 0 : jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
1571 : {
1572 : int i;
1573 :
1574 0 : for (i = 0; i < count; ++i) {
1575 0 : struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];
1576 :
1577 0 : desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
1578 0 : desc->buflen = htole32(MCLBYTES);
1579 0 : JME_DESC_INC(cons, JME_RX_RING_CNT);
1580 : }
1581 0 : }
1582 :
1583 : /* Receive a frame. */
/*
 * Receive one frame.  A frame may span several ring entries (nsegs is
 * taken from the head descriptor); the segment mbufs are chained into
 * one packet, fixed up for length/padding/checksum/VLAN on the last
 * segment, and queued for if_input().  On error or buffer shortage
 * the descriptors are recycled via jme_discard_rxbufs().
 */
void
jme_rxpkt(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = letoh32(desc->flags);
	status = letoh32(desc->buflen);
	nsegs = JME_RX_NSEGS(status);

	/* Hardware flagged an error: recycle all segments of the frame. */
	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		printf("%s : receive error = 0x%b\n",
		    sc->sc_dev.dv_xname, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
		return;
	}

	/* Payload length excludes the 10-byte alignment pad. */
	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, JME_RX_RING_CNT)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(sc, cons, nsegs - count);
			/* Drop whatever was chained so far. */
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			/* XXX assert PKTHDR? */
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = sc->jme_cdata.jme_rxlen;
			}

			/*
			 * Account for 10bytes auto padding which is used
			 * to align IP header on 32bit boundary. Also note,
			 * CRC bytes is automatically removed by the
			 * hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if (flags & (JME_RD_IPV4|JME_RD_IPV6)) {
				if ((flags & JME_RD_IPV4) &&
				    (flags & JME_RD_IPCSUM))
					m->m_pkthdr.csum_flags |=
					    M_IPV4_CSUM_IN_OK;
				/* Only unfragmented TCP/UDP are verified. */
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				    (JME_RD_TCP | JME_RD_TCPCSUM) ||
				    (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				    (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
				}
			}

#if NVLAN > 0
			/* Check for VLAN tagged packets. */
			if (flags & JME_RD_VLAN_TAG) {
				m->m_pkthdr.ether_vtag = flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}
#endif

			ml_enqueue(&ml, m);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc);
		}
	}

	if_input(ifp, &ml);

	/* Advance the consumer index past all segments of this frame. */
	sc->jme_cdata.jme_rx_cons += nsegs;
	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
}
1713 :
1714 : void
1715 0 : jme_rxeof(struct jme_softc *sc)
1716 : {
1717 : struct jme_desc *desc;
1718 : int nsegs, prog, pktlen;
1719 :
1720 0 : bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
1721 : sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1722 :
1723 : prog = 0;
1724 0 : for (;;) {
1725 0 : desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
1726 0 : if ((letoh32(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
1727 : break;
1728 0 : if ((letoh32(desc->buflen) & JME_RD_VALID) == 0)
1729 : break;
1730 :
1731 : /*
1732 : * Check number of segments against received bytes.
1733 : * Non-matching value would indicate that hardware
1734 : * is still trying to update Rx descriptors. I'm not
1735 : * sure whether this check is needed.
1736 : */
1737 0 : nsegs = JME_RX_NSEGS(letoh32(desc->buflen));
1738 0 : pktlen = JME_RX_BYTES(letoh32(desc->buflen));
1739 0 : if (nsegs != howmany(pktlen, MCLBYTES)) {
1740 0 : printf("%s: RX fragment count(%d) "
1741 : "and packet size(%d) mismach\n",
1742 0 : sc->sc_dev.dv_xname, nsegs, pktlen);
1743 0 : break;
1744 : }
1745 :
1746 : /* Received a frame. */
1747 0 : jme_rxpkt(sc);
1748 0 : prog++;
1749 : }
1750 :
1751 0 : if (prog > 0) {
1752 0 : bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
1753 : sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1754 0 : }
1755 0 : }
1756 :
1757 : void
1758 0 : jme_tick(void *xsc)
1759 : {
1760 0 : struct jme_softc *sc = xsc;
1761 0 : struct mii_data *mii = &sc->sc_miibus;
1762 : int s;
1763 :
1764 0 : s = splnet();
1765 0 : mii_tick(mii);
1766 0 : timeout_add_sec(&sc->jme_tick_ch, 1);
1767 0 : splx(s);
1768 0 : }
1769 :
/*
 * Reset the MAC by pulsing the GHC reset bit.  The #ifdef foo section
 * that would stop the Rx/Tx engines first has never been enabled.
 */
void
jme_reset(struct jme_softc *sc)
{
#ifdef foo
	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);
#endif
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
}
1782 :
1783 : int
1784 0 : jme_init(struct ifnet *ifp)
1785 : {
1786 0 : struct jme_softc *sc = ifp->if_softc;
1787 : struct mii_data *mii;
1788 0 : uint8_t eaddr[ETHER_ADDR_LEN];
1789 : bus_addr_t paddr;
1790 : uint32_t reg;
1791 : int error;
1792 :
1793 : /*
1794 : * Cancel any pending I/O.
1795 : */
1796 0 : jme_stop(sc);
1797 :
1798 : /*
1799 : * Reset the chip to a known state.
1800 : */
1801 0 : jme_reset(sc);
1802 :
1803 : /* Init descriptors. */
1804 0 : error = jme_init_rx_ring(sc);
1805 0 : if (error != 0) {
1806 0 : printf("%s: initialization failed: no memory for Rx buffers.\n",
1807 0 : sc->sc_dev.dv_xname);
1808 0 : jme_stop(sc);
1809 0 : return (error);
1810 : }
1811 0 : jme_init_tx_ring(sc);
1812 :
1813 : /* Initialize shadow status block. */
1814 0 : jme_init_ssb(sc);
1815 :
1816 : /* Reprogram the station address. */
1817 0 : bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
1818 0 : CSR_WRITE_4(sc, JME_PAR0,
1819 : eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
1820 0 : CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
1821 :
1822 : /*
1823 : * Configure Tx queue.
1824 : * Tx priority queue weight value : 0
1825 : * Tx FIFO threshold for processing next packet : 16QW
1826 : * Maximum Tx DMA length : 512
1827 : * Allow Tx DMA burst.
1828 : */
1829 0 : sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
1830 : sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
1831 0 : sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
1832 0 : sc->jme_txcsr |= sc->jme_tx_dma_size;
1833 0 : sc->jme_txcsr |= TXCSR_DMA_BURST;
1834 0 : CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
1835 :
1836 : /* Set Tx descriptor counter. */
1837 0 : CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
1838 :
1839 : /* Set Tx ring address to the hardware. */
1840 0 : paddr = JME_TX_RING_ADDR(sc, 0);
1841 0 : CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
1842 0 : CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
1843 :
1844 : /* Configure TxMAC parameters. */
1845 : reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
1846 : reg |= TXMAC_THRESH_1_PKT;
1847 : reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
1848 0 : CSR_WRITE_4(sc, JME_TXMAC, reg);
1849 :
1850 : /*
1851 : * Configure Rx queue.
1852 : * FIFO full threshold for transmitting Tx pause packet : 128T
1853 : * FIFO threshold for processing next packet : 128QW
1854 : * Rx queue 0 select
1855 : * Max Rx DMA length : 128
1856 : * Rx descriptor retry : 32
1857 : * Rx descriptor retry time gap : 256ns
1858 : * Don't receive runt/bad frame.
1859 : */
1860 0 : sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
1861 :
1862 : /*
1863 : * Since Rx FIFO size is 4K bytes, receiving frames larger
1864 : * than 4K bytes will suffer from Rx FIFO overruns. So
1865 : * decrease FIFO threshold to reduce the FIFO overruns for
1866 : * frames larger than 4000 bytes.
1867 : * For best performance of standard MTU sized frames use
1868 : * maximum allowable FIFO threshold, which is 32QW for
1869 : * chips with a full mask >= 2 otherwise 128QW. FIFO
1870 : * thresholds of 64QW and 128QW are not valid for chips
1871 : * with a full mask >= 2.
1872 : */
1873 0 : if (sc->jme_revfm >= 2)
1874 0 : sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
1875 : else {
1876 0 : if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1877 0 : ETHER_VLAN_ENCAP_LEN) > JME_RX_FIFO_SIZE)
1878 0 : sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
1879 : else
1880 0 : sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
1881 : }
1882 0 : sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
1883 0 : sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
1884 0 : sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
1885 : /* XXX TODO DROP_BAD */
1886 0 : CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
1887 :
1888 : /* Set Rx descriptor counter. */
1889 0 : CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
1890 :
1891 : /* Set Rx ring address to the hardware. */
1892 0 : paddr = JME_RX_RING_ADDR(sc, 0);
1893 0 : CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
1894 0 : CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
1895 :
1896 : /* Clear receive filter. */
1897 0 : CSR_WRITE_4(sc, JME_RXMAC, 0);
1898 :
1899 : /* Set up the receive filter. */
1900 0 : jme_iff(sc);
1901 :
1902 0 : jme_set_vlan(sc);
1903 :
1904 : /*
1905 : * Disable all WOL bits as WOL can interfere normal Rx
1906 : * operation. Also clear WOL detection status bits.
1907 : */
1908 0 : reg = CSR_READ_4(sc, JME_PMCS);
1909 0 : reg &= ~PMCS_WOL_ENB_MASK;
1910 0 : CSR_WRITE_4(sc, JME_PMCS, reg);
1911 :
1912 : /*
1913 : * Pad 10bytes right before received frame. This will greatly
1914 : * help Rx performance on strict-alignment architectures as
1915 : * it does not need to copy the frame to align the payload.
1916 : */
1917 0 : reg = CSR_READ_4(sc, JME_RXMAC);
1918 0 : reg |= RXMAC_PAD_10BYTES;
1919 0 : reg |= RXMAC_CSUM_ENB;
1920 0 : CSR_WRITE_4(sc, JME_RXMAC, reg);
1921 :
1922 : /* Configure general purpose reg0 */
1923 0 : reg = CSR_READ_4(sc, JME_GPREG0);
1924 0 : reg &= ~GPREG0_PCC_UNIT_MASK;
1925 : /* Set PCC timer resolution to micro-seconds unit. */
1926 0 : reg |= GPREG0_PCC_UNIT_US;
1927 : /*
1928 : * Disable all shadow register posting as we have to read
1929 : * JME_INTR_STATUS register in jme_intr. Also it seems
1930 : * that it's hard to synchronize interrupt status between
1931 : * hardware and software with shadow posting due to
1932 : * requirements of bus_dmamap_sync(9).
1933 : */
1934 0 : reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
1935 : GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
1936 : GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
1937 : GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
1938 : /* Disable posting of DW0. */
1939 0 : reg &= ~GPREG0_POST_DW0_ENB;
1940 : /* Clear PME message. */
1941 0 : reg &= ~GPREG0_PME_ENB;
1942 : /* Set PHY address. */
1943 0 : reg &= ~GPREG0_PHY_ADDR_MASK;
1944 0 : reg |= sc->jme_phyaddr;
1945 0 : CSR_WRITE_4(sc, JME_GPREG0, reg);
1946 :
1947 : /* Configure Tx queue 0 packet completion coalescing. */
1948 0 : sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1949 : reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
1950 : PCCTX_COAL_TO_MASK;
1951 0 : sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1952 : reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
1953 : PCCTX_COAL_PKT_MASK;
1954 : reg |= PCCTX_COAL_TXQ0;
1955 0 : CSR_WRITE_4(sc, JME_PCCTX, reg);
1956 :
1957 : /* Configure Rx queue 0 packet completion coalescing. */
1958 0 : sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1959 : reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
1960 : PCCRX_COAL_TO_MASK;
1961 0 : sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1962 : reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
1963 : PCCRX_COAL_PKT_MASK;
1964 0 : CSR_WRITE_4(sc, JME_PCCRX0, reg);
1965 :
1966 : /* Configure shadow status block but don't enable posting. */
1967 0 : paddr = sc->jme_rdata.jme_ssb_block_paddr;
1968 0 : CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
1969 0 : CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
1970 :
1971 : /* Disable Timer 1 and Timer 2. */
1972 0 : CSR_WRITE_4(sc, JME_TIMER1, 0);
1973 0 : CSR_WRITE_4(sc, JME_TIMER2, 0);
1974 :
1975 : /* Configure retry transmit period, retry limit value. */
1976 0 : CSR_WRITE_4(sc, JME_TXTRHD,
1977 : ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
1978 : TXTRHD_RT_PERIOD_MASK) |
1979 : ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
1980 : TXTRHD_RT_LIMIT_SHIFT));
1981 :
1982 : /* Disable RSS. */
1983 0 : CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
1984 :
1985 : /* Initialize the interrupt mask. */
1986 0 : CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
1987 0 : CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
1988 :
1989 : /*
1990 : * Enabling Tx/Rx DMA engines and Rx queue processing is
1991 : * done after detection of valid link in jme_miibus_statchg.
1992 : */
1993 0 : sc->jme_flags &= ~JME_FLAG_LINK;
1994 :
1995 : /* Set the current media. */
1996 0 : mii = &sc->sc_miibus;
1997 0 : mii_mediachg(mii);
1998 :
1999 0 : timeout_add_sec(&sc->jme_tick_ch, 1);
2000 :
2001 0 : ifp->if_flags |= IFF_RUNNING;
2002 0 : ifq_clr_oactive(&ifp->if_snd);
2003 :
2004 0 : return (0);
2005 0 : }
2006 :
/*
 * Stop the chip and reclaim every mbuf still owned by the rings.
 *
 * Marks the interface down, cancels the tick timeout, masks all
 * interrupts, disables shadow status posting, halts the Rx and Tx
 * engines, and then frees any mbufs left in the Rx reassembly chain
 * and in the Rx/Tx descriptor rings so jme_init() can rebuild them
 * from scratch.
 */
void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/* Stop the periodic tick and forget the negotiated link. */
	timeout_del(&sc->jme_tick_ch);
	sc->jme_flags &= ~JME_FLAG_LINK;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

#ifdef foo
	/* Reclaim Rx/Tx buffers that have been completed. */
	jme_rxeof(sc);
	m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
#endif

	/*
	 * Free partial finished RX segments
	 */
	m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
}
2074 :
2075 : void
2076 0 : jme_stop_tx(struct jme_softc *sc)
2077 : {
2078 : uint32_t reg;
2079 : int i;
2080 :
2081 0 : reg = CSR_READ_4(sc, JME_TXCSR);
2082 0 : if ((reg & TXCSR_TX_ENB) == 0)
2083 0 : return;
2084 0 : reg &= ~TXCSR_TX_ENB;
2085 0 : CSR_WRITE_4(sc, JME_TXCSR, reg);
2086 0 : for (i = JME_TIMEOUT; i > 0; i--) {
2087 0 : DELAY(1);
2088 0 : if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2089 : break;
2090 : }
2091 0 : if (i == 0)
2092 0 : printf("%s: stopping transmitter timeout!\n",
2093 0 : sc->sc_dev.dv_xname);
2094 0 : }
2095 :
2096 : void
2097 0 : jme_stop_rx(struct jme_softc *sc)
2098 : {
2099 : uint32_t reg;
2100 : int i;
2101 :
2102 0 : reg = CSR_READ_4(sc, JME_RXCSR);
2103 0 : if ((reg & RXCSR_RX_ENB) == 0)
2104 0 : return;
2105 0 : reg &= ~RXCSR_RX_ENB;
2106 0 : CSR_WRITE_4(sc, JME_RXCSR, reg);
2107 0 : for (i = JME_TIMEOUT; i > 0; i--) {
2108 0 : DELAY(1);
2109 0 : if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2110 : break;
2111 : }
2112 0 : if (i == 0)
2113 0 : printf("%s: stopping recevier timeout!\n", sc->sc_dev.dv_xname);
2114 0 : }
2115 :
2116 : void
2117 0 : jme_init_tx_ring(struct jme_softc *sc)
2118 : {
2119 : struct jme_ring_data *rd;
2120 : struct jme_txdesc *txd;
2121 : int i;
2122 :
2123 0 : sc->jme_cdata.jme_tx_prod = 0;
2124 0 : sc->jme_cdata.jme_tx_cons = 0;
2125 0 : sc->jme_cdata.jme_tx_cnt = 0;
2126 :
2127 0 : rd = &sc->jme_rdata;
2128 0 : bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
2129 0 : for (i = 0; i < JME_TX_RING_CNT; i++) {
2130 0 : txd = &sc->jme_cdata.jme_txdesc[i];
2131 0 : txd->tx_m = NULL;
2132 0 : txd->tx_desc = &rd->jme_tx_ring[i];
2133 0 : txd->tx_ndesc = 0;
2134 : }
2135 :
2136 0 : bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_tx_ring_map, 0,
2137 : sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2138 0 : }
2139 :
2140 : void
2141 0 : jme_init_ssb(struct jme_softc *sc)
2142 : {
2143 : struct jme_ring_data *rd;
2144 :
2145 0 : rd = &sc->jme_rdata;
2146 0 : bzero(rd->jme_ssb_block, JME_SSB_SIZE);
2147 0 : bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_ssb_map, 0,
2148 : sc->jme_cdata.jme_ssb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2149 0 : }
2150 :
2151 : int
2152 0 : jme_init_rx_ring(struct jme_softc *sc)
2153 : {
2154 : struct jme_ring_data *rd;
2155 : struct jme_rxdesc *rxd;
2156 : int i;
2157 :
2158 0 : KASSERT(sc->jme_cdata.jme_rxhead == NULL &&
2159 : sc->jme_cdata.jme_rxtail == NULL &&
2160 : sc->jme_cdata.jme_rxlen == 0);
2161 0 : sc->jme_cdata.jme_rx_cons = 0;
2162 :
2163 0 : rd = &sc->jme_rdata;
2164 0 : bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
2165 0 : for (i = 0; i < JME_RX_RING_CNT; i++) {
2166 : int error;
2167 :
2168 0 : rxd = &sc->jme_cdata.jme_rxdesc[i];
2169 0 : rxd->rx_m = NULL;
2170 0 : rxd->rx_desc = &rd->jme_rx_ring[i];
2171 0 : error = jme_newbuf(sc, rxd);
2172 0 : if (error)
2173 0 : return (error);
2174 0 : }
2175 :
2176 0 : bus_dmamap_sync(sc->sc_dmat, sc->jme_cdata.jme_rx_ring_map, 0,
2177 : sc->jme_cdata.jme_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2178 :
2179 0 : return (0);
2180 0 : }
2181 :
/*
 * Attach a fresh mbuf cluster to the given Rx descriptor.
 *
 * The new cluster is DMA-loaded into the spare map first, so the
 * descriptor's current buffer survives intact if allocation or the
 * load fails; only on success are the spare map and the descriptor's
 * map swapped.  Finally the hardware descriptor is rewritten and
 * ownership handed back to the chip (JME_RD_OWN).
 *
 * Returns 0 on success, ENOBUFS if no mbuf/cluster is available, or
 * the bus_dmamap_load_mbuf() error.
 */
int
jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
{
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	/* Allocate an mbuf header plus a cluster for the frame data. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * JMC250 has 64bit boundary alignment limitation so jme(4)
	 * takes advantage of 10 bytes padding feature of hardware
	 * in order not to copy entire frame to align IP header on
	 * 32bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Load into the spare map so the old buffer survives failure. */
	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->jme_cdata.jme_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);
		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Tear down the mapping of the previous buffer, if any. */
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	/* Swap the now-loaded spare map into the descriptor. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
	sc->jme_cdata.jme_rx_sparemap = map;
	rxd->rx_m = m;

	/* Rewrite the hardware descriptor and return it to the chip. */
	desc = rxd->rx_desc;
	desc->buflen = htole32(rxd->rx_dmamap->dm_segs[0].ds_len);
	desc->addr_lo =
	    htole32(JME_ADDR_LO(rxd->rx_dmamap->dm_segs[0].ds_addr));
	desc->addr_hi =
	    htole32(JME_ADDR_HI(rxd->rx_dmamap->dm_segs[0].ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

	return (0);
}
2236 :
2237 : void
2238 0 : jme_set_vlan(struct jme_softc *sc)
2239 : {
2240 0 : struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2241 : uint32_t reg;
2242 :
2243 0 : reg = CSR_READ_4(sc, JME_RXMAC);
2244 0 : reg &= ~RXMAC_VLAN_ENB;
2245 0 : if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
2246 0 : reg |= RXMAC_VLAN_ENB;
2247 0 : CSR_WRITE_4(sc, JME_RXMAC, reg);
2248 0 : }
2249 :
2250 : void
2251 0 : jme_iff(struct jme_softc *sc)
2252 : {
2253 0 : struct arpcom *ac = &sc->sc_arpcom;
2254 0 : struct ifnet *ifp = &ac->ac_if;
2255 : struct ether_multi *enm;
2256 : struct ether_multistep step;
2257 : uint32_t crc;
2258 0 : uint32_t mchash[2];
2259 : uint32_t rxcfg;
2260 :
2261 0 : rxcfg = CSR_READ_4(sc, JME_RXMAC);
2262 0 : rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2263 : RXMAC_ALLMULTI);
2264 0 : ifp->if_flags &= ~IFF_ALLMULTI;
2265 :
2266 : /*
2267 : * Always accept frames destined to our station address.
2268 : * Always accept broadcast frames.
2269 : */
2270 0 : rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2271 :
2272 0 : if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
2273 0 : ifp->if_flags |= IFF_ALLMULTI;
2274 0 : if (ifp->if_flags & IFF_PROMISC)
2275 0 : rxcfg |= RXMAC_PROMISC;
2276 : else
2277 0 : rxcfg |= RXMAC_ALLMULTI;
2278 0 : mchash[0] = mchash[1] = 0xFFFFFFFF;
2279 0 : } else {
2280 : /*
2281 : * Set up the multicast address filter by passing all
2282 : * multicast addresses through a CRC generator, and then
2283 : * using the low-order 6 bits as an index into the 64 bit
2284 : * multicast hash table. The high order bits select the
2285 : * register, while the rest of the bits select the bit
2286 : * within the register.
2287 : */
2288 0 : rxcfg |= RXMAC_MULTICAST;
2289 0 : bzero(mchash, sizeof(mchash));
2290 :
2291 0 : ETHER_FIRST_MULTI(step, ac, enm);
2292 0 : while (enm != NULL) {
2293 0 : crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2294 :
2295 : /* Just want the 6 least significant bits. */
2296 0 : crc &= 0x3f;
2297 :
2298 : /* Set the corresponding bit in the hash table. */
2299 0 : mchash[crc >> 5] |= 1 << (crc & 0x1f);
2300 :
2301 0 : ETHER_NEXT_MULTI(step, enm);
2302 : }
2303 : }
2304 :
2305 0 : CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2306 0 : CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2307 0 : CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2308 0 : }
|