/*	$OpenBSD: if_xge.c,v 1.77 2017/08/17 12:46:32 jsg Exp $	*/
/*	$NetBSD: if_xge.c,v 1.1 2005/09/09 10:30:27 ragge Exp $	*/

/*
 * Copyright (c) 2004, SUNET, Swedish University Computer Network.
 * All rights reserved.
 *
 * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      SUNET, Swedish University Computer Network.
 * 4. The name of SUNET may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL SUNET
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the Neterion Xframe Ten Gigabit Ethernet controller.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/miivar.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <sys/lock.h>

#include <dev/pci/if_xgereg.h>

/* Xframe chipset revisions */
#define XGE_TYPE_XENA		1	/* Xframe */
#define XGE_TYPE_HERC		2	/* Xframe-II */

#define XGE_PCISIZE_XENA	26
#define XGE_PCISIZE_HERC	64

/*
 * Some tunable constants, tune with care!
 */
#define RX_MODE		RX_MODE_1 /* Receive mode (buffer usage, see below) */
#define NRXDESCS	1016	/* # of receive descriptors (requested) */
#define NTXDESCS	2048	/* Number of transmit descriptors */
#define NTXFRAGS	100	/* Max fragments per packet */

/*
 * Receive buffer modes; 1, 3 or 5 buffers.
 */
#define RX_MODE_1	1
#define RX_MODE_3	3
#define RX_MODE_5	5

/*
 * Use clever macros to avoid a bunch of #ifdef's.
 */
#define XCONCAT3(x,y,z)	x ## y ## z
#define CONCAT3(x,y,z)	XCONCAT3(x,y,z)
#define NDESC_BUFMODE	CONCAT3(NDESC_,RX_MODE,BUFMODE)
#define rxd_4k		CONCAT3(rxd,RX_MODE,_4k)
/* XXX */
#if 0
#define rxdesc		___CONCAT(rxd,RX_MODE)
#endif
#define rxdesc		rxd1
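/*
 * With the default RX_MODE_1 the macros above expand to NDESC_1BUFMODE
 * and rxd1_4k (declared in if_xgereg.h), so the rest of the driver can
 * use one set of names regardless of the buffer mode selected above.
 */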

#define NEXTTX(x)	(((x)+1) % NTXDESCS)
#define NRXFRAGS	RX_MODE /* hardware imposed frags */
#define NRXPAGES	((NRXDESCS/NDESC_BUFMODE)+1)
#define NRXREAL		(NRXPAGES*NDESC_BUFMODE)
#define RXMAPSZ		(NRXPAGES*PAGE_SIZE)
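/*
 * Derived geometry: receive descriptors are kept in linked 4k pages
 * holding NDESC_BUFMODE descriptors each, so the requested NRXDESCS is
 * rounded up to a whole number of pages (NRXREAL actual descriptors).
 */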

/*
 * Magic to fix a bug when the MAC address cannot be read correctly.
 * This came from the Linux driver.
 */
static const uint64_t xge_fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
};

/*
 * Constants to be programmed into the Hercules (Xframe-II) registers,
 * to configure the XGXS transceiver.
 */
static const uint64_t xge_herc_dtx_cfg[] = {
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	0x8000051536750004ULL, 0x80000515367500E4ULL,

	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,

	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	0x801205150D440004ULL, 0x801205150D4400E4ULL,

	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
};

static const uint64_t xge_xena_dtx_cfg[] = {
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,

	0x8001051500000000ULL, 0x80010515000000E0ULL,
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,

	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
};

struct xge_softc {
	struct device		sc_dev;
	struct arpcom		sc_arpcom;
	struct ifmedia		xena_media;

	void			*sc_ih;

	bus_dma_tag_t		sc_dmat;
	bus_space_tag_t		sc_st;
	bus_space_handle_t	sc_sh;
	bus_space_tag_t		sc_txt;
	bus_space_handle_t	sc_txh;

	pcireg_t		sc_pciregs[16];

	int			xge_type; /* chip type */
	int			xge_if_flags;

	/* Transmit structures */
	struct txd	*sc_txd[NTXDESCS];	/* transmit frags array */
	bus_addr_t	sc_txdp[NTXDESCS];	/* dva of transmit frags */
	bus_dmamap_t	sc_txm[NTXDESCS];	/* transmit frags map */
	struct mbuf	*sc_txb[NTXDESCS];	/* transmit mbuf pointer */
	int		sc_nexttx, sc_lasttx;
	bus_dmamap_t	sc_txmap;		/* transmit descriptor map */

	/* Receive data */
	bus_dmamap_t	sc_rxmap;		/* receive descriptor map */
	struct rxd_4k	*sc_rxd_4k[NRXPAGES];	/* receive desc pages */
	bus_dmamap_t	sc_rxm[NRXREAL];	/* receive buffer map */
	struct mbuf	*sc_rxb[NRXREAL];	/* mbufs on rx descriptors */
	int		sc_nextrx;		/* next descriptor to check */
};

#ifdef XGE_DEBUG
#define DPRINTF(x)	do { if (xgedebug) printf x ; } while (0)
#define DPRINTFN(n,x)	do { if (xgedebug >= (n)) printf x ; } while (0)
int xgedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

int xge_match(struct device *, void *, void *);
void xge_attach(struct device *, struct device *, void *);
int xge_alloc_txmem(struct xge_softc *);
int xge_alloc_rxmem(struct xge_softc *);
void xge_start(struct ifnet *);
void xge_stop(struct ifnet *, int);
int xge_add_rxbuf(struct xge_softc *, int);
void xge_setmulti(struct xge_softc *);
void xge_setpromisc(struct xge_softc *);
int xge_setup_xgxs_xena(struct xge_softc *);
int xge_setup_xgxs_herc(struct xge_softc *);
int xge_ioctl(struct ifnet *, u_long, caddr_t);
int xge_init(struct ifnet *);
void xge_ifmedia_status(struct ifnet *, struct ifmediareq *);
int xge_xgmii_mediachange(struct ifnet *);
void xge_enable(struct xge_softc *);
int xge_intr(void *);

/*
 * Helpers to address registers.
 */
#define PIF_WCSR(csr, val)	pif_wcsr(sc, csr, val)
#define PIF_RCSR(csr)		pif_rcsr(sc, csr)
#define TXP_WCSR(csr, val)	txp_wcsr(sc, csr, val)
#define PIF_WKEY(csr, val)	pif_wkey(sc, csr, val)

static inline void
pif_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
{
#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_st, sc->sc_sh, csr, val);
#else
	uint32_t lval, hval;

	lval = val&0xffffffff;
	hval = val>>32;

#if BYTE_ORDER == LITTLE_ENDIAN
	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr, lval);
	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr+4, hval);
#else
	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr+4, lval);
	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr, hval);
#endif
#endif
}

static inline uint64_t
pif_rcsr(struct xge_softc *sc, bus_size_t csr)
{
	uint64_t val;
#if defined(__LP64__)
	val = bus_space_read_raw_8(sc->sc_st, sc->sc_sh, csr);
#else
	uint64_t val2;

	val = bus_space_read_raw_4(sc->sc_st, sc->sc_sh, csr);
	val2 = bus_space_read_raw_4(sc->sc_st, sc->sc_sh, csr+4);
#if BYTE_ORDER == LITTLE_ENDIAN
	val |= (val2 << 32);
#else
	val = (val << 32 | val2);
#endif
#endif
	return (val);
}

static inline void
txp_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
{
#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_txt, sc->sc_txh, csr, val);
#else
	uint32_t lval, hval;

	lval = val&0xffffffff;
	hval = val>>32;

#if BYTE_ORDER == LITTLE_ENDIAN
	bus_space_write_raw_4(sc->sc_txt, sc->sc_txh, csr, lval);
	bus_space_write_raw_4(sc->sc_txt, sc->sc_txh, csr+4, hval);
#else
	bus_space_write_raw_4(sc->sc_txt, sc->sc_txh, csr, hval);
	bus_space_write_raw_4(sc->sc_txt, sc->sc_txh, csr+4, lval);
#endif
#endif
}

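/*
 * Some MAC configuration registers are write-protected on Xena: each
 * bus write must be preceded by writing the magic RMAC_KEY_VALUE to
 * RMAC_CFG_KEY, which is why the key is rewritten before each 32-bit
 * half on non-LP64 platforms.
 */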
static inline void
pif_wkey(struct xge_softc *sc, bus_size_t csr, uint64_t val)
{
#if defined(__LP64__)
	if (sc->xge_type == XGE_TYPE_XENA)
		PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);

	bus_space_write_raw_8(sc->sc_st, sc->sc_sh, csr, val);
#else
	uint32_t lval, hval;

	lval = val&0xffffffff;
	hval = val>>32;

	if (sc->xge_type == XGE_TYPE_XENA)
		PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);

#if BYTE_ORDER == LITTLE_ENDIAN
	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr, lval);
#else
	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr, hval);
#endif

	if (sc->xge_type == XGE_TYPE_XENA)
		PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
#if BYTE_ORDER == LITTLE_ENDIAN
	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr+4, hval);
#else
	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr+4, lval);
#endif
#endif
}

struct cfattach xge_ca = {
	sizeof(struct xge_softc), xge_match, xge_attach
};

struct cfdriver xge_cd = {
	NULL, "xge", DV_IFNET
};

#define XNAME sc->sc_dev.dv_xname

#define XGE_RXSYNC(desc, what) \
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, \
	    (desc/NDESC_BUFMODE) * XGE_PAGE + sizeof(struct rxdesc) * \
	    (desc%NDESC_BUFMODE), sizeof(struct rxdesc), what)
#define XGE_RXD(desc)	&sc->sc_rxd_4k[desc/NDESC_BUFMODE]-> \
			    r4_rxd[desc%NDESC_BUFMODE]
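/*
 * XGE_RXSYNC maps a flat descriptor index onto its 4k page and slot and
 * syncs exactly one descriptor's worth of the shared receive DMA map;
 * XGE_RXD returns a pointer to that same descriptor.
 */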

/*
 * Non-tunable constants.
 */
#define XGE_MAX_FRAMELEN	9622
#define XGE_MAX_MTU		(XGE_MAX_FRAMELEN - ETHER_HDR_LEN - \
				 ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
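/*
 * 9622-byte frames leave room for a jumbo MTU once the Ethernet header,
 * CRC and a possible VLAN tag are subtracted.
 */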

const struct pci_matchid xge_devices[] = {
	{ PCI_VENDOR_NETERION, PCI_PRODUCT_NETERION_XFRAME },
	{ PCI_VENDOR_NETERION, PCI_PRODUCT_NETERION_XFRAME_2 }
};

int
xge_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, xge_devices,
	    nitems(xge_devices)));
}

void
xge_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct xge_softc *sc;
	struct ifnet *ifp;
	pcireg_t memtype;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	pci_chipset_tag_t pc = pa->pa_pc;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint64_t val;
	int i;

	sc = (struct xge_softc *)self;

	sc->sc_dmat = pa->pa_dmat;

	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETERION_XFRAME)
		sc->xge_type = XGE_TYPE_XENA;
	else
		sc->xge_type = XGE_TYPE_HERC;

	/* Get BAR0 address */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_PIF_BAR);
	if (pci_mapreg_map(pa, XGE_PIF_BAR, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, 0, 0, 0)) {
		printf(": unable to map PIF BAR registers\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_TXP_BAR);
	if (pci_mapreg_map(pa, XGE_TXP_BAR, memtype, 0,
	    &sc->sc_txt, &sc->sc_txh, 0, 0, 0)) {
		printf(": unable to map TXP BAR registers\n");
		return;
	}

	if (sc->xge_type == XGE_TYPE_XENA) {
		/* Save PCI config space */
		for (i = 0; i < XGE_PCISIZE_XENA; i += 4)
			sc->sc_pciregs[i/4] = pci_conf_read(pa->pa_pc, pa->pa_tag, i);
	}

#if BYTE_ORDER == LITTLE_ENDIAN
	val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
	val &= ~(TxF_R_SE|RxF_W_SE);
	PIF_WCSR(SWAPPER_CTRL, val);
	PIF_WCSR(SWAPPER_CTRL, val);
#endif
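	/*
	 * The swapper control register is written twice, presumably
	 * because the first write may itself be byte-swapped under the
	 * old setting.  Reading back the known magic values below
	 * verifies that both the read and write paths swap correctly.
	 */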
	if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
		printf(": failed configuring endian (read), %llx != %llx!\n",
		    (unsigned long long)val, SWAPPER_MAGIC);
	}

	PIF_WCSR(XMSI_ADDRESS, SWAPPER_MAGIC);
	if ((val = PIF_RCSR(XMSI_ADDRESS)) != SWAPPER_MAGIC) {
		printf(": failed configuring endian (write), %llx != %llx!\n",
		    (unsigned long long)val, SWAPPER_MAGIC);
	}

	/*
	 * Fix for all "FFs" MAC address problems observed on
	 * Alpha platforms.  Not needed for Herc.
	 */
	if (sc->xge_type == XGE_TYPE_XENA) {
		/*
		 * The MAC addr may be all FF's, which is not good.
		 * Resolve it by writing magic values to GPIO_CONTROL and
		 * forcing a chip reset to read in the serial eeprom again.
		 */
		for (i = 0; i < nitems(xge_fix_mac); i++) {
			PIF_WCSR(GPIO_CONTROL, xge_fix_mac[i]);
			PIF_RCSR(GPIO_CONTROL);
		}

		/*
		 * Reset the chip and restore the PCI registers.
		 */
		PIF_WCSR(SW_RESET, 0xa5a5a50000000000ULL);
		DELAY(500000);
		for (i = 0; i < XGE_PCISIZE_XENA; i += 4)
			pci_conf_write(pa->pa_pc, pa->pa_tag, i, sc->sc_pciregs[i/4]);

		/*
		 * Restore the byte order registers.
		 */
#if BYTE_ORDER == LITTLE_ENDIAN
		val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
		val &= ~(TxF_R_SE|RxF_W_SE);
		PIF_WCSR(SWAPPER_CTRL, val);
		PIF_WCSR(SWAPPER_CTRL, val);
#endif

		if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
			printf(": failed configuring endian2 (read), %llx != %llx!\n",
			    (unsigned long long)val, SWAPPER_MAGIC);
			return;
		}

		PIF_WCSR(XMSI_ADDRESS, SWAPPER_MAGIC);
		if ((val = PIF_RCSR(XMSI_ADDRESS)) != SWAPPER_MAGIC) {
			printf(": failed configuring endian2 (write), %llx != %llx!\n",
			    (unsigned long long)val, SWAPPER_MAGIC);
			return;
		}
	}

	/*
	 * XGXS initialization.
	 */

	/*
	 * For Herc, bring EOI out of reset before XGXS.
	 */
	if (sc->xge_type == XGE_TYPE_HERC) {
		val = PIF_RCSR(SW_RESET);
		val &= 0xffff00ffffffffffULL;
		PIF_WCSR(SW_RESET, val);
		delay(1000*1000);	/* wait for 1 sec */
	}

	/* 29, Bring adapter out of reset */
	val = PIF_RCSR(SW_RESET);
	val &= 0xffffff00ffffffffULL;
	PIF_WCSR(SW_RESET, val);
	DELAY(500000);

	/*
	 * Ensure that it's safe to access registers by checking that the
	 * RIC_RUNNING bit is cleared.  The check is only valid for
	 * Xframe-II.
	 */
	if (sc->xge_type == XGE_TYPE_HERC) {
		for (i = 0; i < 50; i++) {
			val = PIF_RCSR(ADAPTER_STATUS);
			if (!(val & RIC_RUNNING))
				break;
			delay(20*1000);
		}

		if (i == 50) {
			printf(": not safe to access registers\n");
			return;
		}
	}

	/* 30, configure XGXS transceiver */
	if (sc->xge_type == XGE_TYPE_XENA)
		xge_setup_xgxs_xena(sc);
	else if (sc->xge_type == XGE_TYPE_HERC)
		xge_setup_xgxs_herc(sc);

	/* 33, program MAC address (not needed here) */
	/* Get ethernet address */
	PIF_WCSR(RMAC_ADDR_CMD_MEM,
	    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(0));
	while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
		;
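	/* The station address occupies the six most significant bytes. */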
	val = PIF_RCSR(RMAC_ADDR_DATA0_MEM);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = (uint8_t)(val >> (56 - (8*i)));

	/*
	 * Get memory for transmit descriptor lists.
	 */
	if (xge_alloc_txmem(sc)) {
		printf(": failed allocating txmem.\n");
		return;
	}

	/* 9 and 10 - set FIFO number/prio */
	PIF_WCSR(TX_FIFO_P0, TX_FIFO_LEN0(NTXDESCS));
	PIF_WCSR(TX_FIFO_P1, 0ULL);
	PIF_WCSR(TX_FIFO_P2, 0ULL);
	PIF_WCSR(TX_FIFO_P3, 0ULL);

	/* 11, XXX set round-robin prio? */

	/* 12, enable transmit FIFO */
	val = PIF_RCSR(TX_FIFO_P0);
	val |= TX_FIFO_ENABLE;
	PIF_WCSR(TX_FIFO_P0, val);

	/* 13, disable some error checks */
	PIF_WCSR(TX_PA_CFG,
	    TX_PA_CFG_IFR|TX_PA_CFG_ISO|TX_PA_CFG_ILC|TX_PA_CFG_ILE);

	/* Create transmit DMA maps */
	for (i = 0; i < NTXDESCS; i++) {
		if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_FRAMELEN,
		    NTXFRAGS, XGE_MAX_FRAMELEN, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txm[i])) {
			printf(": cannot create TX DMA maps\n");
			return;
		}
	}

	sc->sc_lasttx = NTXDESCS-1;
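	/*
	 * The ring is considered empty while lasttx trails nexttx by one
	 * slot, so seed lasttx with the last index (nexttx starts at 0).
	 */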

	/*
	 * RxDMA initialization.
	 * Only use one out of 8 possible receive queues.
	 */
	/* allocate rx descriptor memory */
	if (xge_alloc_rxmem(sc)) {
		printf(": failed allocating rxmem\n");
		return;
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < NRXREAL; i++) {
		if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_FRAMELEN,
		    NRXFRAGS, XGE_MAX_FRAMELEN, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxm[i])) {
			printf(": cannot create RX DMA maps\n");
			return;
		}
	}

	/* allocate mbufs to receive descriptors */
	for (i = 0; i < NRXREAL; i++)
		if (xge_add_rxbuf(sc, i))
			panic("out of mbufs too early");

	/* 14, setup receive ring priority */
	PIF_WCSR(RX_QUEUE_PRIORITY, 0ULL);	/* only use one ring */

	/* 15, setup receive ring round-robin calendar */
	PIF_WCSR(RX_W_ROUND_ROBIN_0, 0ULL);	/* only use one ring */
	PIF_WCSR(RX_W_ROUND_ROBIN_1, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_2, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_3, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_4, 0ULL);

	/* 16, write receive ring start address */
	PIF_WCSR(PRC_RXD0_0, (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr);
	/* PRC_RXD0_[1-7] are not used */

	/* 17, Setup alarm registers */
	PIF_WCSR(PRC_ALARM_ACTION, 0ULL);	/* Default everything to retry */

	/* 18, init receive ring controller */
#if RX_MODE == RX_MODE_1
	val = RING_MODE_1;
#elif RX_MODE == RX_MODE_3
	val = RING_MODE_3;
#else /* RX_MODE == RX_MODE_5 */
	val = RING_MODE_5;
#endif
	PIF_WCSR(PRC_CTRL_0, RC_IN_SVC|val);
	/* leave 1-7 disabled */
	/* XXXX snoop configuration? */

	/* 19, set chip memory assigned to the queue */
	if (sc->xge_type == XGE_TYPE_XENA) {
		/* all 64M to queue 0 */
		PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 64));
	} else {
		/* all 32M to queue 0 */
		PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 32));
	}

	/* 20, setup RLDRAM parameters */
	/* do not touch it for now */

	/* 21, setup pause frame thresholds */
	/* do not touch the defaults */
	/* XXX - must 0xff be written as stated in the manual? */

	/* 22, configure RED */
	/* we do not want to drop packets, so ignore */

	/* 23, initiate RLDRAM */
	val = PIF_RCSR(MC_RLDRAM_MRS);
	val |= MC_QUEUE_SIZE_ENABLE|MC_RLDRAM_MRS_ENABLE;
	PIF_WCSR(MC_RLDRAM_MRS, val);
	DELAY(1000);

	/*
	 * Setup interrupt policies.
	 */
	/* 40, Transmit interrupts */
	PIF_WCSR(TTI_DATA1_MEM, TX_TIMER_VAL(0x1ff) | TX_TIMER_AC |
	    TX_URNG_A(5) | TX_URNG_B(20) | TX_URNG_C(48));
	PIF_WCSR(TTI_DATA2_MEM,
	    TX_UFC_A(25) | TX_UFC_B(64) | TX_UFC_C(128) | TX_UFC_D(512));
	PIF_WCSR(TTI_COMMAND_MEM, TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE);
	while (PIF_RCSR(TTI_COMMAND_MEM) & TTI_CMD_MEM_STROBE)
		;

	/* 41, Receive interrupts */
	PIF_WCSR(RTI_DATA1_MEM, RX_TIMER_VAL(0x800) | RX_TIMER_AC |
	    RX_URNG_A(5) | RX_URNG_B(20) | RX_URNG_C(50));
	PIF_WCSR(RTI_DATA2_MEM,
	    RX_UFC_A(64) | RX_UFC_B(128) | RX_UFC_C(256) | RX_UFC_D(512));
	PIF_WCSR(RTI_COMMAND_MEM, RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE);
	while (PIF_RCSR(RTI_COMMAND_MEM) & RTI_CMD_MEM_STROBE)
		;
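	/*
	 * The TTI/RTI values above set up interrupt moderation: the URNG
	 * ranges divide link utilization into bands, the UFC counts give
	 * how many frames must complete in each band before an interrupt
	 * fires, and TIMER_VAL bounds the added latency.
	 */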

	/*
	 * Setup media stuff.
	 */
	ifmedia_init(&sc->xena_media, IFM_IMASK, xge_xgmii_mediachange,
	    xge_ifmedia_status);
	ifmedia_add(&sc->xena_media, IFM_ETHER|IFM_10G_SR, 0, NULL);
	ifmedia_set(&sc->xena_media, IFM_ETHER|IFM_10G_SR);

	ifp = &sc->sc_arpcom.ac_if;
	strlcpy(ifp->if_xname, XNAME, IFNAMSIZ);
	memcpy(sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xge_ioctl;
	ifp->if_start = xge_start;
	ifp->if_hardmtu = XGE_MAX_MTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, NTXDESCS - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	/*
	 * Setup interrupt vector before initializing.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf(": unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	if ((sc->sc_ih =
	    pci_intr_establish(pc, ih, IPL_NET, xge_intr, sc, XNAME)) == NULL) {
		printf(": unable to establish interrupt at %s\n",
		    intrstr ? intrstr : "<unknown>");
		return;
	}
	printf(": %s, address %s\n", intrstr, ether_sprintf(enaddr));
}

void
xge_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t reg;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER|IFM_10G_SR;

	reg = PIF_RCSR(ADAPTER_STATUS);
	if ((reg & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
		ifmr->ifm_status |= IFM_ACTIVE;
}

int
xge_xgmii_mediachange(struct ifnet *ifp)
{
	return (0);
}

void
xge_enable(struct xge_softc *sc)
{
	uint64_t val;

	/* 2, enable adapter */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= ADAPTER_EN;
	PIF_WCSR(ADAPTER_CONTROL, val);

	/* 3, light the card enable led */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= LED_ON;
	PIF_WCSR(ADAPTER_CONTROL, val);
#ifdef XGE_DEBUG
	printf("%s: link up\n", XNAME);
#endif
}

int
xge_init(struct ifnet *ifp)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t val;
	int s;

	s = splnet();

	/*
	 * Cancel any pending I/O
	 */
	xge_stop(ifp, 0);

	/* 31+32, setup MAC config */
	PIF_WKEY(MAC_CFG, TMAC_EN|RMAC_EN|TMAC_APPEND_PAD|RMAC_STRIP_FCS|
	    RMAC_BCAST_EN|RMAC_DISCARD_PFRM);

	DELAY(1000);

	/* 54, ensure that the adapter is 'quiescent' */
	val = PIF_RCSR(ADAPTER_STATUS);
	if ((val & QUIESCENT) != QUIESCENT) {
#if 0
		char buf[200];
#endif
		printf("%s: adapter not quiescent, aborting\n", XNAME);
		val = (val & QUIESCENT) ^ QUIESCENT;
#if 0
		bitmask_snprintf(val, QUIESCENT_BMSK, buf, sizeof buf);
		printf("%s: ADAPTER_STATUS missing bits %s\n", XNAME, buf);
#endif
		splx(s);
		return (1);
	}

	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
		/* disable VLAN tag stripping */
		val = PIF_RCSR(RX_PA_CFG);
		val &= ~STRIP_VLAN_TAG;
		PIF_WCSR(RX_PA_CFG, val);
	}

	/* set MRU */
	PIF_WCSR(RMAC_MAX_PYLD_LEN, RMAC_PYLD_LEN(XGE_MAX_FRAMELEN));

	/* 56, enable the transmit laser */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= EOI_TX_ON;
	PIF_WCSR(ADAPTER_CONTROL, val);

	xge_enable(sc);

	/*
	 * Enable all interrupts
	 */
	PIF_WCSR(TX_TRAFFIC_MASK, 0);
	PIF_WCSR(RX_TRAFFIC_MASK, 0);
	PIF_WCSR(TXPIC_INT_MASK, 0);
	PIF_WCSR(RXPIC_INT_MASK, 0);

	PIF_WCSR(MAC_INT_MASK, MAC_TMAC_INT);	/* only from RMAC */
	PIF_WCSR(MAC_RMAC_ERR_REG, RMAC_LINK_STATE_CHANGE_INT);
	PIF_WCSR(MAC_RMAC_ERR_MASK, ~RMAC_LINK_STATE_CHANGE_INT);
	PIF_WCSR(GENERAL_INT_MASK, 0);

	xge_setpromisc(sc);

	xge_setmulti(sc);

	/* Done... */
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	return (0);
}

void
xge_stop(struct ifnet *ifp, int disable)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t val;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	val = PIF_RCSR(ADAPTER_CONTROL);
	val &= ~ADAPTER_EN;
	PIF_WCSR(ADAPTER_CONTROL, val);

	while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
		;
}

int
xge_intr(void *pv)
{
	struct xge_softc *sc = pv;
	struct txd *txd;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	bus_dmamap_t dmp;
	uint64_t val;
	int i, lasttx, plen;

	val = PIF_RCSR(GENERAL_INT_STATUS);
	if (val == 0)
		return (0);	/* no interrupt here */

	PIF_WCSR(GENERAL_INT_STATUS, val);

	if ((val = PIF_RCSR(MAC_RMAC_ERR_REG)) & RMAC_LINK_STATE_CHANGE_INT) {
		/* Wait for quiescence */
#ifdef XGE_DEBUG
		printf("%s: link down\n", XNAME);
#endif
		while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
			;
		PIF_WCSR(MAC_RMAC_ERR_REG, RMAC_LINK_STATE_CHANGE_INT);

		val = PIF_RCSR(ADAPTER_STATUS);
		if ((val & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
			xge_enable(sc);	/* Only if link restored */
	}

	if ((val = PIF_RCSR(TX_TRAFFIC_INT)))
		PIF_WCSR(TX_TRAFFIC_INT, val);	/* clear interrupt bits */
	/*
	 * Collect sent packets.
	 */
	lasttx = sc->sc_lasttx;
	while ((i = NEXTTX(sc->sc_lasttx)) != sc->sc_nexttx) {
		txd = sc->sc_txd[i];
		dmp = sc->sc_txm[i];

		bus_dmamap_sync(sc->sc_dmat, dmp, 0,
		    dmp->dm_mapsize,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		if (txd->txd_control1 & TXD_CTL1_OWN) {
			bus_dmamap_sync(sc->sc_dmat, dmp, 0,
			    dmp->dm_mapsize, BUS_DMASYNC_PREREAD);
			break;
		}
		bus_dmamap_unload(sc->sc_dmat, dmp);
		m_freem(sc->sc_txb[i]);
		sc->sc_lasttx = i;
	}

	if (sc->sc_lasttx != lasttx)
		ifq_clr_oactive(&ifp->if_snd);

	/* Try to get more packets on the wire */
	xge_start(ifp);

	/* clear interrupt bits */
	if ((val = PIF_RCSR(RX_TRAFFIC_INT)))
		PIF_WCSR(RX_TRAFFIC_INT, val);

	for (;;) {
		struct rxdesc *rxd;
		struct mbuf *m;

		XGE_RXSYNC(sc->sc_nextrx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxd = XGE_RXD(sc->sc_nextrx);
		if (rxd->rxd_control1 & RXD_CTL1_OWN) {
			XGE_RXSYNC(sc->sc_nextrx, BUS_DMASYNC_PREREAD);
			break;
		}

		/* got a packet */
		m = sc->sc_rxb[sc->sc_nextrx];
#if RX_MODE == RX_MODE_1
		plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
#elif RX_MODE == RX_MODE_3
#error Fix rxmodes in xge_intr
#elif RX_MODE == RX_MODE_5
		plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
		plen += m->m_next->m_len = RXD_CTL2_BUF1SIZ(rxd->rxd_control2);
		plen += m->m_next->m_next->m_len =
		    RXD_CTL2_BUF2SIZ(rxd->rxd_control2);
		plen += m->m_next->m_next->m_next->m_len =
		    RXD_CTL3_BUF3SIZ(rxd->rxd_control3);
		plen += m->m_next->m_next->m_next->m_next->m_len =
		    RXD_CTL3_BUF4SIZ(rxd->rxd_control3);
#endif
		m->m_pkthdr.len = plen;

		val = rxd->rxd_control1;

		if (xge_add_rxbuf(sc, sc->sc_nextrx)) {
			/* Failed, recycle this mbuf */
#if RX_MODE == RX_MODE_1
			rxd->rxd_control2 = RXD_MKCTL2(MCLBYTES, 0, 0);
			rxd->rxd_control1 = RXD_CTL1_OWN;
#elif RX_MODE == RX_MODE_3
#elif RX_MODE == RX_MODE_5
#endif
			XGE_RXSYNC(sc->sc_nextrx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			ifp->if_ierrors++;
			break;
		}

		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_IPv4)
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_TCP)
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_UDP)
			m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_VLAN) {
			m->m_pkthdr.ether_vtag =
			    RXD_CTL2_VLANTAG(rxd->rxd_control2);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);

		if (++sc->sc_nextrx == NRXREAL)
			sc->sc_nextrx = 0;
	}

	if_input(ifp, &ml);

	return (1);
}

int
xge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct xge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			xge_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->xge_if_flags) &
			     IFF_PROMISC) {
				xge_setpromisc(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					xge_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xge_stop(ifp, 1);
		}
		sc->xge_if_flags = ifp->if_flags;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->xena_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			xge_setmulti(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
xge_setmulti(struct xge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, numaddr = 1; /* first slot used for card unicast address */
	uint64_t val;

	if (ac->ac_multirangecnt > 0)
		goto allmulti;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (numaddr == MAX_MCAST_ADDR)
			goto allmulti;
		for (val = 0, i = 0; i < ETHER_ADDR_LEN; i++) {
			val <<= 8;
			val |= enm->enm_addrlo[i];
		}
		PIF_WCSR(RMAC_ADDR_DATA0_MEM, val << 16);
		PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
		PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
		    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(numaddr));
		while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
			;
		numaddr++;
		ETHER_NEXT_MULTI(step, enm);
	}
	/* set the remaining entries to the broadcast address */
	for (i = numaddr; i < MAX_MCAST_ADDR; i++) {
		PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0xffffffffffff0000ULL);
		PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
		PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
		    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(i));
		while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
			;
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	return;

allmulti:
	/* Just receive everything with the multicast bit set */
	ifp->if_flags |= IFF_ALLMULTI;
	PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0x8000000000000000ULL);
	PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xF000000000000000ULL);
	PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
	    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(1));
	while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
		;
}

void
xge_setpromisc(struct xge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint64_t val;

	val = PIF_RCSR(MAC_CFG);

	if (ifp->if_flags & IFF_PROMISC)
		val |= RMAC_PROM_EN;
	else
		val &= ~RMAC_PROM_EN;

	PIF_WCSR(MAC_CFG, val);
}

void
xge_start(struct ifnet *ifp)
{
	struct xge_softc *sc = ifp->if_softc;
	struct txd *txd = NULL;	/* XXX - gcc */
	bus_dmamap_t dmp;
	struct mbuf *m;
	uint64_t par, lcr;
	int nexttx = 0, ntxd, i;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	par = lcr = 0;
	for (;;) {
		if (sc->sc_nexttx == sc->sc_lasttx) {
			ifq_set_oactive(&ifp->if_snd);
			break;	/* No more space */
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;	/* out of packets */

		nexttx = sc->sc_nexttx;
		dmp = sc->sc_txm[nexttx];

		switch (bus_dmamap_load_mbuf(sc->sc_dmat, dmp, m,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) {
		case 0:
			break;
		case EFBIG:
			if (m_defrag(m, M_DONTWAIT) == 0 &&
			    bus_dmamap_load_mbuf(sc->sc_dmat, dmp, m,
			    BUS_DMA_WRITE|BUS_DMA_NOWAIT) == 0)
				break;
		default:
			m_freem(m);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		txd = sc->sc_txd[nexttx];
		sc->sc_txb[nexttx] = m;
		for (i = 0; i < dmp->dm_nsegs; i++) {
			if (dmp->dm_segs[i].ds_len == 0)
				continue;
			txd->txd_control1 = dmp->dm_segs[i].ds_len;
			txd->txd_control2 = 0;
			txd->txd_bufaddr = dmp->dm_segs[i].ds_addr;
			txd++;
		}
		ntxd = txd - sc->sc_txd[nexttx] - 1;
		txd = sc->sc_txd[nexttx];
		txd->txd_control1 |= TXD_CTL1_OWN|TXD_CTL1_GCF;
		txd->txd_control2 = TXD_CTL2_UTIL;

#if NVLAN > 0
		if (m->m_flags & M_VLANTAG) {
			txd->txd_control2 |= TXD_CTL2_VLANE;
			txd->txd_control2 |=
			    TXD_CTL2_VLANT(m->m_pkthdr.ether_vtag);
		}
#endif

		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			txd->txd_control2 |= TXD_CTL2_CIPv4;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			txd->txd_control2 |= TXD_CTL2_CTCP;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			txd->txd_control2 |= TXD_CTL2_CUDP;

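		/* ntxd indexes the last fragment; flag it as list end. */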
		txd[ntxd].txd_control1 |= TXD_CTL1_GCL;

		bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		par = sc->sc_txdp[nexttx];
		lcr = TXDL_NUMTXD(ntxd) | TXDL_LGC_FIRST | TXDL_LGC_LAST;
		TXP_WCSR(TXDL_PAR, par);
		TXP_WCSR(TXDL_LCR, lcr);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif /* NBPFILTER > 0 */

		sc->sc_nexttx = NEXTTX(nexttx);
	}
}

/*
 * Allocate DMA memory for transmit descriptor fragments.
 * Only one map is used for all descriptors.
 */
int
xge_alloc_txmem(struct xge_softc *sc)
{
	struct txd *txp;
	bus_dma_segment_t seg;
	bus_addr_t txdp;
	caddr_t kva;
	int i, rseg, state;

#define TXMAPSZ (NTXDESCS*NTXFRAGS*sizeof(struct txd))
	state = 0;
	if (bus_dmamem_alloc(sc->sc_dmat, TXMAPSZ, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))
		goto err;
	state++;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, TXMAPSZ, &kva,
	    BUS_DMA_NOWAIT))
		goto err;

	state++;
	if (bus_dmamap_create(sc->sc_dmat, TXMAPSZ, 1, TXMAPSZ, 0,
	    BUS_DMA_NOWAIT, &sc->sc_txmap))
		goto err;
	state++;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_txmap,
	    kva, TXMAPSZ, NULL, BUS_DMA_NOWAIT))
		goto err;

	/* setup transmit array pointers */
	txp = (struct txd *)kva;
	txdp = seg.ds_addr;
	for (i = 0; i < NTXDESCS; i++) {
		sc->sc_txd[i] = txp;
		sc->sc_txdp[i] = txdp;
		txp += NTXFRAGS;
		txdp += (NTXFRAGS * sizeof(struct txd));
	}

	return (0);

err:
	if (state > 2)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, kva, TXMAPSZ);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return (ENOBUFS);
}

/*
 * Allocate DMA memory for the receive descriptors;
 * only one map is used for all descriptors.
 * Link the receive descriptor pages together.
 */
int
xge_alloc_rxmem(struct xge_softc *sc)
{
	struct rxd_4k *rxpp;
	bus_dma_segment_t seg;
	caddr_t kva;
	int i, rseg, state;

	/* sanity check */
	if (sizeof(struct rxd_4k) != XGE_PAGE) {
		printf("bad compiler struct alignment, %d != %d\n",
		    (int)sizeof(struct rxd_4k), XGE_PAGE);
		return (EINVAL);
	}

	state = 0;
	if (bus_dmamem_alloc(sc->sc_dmat, RXMAPSZ, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))
		goto err;
	state++;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, RXMAPSZ, &kva,
	    BUS_DMA_NOWAIT))
		goto err;

	state++;
	if (bus_dmamap_create(sc->sc_dmat, RXMAPSZ, 1, RXMAPSZ, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rxmap))
		goto err;
	state++;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap,
	    kva, RXMAPSZ, NULL, BUS_DMA_NOWAIT))
		goto err;

	/* setup receive page link pointers */
	for (rxpp = (struct rxd_4k *)kva, i = 0; i < NRXPAGES; i++, rxpp++) {
		sc->sc_rxd_4k[i] = rxpp;
		rxpp->r4_next = (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr +
		    (i*sizeof(struct rxd_4k)) + sizeof(struct rxd_4k);
	}
	sc->sc_rxd_4k[NRXPAGES-1]->r4_next =
	    (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr;

	return (0);

err:
	if (state > 2)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, kva, RXMAPSZ);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return (ENOBUFS);
}

/*
 * Add a new mbuf chain to descriptor id.
 */
int
xge_add_rxbuf(struct xge_softc *sc, int id)
{
	struct rxdesc *rxd;
	struct mbuf *m[5];
	int page, desc, error;
#if RX_MODE == RX_MODE_5
	int i;
#endif

	page = id/NDESC_BUFMODE;
	desc = id%NDESC_BUFMODE;

	rxd = &sc->sc_rxd_4k[page]->r4_rxd[desc];

	/*
	 * Allocate mbufs.
	 * In 5-buffer mode five mbufs and two clusters are used; the
	 * hardware puts the (ethernet, ip, tcp/udp) headers in their
	 * own buffers and the clusters are only used for data.
	 */
#if RX_MODE == RX_MODE_1
	MGETHDR(m[0], M_DONTWAIT, MT_DATA);
	if (m[0] == NULL)
		return (ENOBUFS);
	MCLGETI(m[0], M_DONTWAIT, NULL, XGE_MAX_FRAMELEN + ETHER_ALIGN);
	if ((m[0]->m_flags & M_EXT) == 0) {
		m_freem(m[0]);
		return (ENOBUFS);
	}
	m[0]->m_len = m[0]->m_pkthdr.len = XGE_MAX_FRAMELEN + ETHER_ALIGN;
#elif RX_MODE == RX_MODE_3
#error missing rxmode 3.
#elif RX_MODE == RX_MODE_5
	MGETHDR(m[0], M_DONTWAIT, MT_DATA);
	for (i = 1; i < 5; i++) {
		MGET(m[i], M_DONTWAIT, MT_DATA);
	}
	if (m[3])
		MCLGET(m[3], M_DONTWAIT);
	if (m[4])
		MCLGET(m[4], M_DONTWAIT);
	if (!m[0] || !m[1] || !m[2] || !m[3] || !m[4] ||
	    ((m[3]->m_flags & M_EXT) == 0) || ((m[4]->m_flags & M_EXT) == 0)) {
		/* Out of something */
		for (i = 0; i < 5; i++)
			m_free(m[i]);
		return (ENOBUFS);
	}
	/* Link'em together */
	m[0]->m_next = m[1];
	m[1]->m_next = m[2];
	m[2]->m_next = m[3];
	m[3]->m_next = m[4];
#else
#error bad mode RX_MODE
#endif

	if (sc->sc_rxb[id])
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxm[id]);
	sc->sc_rxb[id] = m[0];

	m_adj(m[0], ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rxm[id], m[0],
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error)
		return (error);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxm[id], 0,
	    sc->sc_rxm[id]->dm_mapsize, BUS_DMASYNC_PREREAD);

#if RX_MODE == RX_MODE_1
	rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, 0, 0);
	rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
	rxd->rxd_control1 = RXD_CTL1_OWN;
#elif RX_MODE == RX_MODE_3
#elif RX_MODE == RX_MODE_5
	rxd->rxd_control3 = RXD_MKCTL3(0, m[3]->m_len, m[4]->m_len);
	rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, m[1]->m_len, m[2]->m_len);
	rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
	rxd->rxd_buf1 = (uint64_t)sc->sc_rxm[id]->dm_segs[1].ds_addr;
	rxd->rxd_buf2 = (uint64_t)sc->sc_rxm[id]->dm_segs[2].ds_addr;
	rxd->rxd_buf3 = (uint64_t)sc->sc_rxm[id]->dm_segs[3].ds_addr;
	rxd->rxd_buf4 = (uint64_t)sc->sc_rxm[id]->dm_segs[4].ds_addr;
	rxd->rxd_control1 = RXD_CTL1_OWN;
#endif

	XGE_RXSYNC(id, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (0);
}

/*
 * This magic comes from the FreeBSD driver.
 */
int
xge_setup_xgxs_xena(struct xge_softc *sc)
{
	int i;

	for (i = 0; i < nitems(xge_xena_dtx_cfg); i++) {
		PIF_WCSR(DTX_CONTROL, xge_xena_dtx_cfg[i]);
		DELAY(100);
	}

	return (0);
}

int
xge_setup_xgxs_herc(struct xge_softc *sc)
{
	int i;

	for (i = 0; i < nitems(xge_herc_dtx_cfg); i++) {
		PIF_WCSR(DTX_CONTROL, xge_herc_dtx_cfg[i]);
		DELAY(100);
	}

	return (0);
}