/*	$OpenBSD: if_oce.c,v 1.100 2017/11/27 16:53:04 sthen Exp $	*/

/*
 * Copyright (c) 2012 Mike Belopuhov
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (C) 2012 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/pool.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_ocereg.h>

#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif

#define OCE_MBX_TIMEOUT		5

#define OCE_MAX_PAYLOAD		65536

#define OCE_TX_RING_SIZE	512
#define OCE_RX_RING_SIZE	1024
/* These must be powers of 2, e.g. 2, 4, 8 or 16 */
#define OCE_MAX_RSS		4 /* TODO: 8 */
#define OCE_MAX_RQ		(OCE_MAX_RSS + 1) /* one default queue */
#define OCE_MAX_WQ		8

#define OCE_MAX_EQ		32
#define OCE_MAX_CQ		(OCE_MAX_RQ + OCE_MAX_WQ + 1) /* one MCC queue */
#define OCE_MAX_CQ_EQ		8 /* max CQs that can be attached to an EQ */

#define OCE_DEFAULT_EQD		80

#define OCE_MIN_MTU		256
#define OCE_MAX_MTU		9000

#define OCE_MAX_RQ_COMPL	64
#define OCE_MAX_RQ_POSTS	255
#define OCE_RX_BUF_SIZE		2048

#define OCE_MAX_TX_ELEMENTS	29
#define OCE_MAX_TX_DESC		1024
#define OCE_MAX_TX_SIZE		65535

#define OCE_MEM_KVA(_m)		((void *)((_m)->vaddr))
#define OCE_MEM_DVA(_m)		((_m)->paddr)

#define OCE_WQ_FOREACH(sc, wq, i) 	\
	for (i = 0, wq = sc->sc_wq[0]; i < sc->sc_nwq; i++, wq = sc->sc_wq[i])
#define OCE_RQ_FOREACH(sc, rq, i) 	\
	for (i = 0, rq = sc->sc_rq[0]; i < sc->sc_nrq; i++, rq = sc->sc_rq[i])
#define OCE_EQ_FOREACH(sc, eq, i) 	\
	for (i = 0, eq = sc->sc_eq[0]; i < sc->sc_neq; i++, eq = sc->sc_eq[i])
#define OCE_CQ_FOREACH(sc, cq, i) 	\
	for (i = 0, cq = sc->sc_cq[0]; i < sc->sc_ncq; i++, cq = sc->sc_cq[i])
#define OCE_RING_FOREACH(_r, _v, _c)	\
	for ((_v) = oce_ring_first(_r); _c; (_v) = oce_ring_next(_r))
141 :
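/*
 * Integer base-2 logarithm, rounding down: ilog2(v) == floor(log2(v)),
 * and ilog2(0) == 0.  The queue sizes above are powers of two, so this
 * presumably serves to recover the exponent wherever a log2-encoded
 * size is wanted.
 */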
static inline int
ilog2(unsigned int v)
{
	int r = 0;

	while (v >>= 1)
		r++;
	return (r);
}

struct oce_pkt {
	struct mbuf *		mbuf;
	bus_dmamap_t		map;
	int			nsegs;
	SIMPLEQ_ENTRY(oce_pkt)	entry;
};
SIMPLEQ_HEAD(oce_pkt_list, oce_pkt);

struct oce_dma_mem {
	bus_dma_tag_t		tag;
	bus_dmamap_t		map;
	bus_dma_segment_t	segs;
	int			nsegs;
	bus_size_t		size;
	caddr_t			vaddr;
	bus_addr_t		paddr;
};

struct oce_ring {
	int			index;
	int			nitems;
	int			nused;
	int			isize;
	struct oce_dma_mem	dma;
};

struct oce_softc;

enum cq_len {
	CQ_LEN_256 = 256,
	CQ_LEN_512 = 512,
	CQ_LEN_1024 = 1024
};

enum eq_len {
	EQ_LEN_256 = 256,
	EQ_LEN_512 = 512,
	EQ_LEN_1024 = 1024,
	EQ_LEN_2048 = 2048,
	EQ_LEN_4096 = 4096
};

enum eqe_size {
	EQE_SIZE_4 = 4,
	EQE_SIZE_16 = 16
};

enum qtype {
	QTYPE_EQ,
	QTYPE_MQ,
	QTYPE_WQ,
	QTYPE_RQ,
	QTYPE_CQ,
	QTYPE_RSS
};

struct oce_eq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_cq *		cq[OCE_MAX_CQ_EQ];
	int			cq_valid;

	int			nitems;
	int			isize;
	int			delay;
};

struct oce_cq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_eq *		eq;

	void			(*cq_intr)(void *);
	void *			cb_arg;

	int			nitems;
	int			nodelay;
	int			eventable;
	int			ncoalesce;
};

struct oce_mq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_cq *		cq;

	int			nitems;
};

struct oce_wq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_cq *		cq;

	struct oce_pkt_list	pkt_list;
	struct oce_pkt_list	pkt_free;

	int			nitems;
};

struct oce_rq {
	struct oce_softc *	sc;
	struct oce_ring *	ring;
	enum qtype		type;
	int			id;

	struct oce_cq *		cq;

	struct if_rxring	rxring;
	struct oce_pkt_list	pkt_list;
	struct oce_pkt_list	pkt_free;

	uint32_t		rss_cpuid;

#ifdef OCE_LRO
	struct lro_ctrl		lro;
	int			lro_pkts_queued;
#endif

	int			nitems;
	int			fragsize;
	int			mtu;
	int			rss;
};

struct oce_softc {
	struct device		sc_dev;

	uint			sc_flags;
#define OCE_F_BE2		0x00000001
#define OCE_F_BE3		0x00000002
#define OCE_F_XE201		0x00000008
#define OCE_F_BE3_NATIVE	0x00000100
#define OCE_F_RESET_RQD		0x00001000
#define OCE_F_MBOX_ENDIAN_RQD	0x00002000

	bus_dma_tag_t		sc_dmat;

	bus_space_tag_t		sc_cfg_iot;
	bus_space_handle_t	sc_cfg_ioh;
	bus_size_t		sc_cfg_size;

	bus_space_tag_t		sc_csr_iot;
	bus_space_handle_t	sc_csr_ioh;
	bus_size_t		sc_csr_size;

	bus_space_tag_t		sc_db_iot;
	bus_space_handle_t	sc_db_ioh;
	bus_size_t		sc_db_size;

	void *			sc_ih;

	struct arpcom		sc_ac;
	struct ifmedia		sc_media;
	ushort			sc_link_up;
	ushort			sc_link_speed;
	uint64_t		sc_fc;

	struct oce_dma_mem	sc_mbx;
	struct oce_dma_mem	sc_pld;

	uint			sc_port;
	uint			sc_fmode;

	struct oce_wq *		sc_wq[OCE_MAX_WQ];	/* TX work queues */
	struct oce_rq *		sc_rq[OCE_MAX_RQ];	/* RX work queues */
	struct oce_cq *		sc_cq[OCE_MAX_CQ];	/* Completion queues */
	struct oce_eq *		sc_eq[OCE_MAX_EQ];	/* Event queues */
	struct oce_mq *		sc_mq;			/* Mailbox queue */

	ushort			sc_neq;
	ushort			sc_ncq;
	ushort			sc_nrq;
	ushort			sc_nwq;
	ushort			sc_nintr;

	ushort			sc_tx_ring_size;
	ushort			sc_rx_ring_size;
	ushort			sc_rss_enable;

	uint32_t		sc_if_id;	/* interface ID */
	uint32_t		sc_pmac_id;	/* PMAC id */
	char			sc_macaddr[ETHER_ADDR_LEN];

	uint32_t		sc_pvid;

	uint64_t		sc_rx_errors;
	uint64_t		sc_tx_errors;

	struct timeout		sc_tick;
	struct timeout		sc_rxrefill;

	void *			sc_statcmd;
};

#define IS_BE(sc)		ISSET((sc)->sc_flags, OCE_F_BE2 | OCE_F_BE3)
#define IS_XE201(sc)		ISSET((sc)->sc_flags, OCE_F_XE201)

#define ADDR_HI(x)		((uint32_t)((uint64_t)(x) >> 32))
#define ADDR_LO(x)		((uint32_t)((uint64_t)(x) & 0xffffffff))

#define IF_LRO_ENABLED(ifp)	ISSET((ifp)->if_capabilities, IFCAP_LRO)

int	oce_match(struct device *, void *, void *);
void	oce_attach(struct device *, struct device *, void *);
int	oce_pci_alloc(struct oce_softc *, struct pci_attach_args *);
void	oce_attachhook(struct device *);
void	oce_attach_ifp(struct oce_softc *);
int	oce_ioctl(struct ifnet *, u_long, caddr_t);
int	oce_rxrinfo(struct oce_softc *, struct if_rxrinfo *);
void	oce_iff(struct oce_softc *);
void	oce_link_status(struct oce_softc *);
void	oce_media_status(struct ifnet *, struct ifmediareq *);
int	oce_media_change(struct ifnet *);
void	oce_tick(void *);
void	oce_init(void *);
void	oce_stop(struct oce_softc *);
void	oce_watchdog(struct ifnet *);
void	oce_start(struct ifnet *);
int	oce_encap(struct oce_softc *, struct mbuf **, int wqidx);
#ifdef OCE_TSO
struct mbuf *
	oce_tso(struct oce_softc *, struct mbuf **);
#endif
int	oce_intr(void *);
void	oce_intr_wq(void *);
void	oce_txeof(struct oce_wq *);
void	oce_intr_rq(void *);
void	oce_rxeof(struct oce_rq *, struct oce_nic_rx_cqe *);
void	oce_rxeoc(struct oce_rq *, struct oce_nic_rx_cqe *);
int	oce_vtp_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
int	oce_port_valid(struct oce_softc *, struct oce_nic_rx_cqe *);
#ifdef OCE_LRO
void	oce_flush_lro(struct oce_rq *);
int	oce_init_lro(struct oce_softc *);
void	oce_free_lro(struct oce_softc *);
#endif
int	oce_get_buf(struct oce_rq *);
int	oce_alloc_rx_bufs(struct oce_rq *);
void	oce_refill_rx(void *);
void	oce_free_posted_rxbuf(struct oce_rq *);
void	oce_intr_mq(void *);
void	oce_link_event(struct oce_softc *,
	    struct oce_async_cqe_link_state *);

int	oce_init_queues(struct oce_softc *);
void	oce_release_queues(struct oce_softc *);
struct oce_wq *oce_create_wq(struct oce_softc *, struct oce_eq *);
void	oce_drain_wq(struct oce_wq *);
void	oce_destroy_wq(struct oce_wq *);
struct oce_rq *
	oce_create_rq(struct oce_softc *, struct oce_eq *, int rss);
void	oce_drain_rq(struct oce_rq *);
void	oce_destroy_rq(struct oce_rq *);
struct oce_eq *
	oce_create_eq(struct oce_softc *);
static inline void
	oce_arm_eq(struct oce_eq *, int neqe, int rearm, int clearint);
void	oce_drain_eq(struct oce_eq *);
void	oce_destroy_eq(struct oce_eq *);
struct oce_mq *
	oce_create_mq(struct oce_softc *, struct oce_eq *);
void	oce_drain_mq(struct oce_mq *);
void	oce_destroy_mq(struct oce_mq *);
struct oce_cq *
	oce_create_cq(struct oce_softc *, struct oce_eq *, int nitems,
	    int isize, int eventable, int nodelay, int ncoalesce);
static inline void
	oce_arm_cq(struct oce_cq *, int ncqe, int rearm);
void	oce_destroy_cq(struct oce_cq *);

int	oce_dma_alloc(struct oce_softc *, bus_size_t, struct oce_dma_mem *);
void	oce_dma_free(struct oce_softc *, struct oce_dma_mem *);
#define	oce_dma_sync(d, f) \
	bus_dmamap_sync((d)->tag, (d)->map, 0, (d)->map->dm_mapsize, f)

struct oce_ring *
	oce_create_ring(struct oce_softc *, int nitems, int isize, int maxseg);
void	oce_destroy_ring(struct oce_softc *, struct oce_ring *);
int	oce_load_ring(struct oce_softc *, struct oce_ring *,
	    struct oce_pa *, int max_segs);
static inline void *
	oce_ring_get(struct oce_ring *);
static inline void *
	oce_ring_first(struct oce_ring *);
static inline void *
	oce_ring_next(struct oce_ring *);
struct oce_pkt *
	oce_pkt_alloc(struct oce_softc *, size_t size, int nsegs,
	    int maxsegsz);
void	oce_pkt_free(struct oce_softc *, struct oce_pkt *);
static inline struct oce_pkt *
	oce_pkt_get(struct oce_pkt_list *);
static inline void
	oce_pkt_put(struct oce_pkt_list *, struct oce_pkt *);

int	oce_init_fw(struct oce_softc *);
int	oce_mbox_init(struct oce_softc *);
int	oce_mbox_dispatch(struct oce_softc *);
int	oce_cmd(struct oce_softc *, int subsys, int opcode, int version,
	    void *payload, int length);
void	oce_first_mcc(struct oce_softc *);

int	oce_get_fw_config(struct oce_softc *);
int	oce_check_native_mode(struct oce_softc *);
int	oce_create_iface(struct oce_softc *, uint8_t *macaddr);
int	oce_config_vlan(struct oce_softc *, struct normal_vlan *vtags,
	    int nvtags, int untagged, int promisc);
int	oce_set_flow_control(struct oce_softc *, uint64_t);
int	oce_config_rss(struct oce_softc *, int enable);
int	oce_update_mcast(struct oce_softc *, uint8_t multi[][ETHER_ADDR_LEN],
	    int naddr);
int	oce_set_promisc(struct oce_softc *, int enable);
int	oce_get_link_status(struct oce_softc *);

void	oce_macaddr_set(struct oce_softc *);
int	oce_macaddr_get(struct oce_softc *, uint8_t *macaddr);
int	oce_macaddr_add(struct oce_softc *, uint8_t *macaddr, uint32_t *pmac);
int	oce_macaddr_del(struct oce_softc *, uint32_t pmac);

int	oce_new_rq(struct oce_softc *, struct oce_rq *);
int	oce_new_wq(struct oce_softc *, struct oce_wq *);
int	oce_new_mq(struct oce_softc *, struct oce_mq *);
int	oce_new_eq(struct oce_softc *, struct oce_eq *);
int	oce_new_cq(struct oce_softc *, struct oce_cq *);

int	oce_init_stats(struct oce_softc *);
int	oce_update_stats(struct oce_softc *);
int	oce_stats_be2(struct oce_softc *, uint64_t *, uint64_t *);
int	oce_stats_be3(struct oce_softc *, uint64_t *, uint64_t *);
int	oce_stats_xe(struct oce_softc *, uint64_t *, uint64_t *);

struct pool *oce_pkt_pool;

struct cfdriver oce_cd = {
	NULL, "oce", DV_IFNET
};

struct cfattach oce_ca = {
	sizeof(struct oce_softc), oce_match, oce_attach, NULL, NULL
};

const struct pci_matchid oce_devices[] = {
	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE2 },
	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE3 },
	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE2 },
	{ PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE3 },
	{ PCI_VENDOR_EMULEX, PCI_PRODUCT_EMULEX_XE201 },
};

int
oce_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, oce_devices, nitems(oce_devices)));
}

void
oce_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct oce_softc *sc = (struct oce_softc *)self;
	const char *intrstr = NULL;
	pci_intr_handle_t ih;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_SERVERENGINES_BE2:
	case PCI_PRODUCT_SERVERENGINES_OCBE2:
		SET(sc->sc_flags, OCE_F_BE2);
		break;
	case PCI_PRODUCT_SERVERENGINES_BE3:
	case PCI_PRODUCT_SERVERENGINES_OCBE3:
		SET(sc->sc_flags, OCE_F_BE3);
		break;
	case PCI_PRODUCT_EMULEX_XE201:
		SET(sc->sc_flags, OCE_F_XE201);
		break;
	}

	sc->sc_dmat = pa->pa_dmat;
	if (oce_pci_alloc(sc, pa))
		return;

	sc->sc_tx_ring_size = OCE_TX_RING_SIZE;
	sc->sc_rx_ring_size = OCE_RX_RING_SIZE;

	/* create the bootstrap mailbox */
	if (oce_dma_alloc(sc, sizeof(struct oce_bmbx), &sc->sc_mbx)) {
		printf(": failed to allocate mailbox memory\n");
		return;
	}
	if (oce_dma_alloc(sc, OCE_MAX_PAYLOAD, &sc->sc_pld)) {
		printf(": failed to allocate payload memory\n");
		goto fail_1;
	}

	if (oce_init_fw(sc))
		goto fail_2;

	if (oce_mbox_init(sc)) {
		printf(": failed to initialize mailbox\n");
		goto fail_2;
	}

	if (oce_get_fw_config(sc)) {
		printf(": failed to get firmware configuration\n");
		goto fail_2;
	}

	if (ISSET(sc->sc_flags, OCE_F_BE3)) {
		if (oce_check_native_mode(sc))
			goto fail_2;
	}

	if (oce_macaddr_get(sc, sc->sc_macaddr)) {
		printf(": failed to fetch MAC address\n");
		goto fail_2;
	}
	memcpy(sc->sc_ac.ac_enaddr, sc->sc_macaddr, ETHER_ADDR_LEN);

	if (oce_pkt_pool == NULL) {
		oce_pkt_pool = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
		if (oce_pkt_pool == NULL) {
			printf(": unable to allocate descriptor pool\n");
			goto fail_2;
		}
		pool_init(oce_pkt_pool, sizeof(struct oce_pkt), 0, IPL_NET,
		    0, "ocepkts", NULL);
	}

	/* We allocate a single interrupt resource */
	sc->sc_nintr = 1;
	if (pci_intr_map_msi(pa, &ih) != 0 &&
	    pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		goto fail_2;
	}

	intrstr = pci_intr_string(pa->pa_pc, ih);
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, oce_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt\n");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_2;
	}
	printf(": %s", intrstr);

	if (oce_init_stats(sc))
		goto fail_3;

	if (oce_init_queues(sc))
		goto fail_3;

	oce_attach_ifp(sc);

#ifdef OCE_LRO
	if (oce_init_lro(sc))
		goto fail_4;
#endif

	timeout_set(&sc->sc_tick, oce_tick, sc);
	timeout_set(&sc->sc_rxrefill, oce_refill_rx, sc);

	config_mountroot(self, oce_attachhook);

	printf(", address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));

	return;

#ifdef OCE_LRO
fail_4:
	oce_free_lro(sc);
	ether_ifdetach(&sc->sc_ac.ac_if);
	if_detach(&sc->sc_ac.ac_if);
	oce_release_queues(sc);
#endif
fail_3:
	pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
fail_2:
	oce_dma_free(sc, &sc->sc_pld);
fail_1:
	oce_dma_free(sc, &sc->sc_mbx);
}

int
oce_pci_alloc(struct oce_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t memtype, reg;

	/* setup the device config region */
	if (ISSET(sc->sc_flags, OCE_F_BE2))
		reg = OCE_BAR_CFG_BE2;
	else
		reg = OCE_BAR_CFG;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
	if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_cfg_iot,
	    &sc->sc_cfg_ioh, NULL, &sc->sc_cfg_size,
	    IS_BE(sc) ? 0 : 32768)) {
		printf(": can't find cfg mem space\n");
		return (ENXIO);
	}

	/*
	 * Read the SLI_INTF register and determine whether we
	 * can use this port and its features
	 */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, OCE_INTF_REG_OFFSET);
	if (OCE_SLI_SIGNATURE(reg) != OCE_INTF_VALID_SIG) {
		printf(": invalid signature\n");
		goto fail_1;
	}
	if (OCE_SLI_REVISION(reg) != OCE_INTF_SLI_REV4) {
		printf(": unsupported SLI revision\n");
		goto fail_1;
	}
	if (OCE_SLI_IFTYPE(reg) == OCE_INTF_IF_TYPE_1)
		SET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD);
	if (OCE_SLI_HINT1(reg) == OCE_INTF_FUNC_RESET_REQD)
		SET(sc->sc_flags, OCE_F_RESET_RQD);

	/* Lancer has one BAR (CFG) but BE3 has three (CFG, CSR, DB) */
	if (IS_BE(sc)) {
		/* set up CSR region */
		reg = OCE_BAR_CSR;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
		if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_csr_iot,
		    &sc->sc_csr_ioh, NULL, &sc->sc_csr_size, 0)) {
			printf(": can't find csr mem space\n");
			goto fail_1;
		}

		/* set up DB doorbell region */
		reg = OCE_BAR_DB;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
		if (pci_mapreg_map(pa, reg, memtype, 0, &sc->sc_db_iot,
		    &sc->sc_db_ioh, NULL, &sc->sc_db_size, 0)) {
			printf(": can't find db mem space\n");
			goto fail_2;
		}
	} else {
		sc->sc_csr_iot = sc->sc_db_iot = sc->sc_cfg_iot;
		sc->sc_csr_ioh = sc->sc_db_ioh = sc->sc_cfg_ioh;
	}

	return (0);

fail_2:
	bus_space_unmap(sc->sc_csr_iot, sc->sc_csr_ioh, sc->sc_csr_size);
fail_1:
	bus_space_unmap(sc->sc_cfg_iot, sc->sc_cfg_ioh, sc->sc_cfg_size);
	return (ENXIO);
}

static inline uint32_t
oce_read_cfg(struct oce_softc *sc, bus_size_t off)
{
	bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off));
}

static inline uint32_t
oce_read_csr(struct oce_softc *sc, bus_size_t off)
{
	bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_csr_iot, sc->sc_csr_ioh, off));
}

static inline uint32_t
oce_read_db(struct oce_softc *sc, bus_size_t off)
{
	bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_db_iot, sc->sc_db_ioh, off));
}

static inline void
oce_write_cfg(struct oce_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, val);
	bus_space_barrier(sc->sc_cfg_iot, sc->sc_cfg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline void
oce_write_csr(struct oce_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_csr_iot, sc->sc_csr_ioh, off, val);
	bus_space_barrier(sc->sc_csr_iot, sc->sc_csr_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline void
oce_write_db(struct oce_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_db_iot, sc->sc_db_ioh, off, val);
	bus_space_barrier(sc->sc_db_iot, sc->sc_db_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline void
oce_intr_enable(struct oce_softc *sc)
{
	uint32_t reg;

	reg = oce_read_cfg(sc, PCI_INTR_CTRL);
	oce_write_cfg(sc, PCI_INTR_CTRL, reg | HOSTINTR_MASK);
}

static inline void
oce_intr_disable(struct oce_softc *sc)
{
	uint32_t reg;

	reg = oce_read_cfg(sc, PCI_INTR_CTRL);
	oce_write_cfg(sc, PCI_INTR_CTRL, reg & ~HOSTINTR_MASK);
}

void
oce_attachhook(struct device *self)
{
	struct oce_softc *sc = (struct oce_softc *)self;

	oce_get_link_status(sc);

	oce_arm_cq(sc->sc_mq->cq, 0, TRUE);

	/*
	 * We need to receive MCC async events, so enable interrupts and
	 * arm the first EQ; the other EQs will be armed once the
	 * interface comes up.
	 */
	oce_intr_enable(sc);
	oce_arm_eq(sc->sc_eq[0], 0, TRUE, FALSE);

	/*
	 * Send the first MCC command; after that the firmware starts
	 * delivering unsolicited MCC notifications.
	 */
	oce_first_mcc(sc);
}

void
oce_attach_ifp(struct oce_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ifmedia_init(&sc->sc_media, IFM_IMASK, oce_media_change,
	    oce_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = oce_ioctl;
	ifp->if_start = oce_start;
	ifp->if_watchdog = oce_watchdog;
	ifp->if_hardmtu = OCE_MAX_MTU;
	ifp->if_softc = sc;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_size - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifdef OCE_TSO
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
#endif
#ifdef OCE_LRO
	ifp->if_capabilities |= IFCAP_LRO;
#endif

	if_attach(ifp);
	ether_ifattach(ifp);
}

int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct oce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			oce_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				oce_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				oce_stop(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
		break;
	case SIOCGIFRXR:
		error = oce_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_ac, command, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			oce_iff(sc);
		error = 0;
	}

	splx(s);

	return (error);
}

int
oce_rxrinfo(struct oce_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info *ifr, ifr1;
	struct oce_rq *rq;
	int error, i;
	u_int n = 0;

	if (sc->sc_nrq > 1) {
		if ((ifr = mallocarray(sc->sc_nrq, sizeof(*ifr), M_DEVBUF,
		    M_WAITOK | M_ZERO)) == NULL)
			return (ENOMEM);
	} else
		ifr = &ifr1;

	OCE_RQ_FOREACH(sc, rq, i) {
		ifr[n].ifr_size = MCLBYTES;
		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "/%d", i);
		ifr[n].ifr_info = rq->rxring;
		n++;
	}

	error = if_rxr_info_ioctl(ifri, sc->sc_nrq, ifr);

	if (sc->sc_nrq > 1)
		free(ifr, M_DEVBUF, sc->sc_nrq * sizeof(*ifr));
	return (error);
}
924 :
void
oce_iff(struct oce_softc *sc)
{
	uint8_t multi[OCE_MAX_MC_FILTER_SIZE][ETHER_ADDR_LEN];
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int naddr = 0, promisc = 0;

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt >= OCE_MAX_MC_FILTER_SIZE) {
		ifp->if_flags |= IFF_ALLMULTI;
		promisc = 1;
	} else {
		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
		while (enm != NULL) {
			memcpy(multi[naddr++], enm->enm_addrlo, ETHER_ADDR_LEN);
			ETHER_NEXT_MULTI(step, enm);
		}
		oce_update_mcast(sc, multi, naddr);
	}

	oce_set_promisc(sc, promisc);
}

void
oce_link_status(struct oce_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int link_state = LINK_STATE_DOWN;

	ifp->if_baudrate = 0;
	if (sc->sc_link_up) {
		link_state = LINK_STATE_FULL_DUPLEX;

		switch (sc->sc_link_speed) {
		case 1:
			ifp->if_baudrate = IF_Mbps(10);
			break;
		case 2:
			ifp->if_baudrate = IF_Mbps(100);
			break;
		case 3:
			ifp->if_baudrate = IF_Gbps(1);
			break;
		case 4:
			ifp->if_baudrate = IF_Gbps(10);
			break;
		}
	}
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}
}

void
oce_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct oce_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (oce_get_link_status(sc) == 0)
		oce_link_status(sc);

	if (!sc->sc_link_up) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->sc_link_speed) {
	case 1: /* 10 Mbps */
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	case 2: /* 100 Mbps */
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case 3: /* 1 Gbps */
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case 4: /* 10 Gbps */
		ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
		break;
	}

	if (sc->sc_fc & IFM_ETH_RXPAUSE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
	if (sc->sc_fc & IFM_ETH_TXPAUSE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
}

int
oce_media_change(struct ifnet *ifp)
{
	return (0);
}

void
oce_tick(void *arg)
{
	struct oce_softc *sc = arg;
	int s;

	s = splnet();

	if (oce_update_stats(sc) == 0)
		timeout_add_sec(&sc->sc_tick, 1);

	splx(s);
}

void
oce_init(void *arg)
{
	struct oce_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct oce_eq *eq;
	struct oce_rq *rq;
	struct oce_wq *wq;
	int i;

	oce_stop(sc);

	DELAY(10);

	oce_macaddr_set(sc);

	oce_iff(sc);

	/* Enable VLAN promiscuous mode */
	if (oce_config_vlan(sc, NULL, 0, 1, 1))
		goto error;

	if (oce_set_flow_control(sc, IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE))
		goto error;

	OCE_RQ_FOREACH(sc, rq, i) {
		rq->mtu = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
		    ETHER_VLAN_ENCAP_LEN;
		if (oce_new_rq(sc, rq)) {
			printf("%s: failed to create rq\n",
			    sc->sc_dev.dv_xname);
			goto error;
		}
		rq->ring->index = 0;

		/* oce splits jumbos into 2k chunks... */
		if_rxr_init(&rq->rxring, 8, rq->nitems);

		if (!oce_alloc_rx_bufs(rq)) {
			printf("%s: failed to allocate rx buffers\n",
			    sc->sc_dev.dv_xname);
			goto error;
		}
	}

#ifdef OCE_RSS
	/* RSS config */
	if (sc->sc_rss_enable) {
		if (oce_config_rss(sc, 1)) {
			printf("%s: failed to configure RSS\n",
			    sc->sc_dev.dv_xname);
			goto error;
		}
	}
#endif

	OCE_RQ_FOREACH(sc, rq, i)
		oce_arm_cq(rq->cq, 0, TRUE);

	OCE_WQ_FOREACH(sc, wq, i)
		oce_arm_cq(wq->cq, 0, TRUE);

	oce_arm_cq(sc->sc_mq->cq, 0, TRUE);

	OCE_EQ_FOREACH(sc, eq, i)
		oce_arm_eq(eq, 0, TRUE, FALSE);

	if (oce_get_link_status(sc) == 0)
		oce_link_status(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_tick, 1);

	oce_intr_enable(sc);

	return;
error:
	oce_stop(sc);
}

void
oce_stop(struct oce_softc *sc)
{
	struct mbx_delete_nic_rq cmd;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct oce_rq *rq;
	struct oce_wq *wq;
	struct oce_eq *eq;
	int i;

	timeout_del(&sc->sc_tick);
	timeout_del(&sc->sc_rxrefill);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Stop intrs and finish any bottom halves pending */
	oce_intr_disable(sc);

	/* Invalidate any pending cq and eq entries */
	OCE_EQ_FOREACH(sc, eq, i)
		oce_drain_eq(eq);
	OCE_RQ_FOREACH(sc, rq, i) {
1150 0 : memset(&cmd, 0, sizeof(cmd));
1151 0 : cmd.params.req.rq_id = htole16(rq->id);
1152 0 : oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ,
1153 : OCE_MBX_VER_V0, &cmd, sizeof(cmd));
1154 0 : DELAY(1000);
1155 0 : oce_drain_rq(rq);
1156 0 : oce_free_posted_rxbuf(rq);
1157 : }
1158 0 : OCE_WQ_FOREACH(sc, wq, i)
1159 0 : oce_drain_wq(wq);
1160 0 : }

void
oce_watchdog(struct ifnet *ifp)
{
	printf("%s: watchdog timeout -- resetting\n", ifp->if_xname);

	oce_init(ifp->if_softc);

	ifp->if_oerrors++;
}

void
oce_start(struct ifnet *ifp)
{
	struct oce_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int pkts = 0;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (oce_encap(sc, &m, 0)) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		pkts++;
	}

	/* Set a timeout in case the chip goes out to lunch */
	if (pkts)
		ifp->if_timer = 5;
}
1203 :
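/*
 * Enqueue an mbuf on TX work queue wqidx: one header WQE carrying the
 * checksum/VLAN offload flags, followed by one fragment WQE per DMA
 * segment, padded with a zeroed WQE where the chip wants an even count.
 */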
int
oce_encap(struct oce_softc *sc, struct mbuf **mpp, int wqidx)
{
	struct mbuf *m = *mpp;
	struct oce_wq *wq = sc->sc_wq[wqidx];
	struct oce_pkt *pkt = NULL;
	struct oce_nic_hdr_wqe *nhe;
	struct oce_nic_frag_wqe *nfe;
	int i, nwqe, err;

#ifdef OCE_TSO
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		/* consolidate packet buffers for TSO/LSO segment offload */
		m = oce_tso(sc, mpp);
		if (m == NULL)
			goto error;
	}
#endif

	if ((pkt = oce_pkt_get(&wq->pkt_free)) == NULL)
		goto error;

	err = bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		if (m_defrag(m, M_DONTWAIT) ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, m,
		    BUS_DMA_NOWAIT))
			goto error;
		*mpp = m;
	} else if (err != 0)
		goto error;

	pkt->nsegs = pkt->map->dm_nsegs;

	nwqe = pkt->nsegs + 1;
	if (IS_BE(sc)) {
		/* BE2 and BE3 require even number of WQEs */
		if (nwqe & 1)
			nwqe++;
	}

	/* Fail if there's not enough free WQEs */
	if (nwqe >= wq->ring->nitems - wq->ring->nused) {
		bus_dmamap_unload(sc->sc_dmat, pkt->map);
		goto error;
	}

	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	pkt->mbuf = m;

	/* TX work queue entry for the header */
	nhe = oce_ring_get(wq->ring);
	memset(nhe, 0, sizeof(*nhe));

	nhe->u0.s.complete = 1;
	nhe->u0.s.event = 1;
	nhe->u0.s.crc = 1;
	nhe->u0.s.forward = 0;
	nhe->u0.s.ipcs = (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) ? 1 : 0;
	nhe->u0.s.udpcs = (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) ? 1 : 0;
	nhe->u0.s.tcpcs = (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) ? 1 : 0;
	nhe->u0.s.num_wqe = nwqe;
	nhe->u0.s.total_length = m->m_pkthdr.len;

#if NVLAN > 0
	if (m->m_flags & M_VLANTAG) {
		nhe->u0.s.vlan = 1; /* VLAN present */
		nhe->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
	}
#endif

#ifdef OCE_TSO
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		if (m->m_pkthdr.tso_segsz) {
			nhe->u0.s.lso = 1;
			nhe->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
		}
		if (!IS_BE(sc))
			nhe->u0.s.ipcs = 1;
	}
#endif

	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	wq->ring->nused++;

	/* TX work queue entries for data chunks */
	for (i = 0; i < pkt->nsegs; i++) {
		nfe = oce_ring_get(wq->ring);
		memset(nfe, 0, sizeof(*nfe));
		nfe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[i].ds_addr);
		nfe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[i].ds_addr);
		nfe->u0.s.frag_len = pkt->map->dm_segs[i].ds_len;
		wq->ring->nused++;
	}
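	/* Pad with a zeroed dummy WQE to keep the count even on BE chips */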
	if (nwqe > (pkt->nsegs + 1)) {
		nfe = oce_ring_get(wq->ring);
		memset(nfe, 0, sizeof(*nfe));
		wq->ring->nused++;
		pkt->nsegs++;
	}

	oce_pkt_put(&wq->pkt_list, pkt);

	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

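	/*
	 * Ring the TX doorbell: the queue id goes in the low 16 bits,
	 * the number of WQEs just posted in the upper half.
	 */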
	oce_write_db(sc, PD_TXULP_DB, wq->id | (nwqe << 16));

	return (0);

error:
	if (pkt)
		oce_pkt_put(&wq->pkt_free, pkt);
	m_freem(*mpp);
	*mpp = NULL;
	return (1);
}

#ifdef OCE_TSO
struct mbuf *
oce_tso(struct oce_softc *sc, struct mbuf **mpp)
{
	struct mbuf *m;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	uint16_t etype;
	int total_len = 0, ehdrlen = 0;

	m = *mpp;

	if (M_WRITABLE(m) == 0) {
		m = m_dup(*mpp, M_DONTWAIT);
		if (!m)
			return (NULL);
		m_freem(*mpp);
		*mpp = m;
	}

	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return (NULL);
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
		break;
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (NULL);
		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

		total_len = ehdrlen + sizeof(struct ip6_hdr) +
		    (th->th_off << 2);
		break;
#endif
	default:
		return (NULL);
	}

	m = m_pullup(m, total_len);
	if (!m)
		return (NULL);
	*mpp = m;
	return (m);
}
#endif /* OCE_TSO */
1390 :
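/*
 * Interrupt handler: consume the entries of the first (and only) event
 * queue, then dispatch every completion queue attached to it (TX, RX
 * and MCC) and re-arm both levels.
 */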
int
oce_intr(void *arg)
{
	struct oce_softc *sc = arg;
	struct oce_eq *eq = sc->sc_eq[0];
	struct oce_eqe *eqe;
	struct oce_cq *cq = NULL;
	int i, neqe = 0;

	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);

	OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
		eqe->evnt = 0;
		neqe++;
	}

	/* Spurious? */
	if (!neqe) {
		oce_arm_eq(eq, 0, TRUE, FALSE);
		return (0);
	}

	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);

	/* Clear EQ entries, but don't arm */
	oce_arm_eq(eq, neqe, FALSE, TRUE);

	/* Process TX, RX and MCC completion queues */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		(*cq->cq_intr)(cq->cb_arg);
		oce_arm_cq(cq, 0, TRUE);
	}

	oce_arm_eq(eq, 0, TRUE, FALSE);
	return (1);
}

/* Handle the Completion Queue for transmit */
void
oce_intr_wq(void *arg)
{
	struct oce_wq *wq = (struct oce_wq *)arg;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	struct oce_softc *sc = wq->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int ncqe = 0;

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
	OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
		oce_txeof(wq);
		WQ_CQE_INVALIDATE(cqe);
		ncqe++;
	}
	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);

	if (ifq_is_oactive(&ifp->if_snd)) {
		if (wq->ring->nused < (wq->ring->nitems / 2)) {
			ifq_clr_oactive(&ifp->if_snd);
			oce_start(ifp);
		}
	}
	if (wq->ring->nused == 0)
		ifp->if_timer = 0;

	if (ncqe)
		oce_arm_cq(cq, ncqe, FALSE);
}

void
oce_txeof(struct oce_wq *wq)
{
	struct oce_softc *sc = wq->sc;
	struct oce_pkt *pkt;
	struct mbuf *m;

	if ((pkt = oce_pkt_get(&wq->pkt_list)) == NULL) {
		printf("%s: missing descriptor in txeof\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	wq->ring->nused -= pkt->nsegs + 1;
	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, pkt->map);

	m = pkt->mbuf;
	m_freem(m);
	pkt->mbuf = NULL;
	oce_pkt_put(&wq->pkt_free, pkt);
}

/* Handle the Completion Queue for receive */
void
oce_intr_rq(void *arg)
{
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	struct oce_softc *sc = rq->sc;
	struct oce_nic_rx_cqe *cqe;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int maxrx, ncqe = 0;

	maxrx = IS_XE201(sc) ? 8 : OCE_MAX_RQ_COMPL;

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);

	OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe) && ncqe <= maxrx) {
		if (cqe->u0.s.error == 0) {
			if (cqe->u0.s.pkt_size == 0)
				/* partial DMA workaround for Lancer */
				oce_rxeoc(rq, cqe);
			else
				oce_rxeof(rq, cqe);
		} else {
			ifp->if_ierrors++;
			if (IS_XE201(sc))
				/* Lancer A0 no buffer workaround */
				oce_rxeoc(rq, cqe);
			else
				/* Post L3/L4 errors to stack. */
				oce_rxeof(rq, cqe);
		}
#ifdef OCE_LRO
		if (IF_LRO_ENABLED(ifp) && rq->lro_pkts_queued >= 16)
			oce_flush_lro(rq);
#endif
		RQ_CQE_INVALIDATE(cqe);
		ncqe++;
	}

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);

#ifdef OCE_LRO
	if (IF_LRO_ENABLED(ifp))
		oce_flush_lro(rq);
#endif

	if (ncqe) {
		oce_arm_cq(cq, ncqe, FALSE);
		if (!oce_alloc_rx_bufs(rq))
			timeout_add(&sc->sc_rxrefill, 1);
	}
}
1537 :
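/*
 * Reassemble a received frame from its RX ring fragments (jumbos span
 * several 2 KB buffers), fill in the checksum and VLAN metadata from
 * the completion entry and hand the packet to the stack.
 */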
void
oce_rxeof(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	struct oce_softc *sc = rq->sc;
	struct oce_pkt *pkt = NULL;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m = NULL, *tail = NULL;
	int i, len, frag_len;
	uint16_t vtag;

	len = cqe->u0.s.pkt_size;

	/* Get vlan_tag value */
	if (IS_BE(sc))
		vtag = ntohs(cqe->u0.s.vlan_tag);
	else
		vtag = cqe->u0.s.vlan_tag;

	for (i = 0; i < cqe->u0.s.num_fragments; i++) {
		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
			printf("%s: missing descriptor in rxeof\n",
			    sc->sc_dev.dv_xname);
			goto exit;
		}

		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, pkt->map);
		if_rxr_put(&rq->rxring, 1);

		frag_len = (len > rq->fragsize) ? rq->fragsize : len;
		pkt->mbuf->m_len = frag_len;

		if (tail != NULL) {
			/* additional fragments */
			pkt->mbuf->m_flags &= ~M_PKTHDR;
			tail->m_next = pkt->mbuf;
			tail = pkt->mbuf;
		} else {
			/* first fragment, fill out most of the header */
			pkt->mbuf->m_pkthdr.len = len;
			pkt->mbuf->m_pkthdr.csum_flags = 0;
			if (cqe->u0.s.ip_cksum_pass) {
				if (!cqe->u0.s.ip_ver) { /* IPv4 */
					pkt->mbuf->m_pkthdr.csum_flags =
					    M_IPV4_CSUM_IN_OK;
				}
			}
			if (cqe->u0.s.l4_cksum_pass) {
				pkt->mbuf->m_pkthdr.csum_flags |=
				    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
			}
			m = tail = pkt->mbuf;
		}
		pkt->mbuf = NULL;
		oce_pkt_put(&rq->pkt_free, pkt);
		len -= frag_len;
	}

	if (m) {
		if (!oce_port_valid(sc, cqe)) {
			m_freem(m);
			goto exit;
		}

#if NVLAN > 0
		/* This determines if vlan tag is valid */
		if (oce_vtp_valid(sc, cqe)) {
			if (sc->sc_fmode & FNM_FLEX10_MODE) {
				/* FLEX10. If QnQ is not set, neglect VLAN */
				if (cqe->u0.s.qnq) {
					m->m_pkthdr.ether_vtag = vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->sc_pvid != (vtag & VLAN_VID_MASK)) {
				/*
				 * In UMC mode the PVID is generally
				 * stripped, but some frames have been seen
				 * to arrive still carrying it; if the tag
				 * equals the PVID, ignore the VLAN tag.
				 */
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
		}
#endif

#ifdef OCE_LRO
		/* Try to queue to LRO */
		if (IF_LRO_ENABLED(ifp) && !(m->m_flags & M_VLANTAG) &&
		    cqe->u0.s.ip_cksum_pass && cqe->u0.s.l4_cksum_pass &&
		    !cqe->u0.s.ip_ver && rq->lro.lro_cnt != 0) {

			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
				rq->lro_pkts_queued++;
				goto exit;
			}
			/* If LRO posting fails then try to post to STACK */
		}
#endif

		ml_enqueue(&ml, m);
	}
exit:
	if_input(ifp, &ml);
}

void
oce_rxeoc(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	struct oce_softc *sc = rq->sc;
	struct oce_pkt *pkt;
	int i, num_frags = cqe->u0.s.num_fragments;

	if (IS_XE201(sc) && cqe->u0.s.error) {
		/*
		 * Lancer A0 workaround:
		 * num_frags will be 1 more than actual in case of error
		 */
		if (num_frags)
			num_frags--;
	}
	for (i = 0; i < num_frags; i++) {
		if ((pkt = oce_pkt_get(&rq->pkt_list)) == NULL) {
			printf("%s: missing descriptor in rxeoc\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, pkt->map);
		if_rxr_put(&rq->rxring, 1);
		m_freem(pkt->mbuf);
		oce_pkt_put(&rq->pkt_free, pkt);
	}
}

int
oce_vtp_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;

	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		return (cqe_v1->u0.s.vlan_tag_present);
	}
	return (cqe->u0.s.vlan_tag_present);
}

int
oce_port_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;

	if (IS_BE(sc) && ISSET(sc->sc_flags, OCE_F_BE3_NATIVE)) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		if (sc->sc_port != cqe_v1->u0.s.port)
			return (0);
	}
	return (1);
}

#ifdef OCE_LRO
void
oce_flush_lro(struct oce_rq *rq)
{
	struct oce_softc *sc = rq->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct lro_ctrl *lro = &rq->lro;
	struct lro_entry *queued;

	if (!IF_LRO_ENABLED(ifp))
		return;

	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
	rq->lro_pkts_queued = 0;
}

int
oce_init_lro(struct oce_softc *sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0, rc = 0;

	for (i = 0; i < sc->sc_nrq; i++) {
		lro = &sc->sc_rq[i]->lro;
		rc = tcp_lro_init(lro);
		if (rc != 0) {
			printf("%s: LRO init failed\n",
			    sc->sc_dev.dv_xname);
			return (rc);
		}
		lro->ifp = &sc->sc_ac.ac_if;
	}

	return (rc);
}

void
oce_free_lro(struct oce_softc *sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0;

	for (i = 0; i < sc->sc_nrq; i++) {
		lro = &sc->sc_rq[i]->lro;
		if (lro)
			tcp_lro_free(lro);
	}
}
#endif /* OCE_LRO */

int
oce_get_buf(struct oce_rq *rq)
{
	struct oce_softc *sc = rq->sc;
	struct oce_pkt *pkt;
	struct oce_nic_rqe *rqe;

	if ((pkt = oce_pkt_get(&rq->pkt_free)) == NULL)
		return (0);

	pkt->mbuf = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (pkt->mbuf == NULL) {
		oce_pkt_put(&rq->pkt_free, pkt);
		return (0);
	}

	pkt->mbuf->m_len = pkt->mbuf->m_pkthdr.len = MCLBYTES;
#ifdef __STRICT_ALIGNMENT
	m_adj(pkt->mbuf, ETHER_ALIGN);
#endif

	if (bus_dmamap_load_mbuf(sc->sc_dmat, pkt->map, pkt->mbuf,
	    BUS_DMA_NOWAIT)) {
		m_freem(pkt->mbuf);
		pkt->mbuf = NULL;
		oce_pkt_put(&rq->pkt_free, pkt);
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	rqe = oce_ring_get(rq->ring);
	rqe->u0.s.frag_pa_hi = ADDR_HI(pkt->map->dm_segs[0].ds_addr);
	rqe->u0.s.frag_pa_lo = ADDR_LO(pkt->map->dm_segs[0].ds_addr);

	oce_dma_sync(&rq->ring->dma, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	oce_pkt_put(&rq->pkt_list, pkt);

	return (1);
}

int
oce_alloc_rx_bufs(struct oce_rq *rq)
{
	struct oce_softc *sc = rq->sc;
	int i, nbufs = 0;
	u_int slots;

	for (slots = if_rxr_get(&rq->rxring, rq->nitems); slots > 0; slots--) {
		if (oce_get_buf(rq) == 0)
			break;

		nbufs++;
	}
	if_rxr_put(&rq->rxring, slots);

	if (!nbufs)
		return (0);
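	/*
	 * Ring the RX doorbell: the queue id goes in the low bits and the
	 * number of buffers posted in bits 24-31, so at most
	 * OCE_MAX_RQ_POSTS (255) buffers can be published per write.
	 */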
	for (i = nbufs / OCE_MAX_RQ_POSTS; i > 0; i--) {
		oce_write_db(sc, PD_RXULP_DB, rq->id |
		    (OCE_MAX_RQ_POSTS << 24));
		nbufs -= OCE_MAX_RQ_POSTS;
	}
	if (nbufs > 0)
		oce_write_db(sc, PD_RXULP_DB, rq->id | (nbufs << 24));
	return (1);
}

void
oce_refill_rx(void *arg)
{
	struct oce_softc *sc = arg;
	struct oce_rq *rq;
	int i, s;

	s = splnet();
	OCE_RQ_FOREACH(sc, rq, i) {
		if (!oce_alloc_rx_bufs(rq))
			timeout_add(&sc->sc_rxrefill, 5);
	}
	splx(s);
}

/* Handle the Completion Queue for the Mailbox/Async notifications */
void
oce_intr_mq(void *arg)
{
	struct oce_mq *mq = (struct oce_mq *)arg;
	struct oce_softc *sc = mq->sc;
	struct oce_cq *cq = mq->cq;
	struct oce_mq_cqe *cqe;
	struct oce_async_cqe_link_state *acqe;
	struct oce_async_event_grp5_pvid_state *gcqe;
	int evtype, optype, ncqe = 0;

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);

	OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
		if (cqe->u0.s.async_event) {
			evtype = cqe->u0.s.event_type;
			optype = cqe->u0.s.async_type;
			if (evtype == ASYNC_EVENT_CODE_LINK_STATE) {
				/* Link status evt */
				acqe = (struct oce_async_cqe_link_state *)cqe;
				oce_link_event(sc, acqe);
			} else if ((evtype == ASYNC_EVENT_GRP5) &&
			    (optype == ASYNC_EVENT_PVID_STATE)) {
				/* GRP5 PVID */
				gcqe =
				    (struct oce_async_event_grp5_pvid_state *)cqe;
				if (gcqe->enabled)
					sc->sc_pvid =
					    gcqe->tag & VLAN_VID_MASK;
				else
					sc->sc_pvid = 0;
			}
		}
		MQ_CQE_INVALIDATE(cqe);
		ncqe++;
	}

	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);

	if (ncqe)
		oce_arm_cq(cq, ncqe, FALSE);
}

void
oce_link_event(struct oce_softc *sc, struct oce_async_cqe_link_state *acqe)
{
	/* Update Link status */
	sc->sc_link_up = ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
	    ASYNC_EVENT_LINK_UP);
	/* Update speed */
	sc->sc_link_speed = acqe->u0.s.speed;
	oce_link_status(sc);
}

int
oce_init_queues(struct oce_softc *sc)
{
	struct oce_wq *wq;
	struct oce_rq *rq;
	int i;

	sc->sc_nrq = 1;
	sc->sc_nwq = 1;

	/* Create network interface on card */
	if (oce_create_iface(sc, sc->sc_macaddr))
		goto error;

	/* create all of the event queues */
	for (i = 0; i < sc->sc_nintr; i++) {
		sc->sc_eq[i] = oce_create_eq(sc);
		if (!sc->sc_eq[i])
			goto error;
	}

	/* alloc tx queues */
	OCE_WQ_FOREACH(sc, wq, i) {
		sc->sc_wq[i] = oce_create_wq(sc, sc->sc_eq[i]);
		if (!sc->sc_wq[i])
			goto error;
	}

	/* alloc rx queues */
	OCE_RQ_FOREACH(sc, rq, i) {
		sc->sc_rq[i] = oce_create_rq(sc, sc->sc_eq[i > 0 ? i - 1 : 0],
		    i > 0 ? sc->sc_rss_enable : 0);
		if (!sc->sc_rq[i])
			goto error;
	}

	/* alloc mailbox queue */
	sc->sc_mq = oce_create_mq(sc, sc->sc_eq[0]);
	if (!sc->sc_mq)
		goto error;

	return (0);
error:
	oce_release_queues(sc);
	return (1);
}
1943 :
1944 : void
1945 0 : oce_release_queues(struct oce_softc *sc)
1946 : {
1947 : struct oce_wq *wq;
1948 : struct oce_rq *rq;
1949 : struct oce_eq *eq;
1950 : int i;
1951 :
1952 0 : OCE_RQ_FOREACH(sc, rq, i) {
1953 0 : if (rq)
1954 0 : oce_destroy_rq(sc->sc_rq[i]);
1955 : }
1956 :
1957 0 : OCE_WQ_FOREACH(sc, wq, i) {
1958 0 : if (wq)
1959 0 : oce_destroy_wq(sc->sc_wq[i]);
1960 : }
1961 :
1962 0 : if (sc->sc_mq)
1963 0 : oce_destroy_mq(sc->sc_mq);
1964 :
1965 0 : OCE_EQ_FOREACH(sc, eq, i) {
1966 0 : if (eq)
1967 0 : oce_destroy_eq(sc->sc_eq[i]);
1968 : }
1969 0 : }
1970 :
1971 : /**
1972 : * @brief Function to create a WQ for NIC Tx
1973 : * @param sc software handle to the device
1974 : * @returns the pointer to the WQ created or NULL on failure
1975 : */
1976 : struct oce_wq *
1977 0 : oce_create_wq(struct oce_softc *sc, struct oce_eq *eq)
1978 : {
1979 : struct oce_wq *wq;
1980 : struct oce_cq *cq;
1981 : struct oce_pkt *pkt;
1982 : int i;
1983 :
1984 0 : if (sc->sc_tx_ring_size < 256 || sc->sc_tx_ring_size > 2048)
1985 0 : return (NULL);
1986 :
1987 0 : wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
1988 0 : if (!wq)
1989 0 : return (NULL);
1990 :
1991 0 : wq->ring = oce_create_ring(sc, sc->sc_tx_ring_size, NIC_WQE_SIZE, 8);
1992 0 : if (!wq->ring) {
1993 0 : free(wq, M_DEVBUF, 0);
1994 0 : return (NULL);
1995 : }
1996 :
1997 0 : cq = oce_create_cq(sc, eq, CQ_LEN_512, sizeof(struct oce_nic_tx_cqe),
1998 : 1, 0, 3);
1999 0 : if (!cq) {
2000 0 : oce_destroy_ring(sc, wq->ring);
2001 0 : free(wq, M_DEVBUF, 0);
2002 0 : return (NULL);
2003 : }
2004 :
2005 0 : wq->id = -1;
2006 0 : wq->sc = sc;
2007 :
2008 0 : wq->cq = cq;
2009 0 : wq->nitems = sc->sc_tx_ring_size;
2010 :
2011 0 : SIMPLEQ_INIT(&wq->pkt_free);
2012 0 : SIMPLEQ_INIT(&wq->pkt_list);
2013 :
2014 0 : for (i = 0; i < sc->sc_tx_ring_size / 2; i++) {
2015 0 : pkt = oce_pkt_alloc(sc, OCE_MAX_TX_SIZE, OCE_MAX_TX_ELEMENTS,
2016 : PAGE_SIZE);
2017 0 : if (pkt == NULL) {
2018 0 : oce_destroy_wq(wq);
2019 0 : return (NULL);
2020 : }
2021 0 : oce_pkt_put(&wq->pkt_free, pkt);
2022 : }
2023 :
2024 0 : if (oce_new_wq(sc, wq)) {
2025 0 : oce_destroy_wq(wq);
2026 0 : return (NULL);
2027 : }
2028 :
2029 0 : eq->cq[eq->cq_valid] = cq;
2030 0 : eq->cq_valid++;
2031 0 : cq->cb_arg = wq;
2032 0 : cq->cq_intr = oce_intr_wq;
2033 :
2034 0 : return (wq);
2035 0 : }
2036 :
2037 : void
2038 0 : oce_drain_wq(struct oce_wq *wq)
2039 : {
2040 0 : struct oce_cq *cq = wq->cq;
2041 : struct oce_nic_tx_cqe *cqe;
2042 : int ncqe = 0;
2043 :
2044 0 : oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2045 0 : OCE_RING_FOREACH(cq->ring, cqe, WQ_CQE_VALID(cqe)) {
2046 0 : WQ_CQE_INVALIDATE(cqe);
2047 0 : ncqe++;
2048 : }
2049 0 : oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2050 0 : oce_arm_cq(cq, ncqe, FALSE);
2051 0 : }
2052 :
2053 : void
2054 0 : oce_destroy_wq(struct oce_wq *wq)
2055 : {
2056 0 : struct mbx_delete_nic_wq cmd;
2057 0 : struct oce_softc *sc = wq->sc;
2058 : struct oce_pkt *pkt;
2059 :
2060 0 : if (wq->id >= 0) {
2061 0 : memset(&cmd, 0, sizeof(cmd));
2062 0 : cmd.params.req.wq_id = htole16(wq->id);
2063 0 : oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_WQ, OCE_MBX_VER_V0,
2064 : &cmd, sizeof(cmd));
2065 0 : }
2066 0 : if (wq->cq != NULL)
2067 0 : oce_destroy_cq(wq->cq);
2068 0 : if (wq->ring != NULL)
2069 0 : oce_destroy_ring(sc, wq->ring);
2070 0 : while ((pkt = oce_pkt_get(&wq->pkt_free)) != NULL)
2071 0 : oce_pkt_free(sc, pkt);
2072 0 : free(wq, M_DEVBUF, 0);
2073 0 : }
2074 :
2075 : /**
2076 : * @brief function to allocate receive queue resources
2077 : * @param sc software handle to the device
2078 : * @param eq pointer to associated event queue
2079 : * @param rss is-rss-queue flag
2080 : * @returns the pointer to the RQ created or NULL on failure
2081 : */
2082 : struct oce_rq *
2083 0 : oce_create_rq(struct oce_softc *sc, struct oce_eq *eq, int rss)
2084 : {
2085 : struct oce_rq *rq;
2086 : struct oce_cq *cq;
2087 : struct oce_pkt *pkt;
2088 : int i;
2089 :
2090 : /* Hardware doesn't support any other value */
2091 0 : if (sc->sc_rx_ring_size != 1024)
2092 0 : return (NULL);
2093 :
2094 0 : rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
2095 0 : if (!rq)
2096 0 : return (NULL);
2097 :
2098 0 : rq->ring = oce_create_ring(sc, sc->sc_rx_ring_size,
2099 : sizeof(struct oce_nic_rqe), 2);
2100 0 : if (!rq->ring) {
2101 0 : free(rq, M_DEVBUF, 0);
2102 0 : return (NULL);
2103 : }
2104 :
2105 0 : cq = oce_create_cq(sc, eq, CQ_LEN_1024, sizeof(struct oce_nic_rx_cqe),
2106 : 1, 0, 3);
2107 0 : if (!cq) {
2108 0 : oce_destroy_ring(sc, rq->ring);
2109 0 : free(rq, M_DEVBUF, 0);
2110 0 : return (NULL);
2111 : }
2112 :
2113 0 : rq->id = -1;
2114 0 : rq->sc = sc;
2115 :
2116 0 : rq->nitems = sc->sc_rx_ring_size;
2117 0 : rq->fragsize = OCE_RX_BUF_SIZE;
2118 0 : rq->rss = rss;
2119 :
2120 0 : SIMPLEQ_INIT(&rq->pkt_free);
2121 0 : SIMPLEQ_INIT(&rq->pkt_list);
2122 :
2123 0 : for (i = 0; i < sc->sc_rx_ring_size; i++) {
2124 0 : pkt = oce_pkt_alloc(sc, OCE_RX_BUF_SIZE, 1, OCE_RX_BUF_SIZE);
2125 0 : if (pkt == NULL) {
2126 0 : oce_destroy_rq(rq);
2127 0 : return (NULL);
2128 : }
2129 0 : oce_pkt_put(&rq->pkt_free, pkt);
2130 : }
2131 :
2132 0 : rq->cq = cq;
2133 0 : eq->cq[eq->cq_valid] = cq;
2134 0 : eq->cq_valid++;
2135 0 : cq->cb_arg = rq;
2136 0 : cq->cq_intr = oce_intr_rq;
2137 :
2138 : /* RX queue is created in oce_init */
2139 :
2140 0 : return (rq);
2141 0 : }
2142 :
2143 : void
2144 0 : oce_drain_rq(struct oce_rq *rq)
2145 : {
2146 : struct oce_nic_rx_cqe *cqe;
2147 0 : struct oce_cq *cq = rq->cq;
2148 : int ncqe = 0;
2149 :
2150 0 : oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2151 0 : OCE_RING_FOREACH(cq->ring, cqe, RQ_CQE_VALID(cqe)) {
2152 0 : RQ_CQE_INVALIDATE(cqe);
2153 0 : ncqe++;
2154 : }
2155 0 : oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2156 0 : oce_arm_cq(cq, ncqe, FALSE);
2157 0 : }
2158 :
2159 : void
2160 0 : oce_destroy_rq(struct oce_rq *rq)
2161 : {
2162 0 : struct mbx_delete_nic_rq cmd;
2163 0 : struct oce_softc *sc = rq->sc;
2164 : struct oce_pkt *pkt;
2165 :
2166 0 : if (rq->id >= 0) {
2167 0 : memset(&cmd, 0, sizeof(cmd));
2168 0 : cmd.params.req.rq_id = htole16(rq->id);
2169 0 : oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_DELETE_RQ, OCE_MBX_VER_V0,
2170 : &cmd, sizeof(cmd));
2171 0 : }
2172 0 : if (rq->cq != NULL)
2173 0 : oce_destroy_cq(rq->cq);
2174 0 : if (rq->ring != NULL)
2175 0 : oce_destroy_ring(sc, rq->ring);
2176 0 : while ((pkt = oce_pkt_get(&rq->pkt_free)) != NULL)
2177 0 : oce_pkt_free(sc, pkt);
2178 0 : free(rq, M_DEVBUF, 0);
2179 0 : }
2180 :
2181 : struct oce_eq *
2182 0 : oce_create_eq(struct oce_softc *sc)
2183 : {
2184 : struct oce_eq *eq;
2185 :
2186 : /* allocate an eq */
2187 0 : eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
2188 0 : if (eq == NULL)
2189 0 : return (NULL);
2190 :
2191 0 : eq->ring = oce_create_ring(sc, EQ_LEN_1024, EQE_SIZE_4, 8);
2192 0 : if (!eq->ring) {
2193 0 : free(eq, M_DEVBUF, 0);
2194 0 : return (NULL);
2195 : }
2196 :
2197 0 : eq->id = -1;
2198 0 : eq->sc = sc;
2199 0 : eq->nitems = EQ_LEN_1024; /* length of event queue */
2200 0 : eq->isize = EQE_SIZE_4; /* size of a queue item */
2201 0 : eq->delay = OCE_DEFAULT_EQD; /* event queue delay */
2202 :
2203 0 : if (oce_new_eq(sc, eq)) {
2204 0 : oce_destroy_ring(sc, eq->ring);
2205 0 : free(eq, M_DEVBUF, 0);
2206 0 : return (NULL);
2207 : }
2208 :
2209 0 : return (eq);
2210 0 : }
2211 :
2212 : /**
2213 : * @brief Function to arm an EQ so that it can generate events
2214 : * @param eq pointer to event queue structure
2215 : * @param neqe number of EQEs to acknowledge
2216 : * @param rearm rearm bit enable/disable
2217 : * @param clearint bit to clear the interrupt condition that
2218 : * generated the EQEs
2219 : */
2220 : static inline void
2221 0 : oce_arm_eq(struct oce_eq *eq, int neqe, int rearm, int clearint)
2222 : {
2223 0 : oce_write_db(eq->sc, PD_EQ_DB, eq->id | PD_EQ_DB_EVENT |
2224 0 : (clearint << 9) | (neqe << 16) | (rearm << 29));
2225 0 : }
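/*
 * Example of the doorbell layout composed above (a sketch with made-up
 * values, not taken from a register manual): acknowledging 16 EQEs on
 * EQ id 3, clearing the interrupt condition and re-arming the queue,
 * issues
 *
 *	oce_write_db(eq->sc, PD_EQ_DB, 3 | PD_EQ_DB_EVENT |
 *	    (1 << 9) | (16 << 16) | (1 << 29));
 *
 * i.e. clearint sits at bit 9, the EQE count at bits 16 and up, and
 * the rearm bit at bit 29.
 */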
2226 :
2227 : void
2228 0 : oce_drain_eq(struct oce_eq *eq)
2229 : {
2230 : struct oce_eqe *eqe;
2231 : int neqe = 0;
2232 :
2233 0 : oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTREAD);
2234 0 : OCE_RING_FOREACH(eq->ring, eqe, eqe->evnt != 0) {
2235 0 : eqe->evnt = 0;
2236 0 : neqe++;
2237 : }
2238 0 : oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_PREWRITE);
2239 0 : oce_arm_eq(eq, neqe, FALSE, TRUE);
2240 0 : }
2241 :
2242 : void
2243 0 : oce_destroy_eq(struct oce_eq *eq)
2244 : {
2245 0 : struct mbx_destroy_common_eq cmd;
2246 0 : struct oce_softc *sc = eq->sc;
2247 :
2248 0 : if (eq->id >= 0) {
2249 0 : memset(&cmd, 0, sizeof(cmd));
2250 0 : cmd.params.req.id = htole16(eq->id);
2251 0 : oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_EQ,
2252 : OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2253 0 : }
2254 0 : if (eq->ring != NULL)
2255 0 : oce_destroy_ring(sc, eq->ring);
2256 0 : free(eq, M_DEVBUF, 0);
2257 0 : }
2258 :
2259 : struct oce_mq *
2260 0 : oce_create_mq(struct oce_softc *sc, struct oce_eq *eq)
2261 : {
2262 : struct oce_mq *mq = NULL;
2263 : struct oce_cq *cq;
2264 :
2265 : /* allocate the mq */
2266 0 : mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
2267 0 : if (!mq)
2268 0 : return (NULL);
2269 :
2270 0 : mq->ring = oce_create_ring(sc, 128, sizeof(struct oce_mbx), 8);
2271 0 : if (!mq->ring) {
2272 0 : free(mq, M_DEVBUF, 0);
2273 0 : return (NULL);
2274 : }
2275 :
2276 0 : cq = oce_create_cq(sc, eq, CQ_LEN_256, sizeof(struct oce_mq_cqe),
2277 : 1, 0, 0);
2278 0 : if (!cq) {
2279 0 : oce_destroy_ring(sc, mq->ring);
2280 0 : free(mq, M_DEVBUF, 0);
2281 0 : return (NULL);
2282 : }
2283 :
2284 0 : mq->id = -1;
2285 0 : mq->sc = sc;
2286 0 : mq->cq = cq;
2287 :
2288 0 : mq->nitems = 128;
2289 :
2290 0 : if (oce_new_mq(sc, mq)) {
2291 0 : oce_destroy_cq(mq->cq);
2292 0 : oce_destroy_ring(sc, mq->ring);
2293 0 : free(mq, M_DEVBUF, 0);
2294 0 : return (NULL);
2295 : }
2296 :
2297 0 : eq->cq[eq->cq_valid] = cq;
2298 0 : eq->cq_valid++;
2299 0 : mq->cq->eq = eq;
2300 0 : mq->cq->cb_arg = mq;
2301 0 : mq->cq->cq_intr = oce_intr_mq;
2302 :
2303 0 : return (mq);
2304 0 : }
2305 :
2306 : void
2307 0 : oce_drain_mq(struct oce_mq *mq)
2308 : {
2309 0 : struct oce_cq *cq = mq->cq;
2310 : struct oce_mq_cqe *cqe;
2311 : int ncqe = 0;
2312 :
2313 0 : oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTREAD);
2314 0 : OCE_RING_FOREACH(cq->ring, cqe, MQ_CQE_VALID(cqe)) {
2315 0 : MQ_CQE_INVALIDATE(cqe);
2316 0 : ncqe++;
2317 : }
2318 0 : oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_PREWRITE);
2319 0 : oce_arm_cq(cq, ncqe, FALSE);
2320 0 : }
2321 :
2322 : void
2323 0 : oce_destroy_mq(struct oce_mq *mq)
2324 : {
2325 0 : struct mbx_destroy_common_mq cmd;
2326 0 : struct oce_softc *sc = mq->sc;
2327 :
2328 0 : if (mq->id >= 0) {
2329 0 : memset(&cmd, 0, sizeof(cmd));
2330 0 : cmd.params.req.id = htole16(mq->id);
2331 0 : oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_MQ,
2332 : OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2333 0 : }
2334 0 : if (mq->ring != NULL)
2335 0 : oce_destroy_ring(sc, mq->ring);
2336 0 : if (mq->cq != NULL)
2337 0 : oce_destroy_cq(mq->cq);
2338 0 : free(mq, M_DEVBUF, 0);
2339 0 : }
2340 :
2341 : /**
2342 : * @brief Function to create a completion queue
2343 : * @param sc software handle to the device
2344 : * @param eq optional eq to associate with the cq
2345 : * @param nitems length of completion queue
2346 : * @param isize size of completion queue items
2347 : * @param eventable nonzero if the cq should post events to its eq
2348 : * @param nodelay no delay flag
2349 : * @param ncoalesce coalescence watermark (number of CQEs)
2350 : * @returns pointer to the cq created, NULL on failure
2351 : */
2352 : struct oce_cq *
2353 0 : oce_create_cq(struct oce_softc *sc, struct oce_eq *eq, int nitems, int isize,
2354 : int eventable, int nodelay, int ncoalesce)
2355 : {
2356 : struct oce_cq *cq = NULL;
2357 :
2358 0 : cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
2359 0 : if (!cq)
2360 0 : return (NULL);
2361 :
2362 0 : cq->ring = oce_create_ring(sc, nitems, isize, 4);
2363 0 : if (!cq->ring) {
2364 0 : free(cq, M_DEVBUF, 0);
2365 0 : return (NULL);
2366 : }
2367 :
2368 0 : cq->sc = sc;
2369 0 : cq->eq = eq;
2370 0 : cq->nitems = nitems;
2371 0 : cq->nodelay = nodelay;
2372 0 : cq->ncoalesce = ncoalesce;
2373 0 : cq->eventable = eventable;
2374 :
2375 0 : if (oce_new_cq(sc, cq)) {
2376 0 : oce_destroy_ring(sc, cq->ring);
2377 0 : free(cq, M_DEVBUF, 0);
2378 0 : return (NULL);
2379 : }
2380 :
2381 0 : sc->sc_cq[sc->sc_ncq++] = cq;
2382 :
2383 0 : return (cq);
2384 0 : }
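/*
 * Usage in this file: callers size the CQ to its producer ring and
 * tune coalescing per traffic type,
 *
 *	TX: oce_create_cq(sc, eq, CQ_LEN_512, sizeof(struct oce_nic_tx_cqe), 1, 0, 3);
 *	RX: oce_create_cq(sc, eq, CQ_LEN_1024, sizeof(struct oce_nic_rx_cqe), 1, 0, 3);
 *	MQ: oce_create_cq(sc, eq, CQ_LEN_256, sizeof(struct oce_mq_cqe), 1, 0, 0);
 *
 * all eventable with no-delay off; only the mailbox CQ disables CQE
 * coalescing (watermark 0).
 */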
2385 :
2386 : void
2387 0 : oce_destroy_cq(struct oce_cq *cq)
2388 : {
2389 0 : struct mbx_destroy_common_cq cmd;
2390 0 : struct oce_softc *sc = cq->sc;
2391 :
2392 0 : if (cq->id >= 0) {
2393 0 : memset(&cmd, 0, sizeof(cmd));
2394 0 : cmd.params.req.id = htole16(cq->id);
2395 0 : oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DESTROY_CQ,
2396 : OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2397 0 : }
2398 0 : if (cq->ring != NULL)
2399 0 : oce_destroy_ring(sc, cq->ring);
2400 0 : free(cq, M_DEVBUF, 0);
2401 0 : }
2402 :
2403 : /**
2404 : * @brief Function to acknowledge CQEs and re-arm a CQ
2405 : * @param cq pointer to the completion queue structure
2406 : * @param ncqe number of CQEs to acknowledge
2407 : * @param rearm rearm bit enable/disable
2408 : */
2409 : static inline void
2410 0 : oce_arm_cq(struct oce_cq *cq, int ncqe, int rearm)
2411 : {
2412 0 : oce_write_db(cq->sc, PD_CQ_DB, cq->id | (ncqe << 16) | (rearm << 29));
2413 0 : }
2414 :
2415 : void
2416 0 : oce_free_posted_rxbuf(struct oce_rq *rq)
2417 : {
2418 0 : struct oce_softc *sc = rq->sc;
2419 : struct oce_pkt *pkt;
2420 :
2421 0 : while ((pkt = oce_pkt_get(&rq->pkt_list)) != NULL) {
2422 0 : bus_dmamap_sync(sc->sc_dmat, pkt->map, 0, pkt->map->dm_mapsize,
2423 : BUS_DMASYNC_POSTREAD);
2424 0 : bus_dmamap_unload(sc->sc_dmat, pkt->map);
2425 0 : if (pkt->mbuf != NULL) {
2426 0 : m_freem(pkt->mbuf);
2427 0 : pkt->mbuf = NULL;
2428 0 : }
2429 0 : oce_pkt_put(&rq->pkt_free, pkt);
2430 0 : if_rxr_put(&rq->rxring, 1);
2431 : }
2432 0 : }
2433 :
2434 : int
2435 0 : oce_dma_alloc(struct oce_softc *sc, bus_size_t size, struct oce_dma_mem *dma)
2436 : {
2437 : int rc;
2438 :
2439 0 : memset(dma, 0, sizeof(struct oce_dma_mem));
2440 :
2441 0 : dma->tag = sc->sc_dmat;
2442 0 : rc = bus_dmamap_create(dma->tag, size, 1, size, 0, BUS_DMA_NOWAIT,
2443 : &dma->map);
2444 0 : if (rc != 0) {
2445 0 : printf("%s: failed to allocate DMA handle\n",
2446 0 : sc->sc_dev.dv_xname);
2447 0 : goto fail_0;
2448 : }
2449 :
2450 0 : rc = bus_dmamem_alloc(dma->tag, size, PAGE_SIZE, 0, &dma->segs, 1,
2451 : &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2452 0 : if (rc != 0) {
2453 0 : printf("%s: failed to allocate DMA memory\n",
2454 0 : sc->sc_dev.dv_xname);
2455 0 : goto fail_1;
2456 : }
2457 :
2458 0 : rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2459 : &dma->vaddr, BUS_DMA_NOWAIT);
2460 0 : if (rc != 0) {
2461 0 : printf("%s: failed to map DMA memory\n", sc->sc_dev.dv_xname);
2462 0 : goto fail_2;
2463 : }
2464 :
2465 0 : rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, NULL,
2466 : BUS_DMA_NOWAIT);
2467 0 : if (rc != 0) {
2468 0 : printf("%s: failed to load DMA memory\n", sc->sc_dev.dv_xname);
2469 : goto fail_3;
2470 : }
2471 :
2472 0 : bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2473 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2474 :
2475 0 : dma->paddr = dma->map->dm_segs[0].ds_addr;
2476 0 : dma->size = size;
2477 :
2478 0 : return (0);
2479 :
2480 : fail_3:
2481 0 : bus_dmamem_unmap(dma->tag, dma->vaddr, size);
2482 : fail_2:
2483 0 : bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2484 : fail_1:
2485 0 : bus_dmamap_destroy(dma->tag, dma->map);
2486 : fail_0:
2487 0 : return (rc);
2488 0 : }
2489 :
2490 : void
2491 0 : oce_dma_free(struct oce_softc *sc, struct oce_dma_mem *dma)
2492 : {
2493 0 : if (dma->tag == NULL)
2494 : return;
2495 :
2496 0 : if (dma->map != NULL) {
2497 0 : oce_dma_sync(dma, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2498 0 : bus_dmamap_unload(dma->tag, dma->map);
2499 :
2500 0 : if (dma->vaddr != 0) {
2501 0 : bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2502 0 : dma->vaddr = 0;
2503 0 : }
2504 :
2505 0 : bus_dmamap_destroy(dma->tag, dma->map);
2506 0 : dma->map = NULL;
2507 0 : dma->tag = NULL;
2508 0 : }
2509 0 : }
2510 :
2511 : struct oce_ring *
2512 0 : oce_create_ring(struct oce_softc *sc, int nitems, int isize, int maxsegs)
2513 : {
2514 : struct oce_dma_mem *dma;
2515 : struct oce_ring *ring;
2516 0 : bus_size_t size = nitems * isize;
2517 : int rc;
2518 :
2519 0 : if (size > maxsegs * PAGE_SIZE)
2520 0 : return (NULL);
2521 :
2522 0 : ring = malloc(sizeof(struct oce_ring), M_DEVBUF, M_NOWAIT | M_ZERO);
2523 0 : if (ring == NULL)
2524 0 : return (NULL);
2525 :
2526 0 : ring->isize = isize;
2527 0 : ring->nitems = nitems;
2528 :
2529 0 : dma = &ring->dma;
2530 0 : dma->tag = sc->sc_dmat;
2531 0 : rc = bus_dmamap_create(dma->tag, size, maxsegs, PAGE_SIZE, 0,
2532 : BUS_DMA_NOWAIT, &dma->map);
2533 0 : if (rc != 0) {
2534 0 : printf("%s: failed to allocate DMA handle\n",
2535 0 : sc->sc_dev.dv_xname);
2536 0 : goto fail_0;
2537 : }
2538 :
2539 0 : rc = bus_dmamem_alloc(dma->tag, size, 0, 0, &dma->segs, maxsegs,
2540 : &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
2541 0 : if (rc != 0) {
2542 0 : printf("%s: failed to allocate DMA memory\n",
2543 0 : sc->sc_dev.dv_xname);
2544 0 : goto fail_1;
2545 : }
2546 :
2547 0 : rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
2548 : &dma->vaddr, BUS_DMA_NOWAIT);
2549 0 : if (rc != 0) {
2550 0 : printf("%s: failed to map DMA memory\n", sc->sc_dev.dv_xname);
2551 : goto fail_2;
2552 : }
2553 :
2554 0 : bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2555 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2556 :
2557 0 : dma->paddr = 0;
2558 0 : dma->size = size;
2559 :
2560 0 : return (ring);
2561 :
2562 : fail_2:
2563 0 : bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
2564 : fail_1:
2565 0 : bus_dmamap_destroy(dma->tag, dma->map);
2566 : fail_0:
2567 0 : free(ring, M_DEVBUF, 0);
2568 0 : return (NULL);
2569 0 : }
2570 :
2571 : void
2572 0 : oce_destroy_ring(struct oce_softc *sc, struct oce_ring *ring)
2573 : {
2574 0 : oce_dma_free(sc, &ring->dma);
2575 0 : free(ring, M_DEVBUF, 0);
2576 0 : }
2577 :
2578 : int
2579 0 : oce_load_ring(struct oce_softc *sc, struct oce_ring *ring,
2580 : struct oce_pa *pa, int maxsegs)
2581 : {
2582 0 : struct oce_dma_mem *dma = &ring->dma;
2583 : int i;
2584 :
2585 0 : if (bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
2586 : ring->isize * ring->nitems, NULL, BUS_DMA_NOWAIT)) {
2587 0 : printf("%s: failed to load a ring map\n", sc->sc_dev.dv_xname);
2588 0 : return (0);
2589 : }
2590 :
2591 0 : if (dma->map->dm_nsegs > maxsegs) {
2592 0 : printf("%s: too many segments\n", sc->sc_dev.dv_xname);
2593 0 : return (0);
2594 : }
2595 :
2596 0 : bus_dmamap_sync(dma->tag, dma->map, 0, dma->map->dm_mapsize,
2597 : BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2598 :
2599 0 : for (i = 0; i < dma->map->dm_nsegs; i++)
2600 0 : pa[i].addr = dma->map->dm_segs[i].ds_addr;
2601 :
2602 0 : return (dma->map->dm_nsegs);
2603 0 : }
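/*
 * Worked example (sizes assumed for illustration): a 1024-entry ring
 * of 16-byte items occupies 16 KB; with 4 KB pages the load yields up
 * to four DMA segments, so oce_load_ring() fills pa[0..3] and returns
 * 4.  A return of 0 signals failure, which every caller treats as an
 * error.
 */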
2604 :
2605 : static inline void *
2606 0 : oce_ring_get(struct oce_ring *ring)
2607 : {
2608 0 : int index = ring->index;
2609 :
2610 0 : if (++ring->index == ring->nitems)
2611 0 : ring->index = 0;
2612 0 : return ((void *)(ring->dma.vaddr + index * ring->isize));
2613 : }
2614 :
2615 : static inline void *
2616 0 : oce_ring_first(struct oce_ring *ring)
2617 : {
2618 0 : return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2619 : }
2620 :
2621 : static inline void *
2622 0 : oce_ring_next(struct oce_ring *ring)
2623 : {
2624 0 : if (++ring->index == ring->nitems)
2625 0 : ring->index = 0;
2626 0 : return ((void *)(ring->dma.vaddr + ring->index * ring->isize));
2627 : }
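/*
 * A sketch of how these accessors are meant to be driven (the
 * OCE_RING_FOREACH macro used throughout this file presumably expands
 * to something equivalent; this is an assumption, not its definition):
 *
 *	for (cqe = oce_ring_first(ring); MQ_CQE_VALID(cqe);
 *	    cqe = oce_ring_next(ring)) {
 *		... handle cqe ...
 *	}
 *
 * oce_ring_get() is the producer-side variant: it returns the current
 * slot and then advances the index.
 */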
2628 :
2629 : struct oce_pkt *
2630 0 : oce_pkt_alloc(struct oce_softc *sc, size_t size, int nsegs, int maxsegsz)
2631 : {
2632 : struct oce_pkt *pkt;
2633 :
2634 0 : if ((pkt = pool_get(oce_pkt_pool, PR_NOWAIT | PR_ZERO)) == NULL)
2635 0 : return (NULL);
2636 :
2637 0 : if (bus_dmamap_create(sc->sc_dmat, size, nsegs, maxsegsz, 0,
2638 : BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &pkt->map)) {
2639 0 : pool_put(oce_pkt_pool, pkt);
2640 0 : return (NULL);
2641 : }
2642 :
2643 0 : return (pkt);
2644 0 : }
2645 :
2646 : void
2647 0 : oce_pkt_free(struct oce_softc *sc, struct oce_pkt *pkt)
2648 : {
2649 0 : if (pkt->map) {
2650 0 : bus_dmamap_unload(sc->sc_dmat, pkt->map);
2651 0 : bus_dmamap_destroy(sc->sc_dmat, pkt->map);
2652 0 : }
2653 0 : pool_put(oce_pkt_pool, pkt);
2654 0 : }
2655 :
2656 : static inline struct oce_pkt *
2657 0 : oce_pkt_get(struct oce_pkt_list *lst)
2658 : {
2659 : struct oce_pkt *pkt;
2660 :
2661 0 : pkt = SIMPLEQ_FIRST(lst);
2662 0 : if (pkt == NULL)
2663 0 : return (NULL);
2664 :
2665 0 : SIMPLEQ_REMOVE_HEAD(lst, entry);
2666 :
2667 0 : return (pkt);
2668 0 : }
2669 :
2670 : static inline void
2671 0 : oce_pkt_put(struct oce_pkt_list *lst, struct oce_pkt *pkt)
2672 : {
2673 0 : SIMPLEQ_INSERT_TAIL(lst, pkt, entry);
2674 0 : }
2675 :
2676 : /**
2677 : * @brief Wait for FW to become ready and reset it
2678 : * @param sc software handle to the device
2679 : */
2680 : int
2681 0 : oce_init_fw(struct oce_softc *sc)
2682 : {
2683 0 : struct ioctl_common_function_reset cmd;
2684 : uint32_t reg;
2685 : int err = 0, tmo = 60000;
2686 :
2687 : /* read semaphore CSR */
2688 0 : reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2689 :
2690 : /* if FW is still awaiting the host, request a chip reset to (re)start POST; then wait for FW ready */
2691 0 : if ((reg & MPU_EP_SEM_STAGE_MASK) <= POST_STAGE_AWAITING_HOST_RDY) {
2692 0 : reg = (reg & ~MPU_EP_SEM_STAGE_MASK) | POST_STAGE_CHIP_RESET;
2693 0 : oce_write_csr(sc, MPU_EP_SEMAPHORE(sc), reg);
2694 0 : }
2695 :
2696 : /* wait for FW to become ready */
2697 0 : for (;;) {
2698 0 : if (--tmo == 0)
2699 : break;
2700 :
2701 0 : DELAY(1000);
2702 :
2703 0 : reg = oce_read_csr(sc, MPU_EP_SEMAPHORE(sc));
2704 0 : if (reg & MPU_EP_SEM_ERROR) {
2705 0 : printf(": POST failed: %#x\n", reg);
2706 0 : return (ENXIO);
2707 : }
2708 0 : if ((reg & MPU_EP_SEM_STAGE_MASK) == POST_STAGE_ARMFW_READY) {
2709 : /* reset FW */
2710 0 : if (ISSET(sc->sc_flags, OCE_F_RESET_RQD)) {
2711 0 : memset(&cmd, 0, sizeof(cmd));
2712 0 : err = oce_cmd(sc, SUBSYS_COMMON,
2713 : OPCODE_COMMON_FUNCTION_RESET,
2714 : OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2715 0 : }
2716 0 : return (err);
2717 : }
2718 : }
2719 :
2720 0 : printf(": POST timed out: %#x\n", reg);
2721 :
2722 0 : return (ENXIO);
2723 0 : }
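/*
 * Timing note: the POST poll above runs at most tmo = 60000 iterations
 * of DELAY(1000) microseconds, i.e. roughly a 60 second ceiling before
 * "POST timed out" is reported.
 */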
2724 :
2725 : static inline int
2726 0 : oce_mbox_wait(struct oce_softc *sc)
2727 : {
2728 : int i;
2729 :
2730 0 : for (i = 0; i < 20000; i++) {
2731 0 : if (oce_read_db(sc, PD_MPU_MBOX_DB) & PD_MPU_MBOX_DB_READY)
2732 0 : return (0);
2733 0 : DELAY(100);
2734 : }
2735 0 : return (ETIMEDOUT);
2736 0 : }
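/*
 * Timing note: 20000 polls of DELAY(100) microseconds bound the wait
 * for PD_MPU_MBOX_DB_READY at about 2 seconds before ETIMEDOUT.
 */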
2737 :
2738 : /**
2739 : * @brief Mailbox dispatch
2740 : * @param sc software handle to the device
2741 : */
2742 : int
2743 0 : oce_mbox_dispatch(struct oce_softc *sc)
2744 : {
2745 : uint32_t pa, reg;
2746 : int err;
2747 :
2748 0 : pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 34);
2749 0 : reg = PD_MPU_MBOX_DB_HI | (pa << PD_MPU_MBOX_DB_ADDR_SHIFT);
2750 :
2751 0 : if ((err = oce_mbox_wait(sc)) != 0)
2752 : goto out;
2753 :
2754 0 : oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2755 :
2756 0 : pa = (uint32_t)((uint64_t)OCE_MEM_DVA(&sc->sc_mbx) >> 4) & 0x3fffffff;
2757 0 : reg = pa << PD_MPU_MBOX_DB_ADDR_SHIFT;
2758 :
2759 0 : if ((err = oce_mbox_wait(sc)) != 0)
2760 : goto out;
2761 :
2762 0 : oce_write_db(sc, PD_MPU_MBOX_DB, reg);
2763 :
2764 0 : oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_POSTWRITE);
2765 :
2766 0 : if ((err = oce_mbox_wait(sc)) != 0)
2767 0 : goto out;
2768 :
2769 : out:
2770 0 : oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD);
2771 0 : return (err);
2772 : }
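/*
 * Worked example (address made up for illustration): the bootstrap
 * mailbox doorbell takes the 64-bit DMA address of the mailbox in two
 * writes.  For an address of 0x0000000512345670:
 *
 *	hi = addr >> 34                 = 0x1 (posted with PD_MPU_MBOX_DB_HI)
 *	lo = (addr >> 4) & 0x3fffffff   = 0x11234567
 *
 * each shifted into place by PD_MPU_MBOX_DB_ADDR_SHIFT, with a
 * readiness wait before and after each write so the firmware can latch
 * both halves.
 */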
2773 :
2774 : /**
2775 : * @brief Function to initialize the hw with host endian information
2776 : * @param sc software handle to the device
2777 : * @returns 0 on success, ETIMEDOUT on failure
2778 : */
2779 : int
2780 0 : oce_mbox_init(struct oce_softc *sc)
2781 : {
2782 0 : struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2783 0 : uint8_t *ptr = (uint8_t *)&bmbx->mbx;
2784 :
2785 0 : if (!ISSET(sc->sc_flags, OCE_F_MBOX_ENDIAN_RQD))
2786 0 : return (0);
2787 :
2788 : /* Endian Signature */
2789 0 : *ptr++ = 0xff;
2790 0 : *ptr++ = 0x12;
2791 0 : *ptr++ = 0x34;
2792 0 : *ptr++ = 0xff;
2793 0 : *ptr++ = 0xff;
2794 0 : *ptr++ = 0x56;
2795 0 : *ptr++ = 0x78;
2796 0 : *ptr = 0xff;
2797 :
2798 0 : return (oce_mbox_dispatch(sc));
2799 0 : }
2800 :
2801 : int
2802 0 : oce_cmd(struct oce_softc *sc, int subsys, int opcode, int version,
2803 : void *payload, int length)
2804 : {
2805 0 : struct oce_bmbx *bmbx = OCE_MEM_KVA(&sc->sc_mbx);
2806 0 : struct oce_mbx *mbx = &bmbx->mbx;
2807 : struct mbx_hdr *hdr;
2808 : caddr_t epayload = NULL;
2809 : int err;
2810 :
2811 0 : if (length > OCE_MBX_PAYLOAD)
2812 0 : epayload = OCE_MEM_KVA(&sc->sc_pld);
2813 0 : if (length > OCE_MAX_PAYLOAD)
2814 0 : return (EINVAL);
2815 :
2816 0 : oce_dma_sync(&sc->sc_mbx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2817 :
2818 0 : memset(mbx, 0, sizeof(struct oce_mbx));
2819 :
2820 0 : mbx->payload_length = length;
2821 :
2822 0 : if (epayload) {
2823 0 : mbx->flags = OCE_MBX_F_SGE;
2824 0 : oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREREAD);
2825 0 : memcpy(epayload, payload, length);
2826 0 : mbx->pld.sgl[0].addr = OCE_MEM_DVA(&sc->sc_pld);
2827 0 : mbx->pld.sgl[0].length = length;
2828 0 : hdr = (struct mbx_hdr *)epayload;
2829 0 : } else {
2830 0 : mbx->flags = OCE_MBX_F_EMBED;
2831 0 : memcpy(mbx->pld.data, payload, length);
2832 0 : hdr = (struct mbx_hdr *)&mbx->pld.data;
2833 : }
2834 :
2835 0 : hdr->subsys = subsys;
2836 0 : hdr->opcode = opcode;
2837 0 : hdr->version = version;
2838 0 : hdr->length = length - sizeof(*hdr);
2839 0 : if (opcode == OPCODE_COMMON_FUNCTION_RESET)
2840 0 : hdr->timeout = 2 * OCE_MBX_TIMEOUT;
2841 : else
2842 0 : hdr->timeout = OCE_MBX_TIMEOUT;
2843 :
2844 0 : if (epayload)
2845 0 : oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_PREWRITE);
2846 :
2847 0 : err = oce_mbox_dispatch(sc);
2848 0 : if (err == 0) {
2849 0 : if (epayload) {
2850 0 : oce_dma_sync(&sc->sc_pld, BUS_DMASYNC_POSTWRITE);
2851 0 : memcpy(payload, epayload, length);
2852 0 : } else
2853 0 : memcpy(payload, &mbx->pld.data, length);
2854 : } else
2855 0 : printf("%s: mailbox timeout, subsys %d op %d ver %d "
2856 0 : "%spayload length %d\n", sc->sc_dev.dv_xname, subsys,
2857 0 : opcode, version, epayload ? "ext " : "",
2858 : length);
2859 0 : return (err);
2860 0 : }
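/*
 * Usage sketch (mirrors oce_get_fw_config() below): commands at or
 * below OCE_MBX_PAYLOAD bytes travel embedded in the bootstrap
 * mailbox; larger ones, up to OCE_MAX_PAYLOAD, are staged in sc_pld
 * and passed by SGE.
 *
 *	struct mbx_common_query_fw_config cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	err = oce_cmd(sc, SUBSYS_COMMON,
 *	    OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, OCE_MBX_VER_V0,
 *	    &cmd, sizeof(cmd));
 *
 * On success the same buffer holds the response, since oce_cmd()
 * copies the payload back out.
 */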
2861 :
2862 : /**
2863 : * @brief Firmware starts sending asynchronous notifications during
2864 : * attach only after the first MCC command has been issued. We
2865 : * use the MCC queue only for receiving async events and the
2866 : * mailbox for sending commands, so send at least one dummy
2867 : * command on the MCC to enable those notifications.
2868 : */
2869 : void
2870 0 : oce_first_mcc(struct oce_softc *sc)
2871 : {
2872 : struct oce_mbx *mbx;
2873 0 : struct oce_mq *mq = sc->sc_mq;
2874 : struct mbx_hdr *hdr;
2875 : struct mbx_get_common_fw_version *cmd;
2876 :
2877 0 : mbx = oce_ring_get(mq->ring);
2878 0 : memset(mbx, 0, sizeof(struct oce_mbx));
2879 :
2880 0 : cmd = (struct mbx_get_common_fw_version *)&mbx->pld.data;
2881 :
2882 0 : hdr = &cmd->hdr;
2883 0 : hdr->subsys = SUBSYS_COMMON;
2884 0 : hdr->opcode = OPCODE_COMMON_GET_FW_VERSION;
2885 0 : hdr->version = OCE_MBX_VER_V0;
2886 0 : hdr->timeout = OCE_MBX_TIMEOUT;
2887 0 : hdr->length = sizeof(*cmd) - sizeof(*hdr);
2888 :
2889 0 : mbx->flags = OCE_MBX_F_EMBED;
2890 0 : mbx->payload_length = sizeof(*cmd);
2891 0 : oce_dma_sync(&mq->ring->dma, BUS_DMASYNC_PREREAD |
2892 : BUS_DMASYNC_PREWRITE);
2893 0 : oce_write_db(sc, PD_MQ_DB, mq->id | (1 << 16));
2894 0 : }
2895 :
2896 : int
2897 0 : oce_get_fw_config(struct oce_softc *sc)
2898 : {
2899 0 : struct mbx_common_query_fw_config cmd;
2900 : int err;
2901 :
2902 0 : memset(&cmd, 0, sizeof(cmd));
2903 :
2904 0 : err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2905 : OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2906 0 : if (err)
2907 0 : return (err);
2908 :
2909 0 : sc->sc_port = cmd.params.rsp.port_id;
2910 0 : sc->sc_fmode = cmd.params.rsp.function_mode;
2911 :
2912 0 : return (0);
2913 0 : }
2914 :
2915 : int
2916 0 : oce_check_native_mode(struct oce_softc *sc)
2917 : {
2918 0 : struct mbx_common_set_function_cap cmd;
2919 : int err;
2920 :
2921 0 : memset(&cmd, 0, sizeof(cmd));
2922 :
2923 0 : cmd.params.req.valid_capability_flags = CAP_SW_TIMESTAMPS |
2924 : CAP_BE3_NATIVE_ERX_API;
2925 0 : cmd.params.req.capability_flags = CAP_BE3_NATIVE_ERX_API;
2926 :
2927 0 : err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FUNCTIONAL_CAPS,
2928 : OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2929 0 : if (err)
2930 0 : return (err);
2931 :
2932 0 : if (cmd.params.rsp.capability_flags & CAP_BE3_NATIVE_ERX_API)
2933 0 : SET(sc->sc_flags, OCE_F_BE3_NATIVE);
2934 :
2935 0 : return (0);
2936 0 : }
2937 :
2938 : /**
2939 : * @brief Function for creating a network interface.
2940 : * @param sc software handle to the device
2941 : * @returns 0 on success, error otherwise
2942 : */
2943 : int
2944 0 : oce_create_iface(struct oce_softc *sc, uint8_t *macaddr)
2945 : {
2946 0 : struct mbx_create_common_iface cmd;
2947 : uint32_t caps, caps_en;
2948 : int err = 0;
2949 :
2950 : /* interface capabilities to give device when creating interface */
2951 : caps = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED |
2952 : MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_MCAST_PROMISC |
2953 : MBX_RX_IFACE_RSS;
2954 :
2955 : /* capabilities to enable by default (others set dynamically) */
2956 : caps_en = MBX_RX_IFACE_BROADCAST | MBX_RX_IFACE_UNTAGGED;
2957 :
2958 0 : if (!IS_XE201(sc)) {
2959 : /* LANCER A0 workaround */
2960 : caps |= MBX_RX_IFACE_PASS_L3L4_ERR;
2961 : caps_en |= MBX_RX_IFACE_PASS_L3L4_ERR;
2962 0 : }
2963 :
2964 : /* enable capabilities controlled via driver startup parameters */
2965 0 : if (sc->sc_rss_enable)
2966 0 : caps_en |= MBX_RX_IFACE_RSS;
2967 :
2968 0 : memset(&cmd, 0, sizeof(cmd));
2969 :
2970 0 : cmd.params.req.version = 0;
2971 0 : cmd.params.req.cap_flags = htole32(caps);
2972 0 : cmd.params.req.enable_flags = htole32(caps_en);
2973 0 : if (macaddr != NULL) {
2974 0 : memcpy(&cmd.params.req.mac_addr[0], macaddr, ETHER_ADDR_LEN);
2975 0 : cmd.params.req.mac_invalid = 0;
2976 0 : } else
2977 0 : cmd.params.req.mac_invalid = 1;
2978 :
2979 0 : err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_IFACE,
2980 : OCE_MBX_VER_V0, &cmd, sizeof(cmd));
2981 0 : if (err)
2982 0 : return (err);
2983 :
2984 0 : sc->sc_if_id = letoh32(cmd.params.rsp.if_id);
2985 :
2986 0 : if (macaddr != NULL)
2987 0 : sc->sc_pmac_id = letoh32(cmd.params.rsp.pmac_id);
2988 :
2989 0 : return (0);
2990 0 : }
2991 :
2992 : /**
2993 : * @brief Function to send the mbx command to configure vlan
2994 : * @param sc software handle to the device
2995 : * @param vtags array of vlan tags
2996 : * @param nvtags number of elements in array
2997 : * @param untagged boolean TRUE/FALSE (accept untagged frames)
2998 : * @param promisc flag to enable/disable VLAN promiscuous mode
2999 : * @returns 0 on success, EIO on failure
3000 : */
3001 : int
3002 0 : oce_config_vlan(struct oce_softc *sc, struct normal_vlan *vtags, int nvtags,
3003 : int untagged, int promisc)
3004 : {
3005 0 : struct mbx_common_config_vlan cmd;
3006 :
3007 0 : memset(&cmd, 0, sizeof(cmd));
3008 :
3009 0 : cmd.params.req.if_id = sc->sc_if_id;
3010 0 : cmd.params.req.promisc = promisc;
3011 0 : cmd.params.req.untagged = untagged;
3012 0 : cmd.params.req.num_vlans = nvtags;
3013 :
3014 0 : if (!promisc)
3015 0 : memcpy(cmd.params.req.tags.normal_vlans, vtags,
3016 : nvtags * sizeof(struct normal_vlan));
3017 :
3018 0 : return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CONFIG_IFACE_VLAN,
3019 : OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3020 0 : }
3021 :
3022 : /**
3023 : * @brief Function to set flow control capability in the hardware
3024 : * @param sc software handle to the device
3025 : * @param flags flow control flags to set
3026 : * @returns 0 on success, EIO on failure
3027 : */
3028 : int
3029 0 : oce_set_flow_control(struct oce_softc *sc, uint64_t flags)
3030 : {
3031 0 : struct mbx_common_get_set_flow_control cmd;
3032 : int err;
3033 :
3034 0 : memset(&cmd, 0, sizeof(cmd));
3035 :
3036 0 : cmd.rx_flow_control = flags & IFM_ETH_RXPAUSE ? 1 : 0;
3037 0 : cmd.tx_flow_control = flags & IFM_ETH_TXPAUSE ? 1 : 0;
3038 :
3039 0 : err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_FLOW_CONTROL,
3040 : OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3041 0 : if (err)
3042 0 : return (err);
3043 :
3044 0 : memset(&cmd, 0, sizeof(cmd));
3045 :
3046 0 : err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_GET_FLOW_CONTROL,
3047 : OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3048 0 : if (err)
3049 0 : return (err);
3050 :
3051 0 : sc->sc_fc = cmd.rx_flow_control ? IFM_ETH_RXPAUSE : 0;
3052 0 : sc->sc_fc |= cmd.tx_flow_control ? IFM_ETH_TXPAUSE : 0;
3053 :
3054 0 : return (0);
3055 0 : }
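/*
 * Design note: the function writes the requested pause settings and
 * then immediately queries them back, so sc_fc reflects what the
 * hardware actually accepted rather than what was requested.
 */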
3056 :
3057 : #ifdef OCE_RSS
3058 : /**
3059 : * @brief Function to enable/disable RSS in the hardware
3060 : * @param sc software handle to the device
3061 : * @param enable 0=disable, OCE_RSS_xxx flags otherwise
3062 : * @returns 0 on success, EIO on failure
3063 : */
3064 : int
3065 : oce_config_rss(struct oce_softc *sc, int enable)
3066 : {
3067 : struct mbx_config_nic_rss cmd;
3068 : uint8_t *tbl = &cmd.params.req.cputable;
3069 : int i, j;
3070 :
3071 : memset(&cmd, 0, sizeof(cmd));
3072 :
3073 : if (enable)
3074 : cmd.params.req.enable_rss = RSS_ENABLE_IPV4 | RSS_ENABLE_IPV6 |
3075 : RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_TCP_IPV6;
3076 : cmd.params.req.flush = OCE_FLUSH;
3077 : cmd.params.req.if_id = htole32(sc->sc_if_id);
3078 :
3079 : arc4random_buf(cmd.params.req.hash, sizeof(cmd.params.req.hash));
3080 :
3081 : /*
3082 : * Initialize the RSS CPU indirection table.
3083 : *
3084 : * The table is used to choose the queue to place incoming packets.
3085 : * Incoming packets are hashed. The lowest bits in the hash result
3086 : * are used as the index into the CPU indirection table.
3087 : * Each entry in the table contains the RSS CPU-ID returned by the NIC
3088 : * create. Based on the CPU ID, the receive completion is routed to
3089 : * the corresponding RSS CQs. (Non-RSS packets are always completed
3090 : * on the default (0) CQ).
3091 : */
3092 : for (i = 0, j = 0; j < sc->sc_nrq; j++) {
3093 : if (sc->sc_rq[j]->cfg.is_rss_queue)
3094 : tbl[i++] = sc->sc_rq[j]->rss_cpuid;
3095 : }
3096 : if (i > 0)
3097 : cmd.params.req.cpu_tbl_sz_log2 = htole16(ilog2(i));
3098 : else
3099 : return (ENXIO);
3100 :
3101 : return (oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CONFIG_RSS, OCE_MBX_VER_V0,
3102 : &cmd, sizeof(cmd)));
3103 : }
3104 : #endif /* OCE_RSS */
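/*
 * Worked example for the indirection table above (queue IDs assumed):
 * with two RSS queues whose NIC-assigned CPU IDs are 1 and 2, the loop
 * fills tbl[0] = 1, tbl[1] = 2 and sets cpu_tbl_sz_log2 =
 * htole16(ilog2(2)) = 1.  A packet whose RSS hash ends in bit ...0
 * then completes on the CQ of CPU ID tbl[0], ...1 on tbl[1].
 */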
3105 :
3106 : /**
3107 : * @brief Function to update the hardware multicast filter
3108 : * @param sc software handle to the device
3109 : * @param multi table of multicast addresses
3110 : * @param naddr number of multicast addresses in the table
3111 : */
3112 : int
3113 0 : oce_update_mcast(struct oce_softc *sc,
3114 : uint8_t multi[][ETHER_ADDR_LEN], int naddr)
3115 : {
3116 0 : struct mbx_set_common_iface_multicast cmd;
3117 :
3118 0 : memset(&cmd, 0, sizeof(cmd));
3119 :
3120 0 : memcpy(&cmd.params.req.mac[0], &multi[0], naddr * ETHER_ADDR_LEN);
3121 0 : cmd.params.req.num_mac = htole16(naddr);
3122 0 : cmd.params.req.if_id = sc->sc_if_id;
3123 :
3124 0 : return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_MULTICAST,
3125 : OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3126 0 : }
3127 :
3128 : /**
3129 : * @brief RXF function to enable/disable device promiscuous mode
3130 : * @param sc software handle to the device
3131 : * @param enable enable/disable flag
3132 : * @returns 0 on success, EIO on failure
3133 : * @note
3134 : * The OPCODE_NIC_CONFIG_PROMISCUOUS command is deprecated on Lancer.
3135 : * This function uses the COMMON_SET_IFACE_RX_FILTER command instead.
3136 : */
3137 : int
3138 0 : oce_set_promisc(struct oce_softc *sc, int enable)
3139 : {
3140 0 : struct mbx_set_common_iface_rx_filter cmd;
3141 : struct iface_rx_filter_ctx *req;
3142 :
3143 0 : memset(&cmd, 0, sizeof(cmd));
3144 :
3145 0 : req = &cmd.params.req;
3146 0 : req->if_id = sc->sc_if_id;
3147 :
3148 0 : if (enable)
3149 0 : req->iface_flags = req->iface_flags_mask =
3150 : MBX_RX_IFACE_PROMISC | MBX_RX_IFACE_VLAN_PROMISC;
3151 :
3152 0 : return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_SET_IFACE_RX_FILTER,
3153 : OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3154 0 : }
3155 :
3156 : /**
3157 : * @brief Function to query the link status from the hardware
3158 : * @param sc software handle to the device
3159 : * @param[out] link pointer to the structure returning link attributes
3160 : * @returns 0 on success, EIO on failure
3161 : */
3162 : int
3163 0 : oce_get_link_status(struct oce_softc *sc)
3164 : {
3165 0 : struct mbx_query_common_link_config cmd;
3166 : int err;
3167 :
3168 0 : memset(&cmd, 0, sizeof(cmd));
3169 :
3170 0 : err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_LINK_CONFIG,
3171 : OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3172 0 : if (err)
3173 0 : return (err);
3174 :
3175 0 : sc->sc_link_up = (letoh32(cmd.params.rsp.logical_link_status) ==
3176 : NTWK_LOGICAL_LINK_UP);
3177 :
3178 0 : if (cmd.params.rsp.mac_speed < 5)
3179 0 : sc->sc_link_speed = cmd.params.rsp.mac_speed;
3180 : else
3181 0 : sc->sc_link_speed = 0;
3182 :
3183 0 : return (0);
3184 0 : }
3185 :
3186 : void
3187 0 : oce_macaddr_set(struct oce_softc *sc)
3188 : {
3189 0 : uint32_t old_pmac_id = sc->sc_pmac_id;
3190 : int status = 0;
3191 :
3192 0 : if (!memcmp(sc->sc_macaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN))
3193 0 : return;
3194 :
3195 0 : status = oce_macaddr_add(sc, sc->sc_ac.ac_enaddr, &sc->sc_pmac_id);
3196 0 : if (!status)
3197 0 : status = oce_macaddr_del(sc, old_pmac_id);
3198 : else
3199 0 : printf("%s: failed to set MAC address\n", sc->sc_dev.dv_xname);
3200 0 : }
3201 :
3202 : int
3203 0 : oce_macaddr_get(struct oce_softc *sc, uint8_t *macaddr)
3204 : {
3205 0 : struct mbx_query_common_iface_mac cmd;
3206 : int err;
3207 :
3208 0 : memset(&cmd, 0, sizeof(cmd));
3209 :
3210 0 : cmd.params.req.type = MAC_ADDRESS_TYPE_NETWORK;
3211 0 : cmd.params.req.permanent = 1;
3212 :
3213 0 : err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_QUERY_IFACE_MAC,
3214 : OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3215 0 : if (err == 0)
3216 0 : memcpy(macaddr, &cmd.params.rsp.mac.mac_addr[0],
3217 : ETHER_ADDR_LEN);
3218 0 : return (err);
3219 0 : }
3220 :
3221 : int
3222 0 : oce_macaddr_add(struct oce_softc *sc, uint8_t *enaddr, uint32_t *pmac)
3223 : {
3224 0 : struct mbx_add_common_iface_mac cmd;
3225 : int err;
3226 :
3227 0 : memset(&cmd, 0, sizeof(cmd));
3228 :
3229 0 : cmd.params.req.if_id = htole16(sc->sc_if_id);
3230 0 : memcpy(cmd.params.req.mac_address, enaddr, ETHER_ADDR_LEN);
3231 :
3232 0 : err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_ADD_IFACE_MAC,
3233 : OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3234 0 : if (err == 0)
3235 0 : *pmac = letoh32(cmd.params.rsp.pmac_id);
3236 0 : return (err);
3237 0 : }
3238 :
3239 : int
3240 0 : oce_macaddr_del(struct oce_softc *sc, uint32_t pmac)
3241 : {
3242 0 : struct mbx_del_common_iface_mac cmd;
3243 :
3244 0 : memset(&cmd, 0, sizeof(cmd));
3245 :
3246 0 : cmd.params.req.if_id = htole16(sc->sc_if_id);
3247 0 : cmd.params.req.pmac_id = htole32(pmac);
3248 :
3249 0 : return (oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_DEL_IFACE_MAC,
3250 : OCE_MBX_VER_V0, &cmd, sizeof(cmd)));
3251 0 : }
3252 :
3253 : int
3254 0 : oce_new_rq(struct oce_softc *sc, struct oce_rq *rq)
3255 : {
3256 0 : struct mbx_create_nic_rq cmd;
3257 : int err, npages;
3258 :
3259 0 : memset(&cmd, 0, sizeof(cmd));
3260 :
3261 0 : npages = oce_load_ring(sc, rq->ring, &cmd.params.req.pages[0],
3262 : nitems(cmd.params.req.pages));
3263 0 : if (!npages) {
3264 0 : printf("%s: failed to load the rq ring\n", __func__);
3265 0 : return (1);
3266 : }
3267 :
3268 0 : if (IS_XE201(sc)) {
3269 0 : cmd.params.req.frag_size = rq->fragsize / 2048;
3270 0 : cmd.params.req.page_size = 1;
3271 0 : } else
3272 0 : cmd.params.req.frag_size = ilog2(rq->fragsize);
3273 0 : cmd.params.req.num_pages = npages;
3274 0 : cmd.params.req.cq_id = rq->cq->id;
3275 0 : cmd.params.req.if_id = htole32(sc->sc_if_id);
3276 0 : cmd.params.req.max_frame_size = htole16(rq->mtu);
3277 0 : cmd.params.req.is_rss_queue = htole32(rq->rss);
3278 :
3279 0 : err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_RQ,
3280 0 : IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3281 : sizeof(cmd));
3282 0 : if (err)
3283 0 : return (err);
3284 :
3285 0 : rq->id = letoh16(cmd.params.rsp.rq_id);
3286 0 : rq->rss_cpuid = cmd.params.rsp.rss_cpuid;
3287 :
3288 0 : return (0);
3289 0 : }
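/*
 * Fragment size encoding, assuming the usual 2 KB receive fragment
 * (rq->fragsize == OCE_RX_BUF_SIZE; 2048 is an assumption, not a value
 * visible here): XE201 sends frag_size = 2048 / 2048 = 1 together with
 * page_size = 1, while older chips take the power-of-two exponent,
 * frag_size = ilog2(2048) = 11.
 */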
3290 :
3291 : int
3292 0 : oce_new_wq(struct oce_softc *sc, struct oce_wq *wq)
3293 : {
3294 0 : struct mbx_create_nic_wq cmd;
3295 : int err, npages;
3296 :
3297 0 : memset(&cmd, 0, sizeof(cmd));
3298 :
3299 0 : npages = oce_load_ring(sc, wq->ring, &cmd.params.req.pages[0],
3300 : nitems(cmd.params.req.pages));
3301 0 : if (!npages) {
3302 0 : printf("%s: failed to load the wq ring\n", __func__);
3303 0 : return (1);
3304 : }
3305 :
3306 0 : if (IS_XE201(sc))
3307 0 : cmd.params.req.if_id = sc->sc_if_id;
3308 0 : cmd.params.req.nic_wq_type = NIC_WQ_TYPE_STANDARD;
3309 0 : cmd.params.req.num_pages = npages;
3310 0 : cmd.params.req.wq_size = ilog2(wq->nitems) + 1;
3311 0 : cmd.params.req.cq_id = htole16(wq->cq->id);
3312 0 : cmd.params.req.ulp_num = 1;
3313 :
3314 0 : err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_CREATE_WQ,
3315 0 : IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0, &cmd,
3316 : sizeof(cmd));
3317 0 : if (err)
3318 0 : return (err);
3319 :
3320 0 : wq->id = letoh16(cmd.params.rsp.wq_id);
3321 :
3322 0 : return (0);
3323 0 : }
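/*
 * Ring size encoding: the command carries ilog2(wq->nitems) + 1, so a
 * 512-entry TX ring (a legal size per the 256..2048 bounds checked in
 * oce_create_wq()) is sent as wq_size = 9 + 1 = 10.
 */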
3324 :
3325 : int
3326 0 : oce_new_mq(struct oce_softc *sc, struct oce_mq *mq)
3327 : {
3328 0 : struct mbx_create_common_mq_ex cmd;
3329 : union oce_mq_ext_ctx *ctx;
3330 : int err, npages;
3331 :
3332 0 : memset(&cmd, 0, sizeof(cmd));
3333 :
3334 0 : npages = oce_load_ring(sc, mq->ring, &cmd.params.req.pages[0],
3335 : nitems(cmd.params.req.pages));
3336 0 : if (!npages) {
3337 0 : printf("%s: failed to load the mq ring\n", __func__);
3338 0 : return (-1);
3339 : }
3340 :
3341 0 : ctx = &cmd.params.req.context;
3342 0 : ctx->v0.num_pages = npages;
3343 0 : ctx->v0.cq_id = mq->cq->id;
3344 0 : ctx->v0.ring_size = ilog2(mq->nitems) + 1;
3345 0 : ctx->v0.valid = 1;
3346 : /* Subscribe to all async events, including Link State (bit 1) and Group 5 (bit 5) */
3347 0 : ctx->v0.async_evt_bitmap = 0xffffffff;
3348 :
3349 0 : err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_MQ_EXT,
3350 : OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3351 0 : if (err)
3352 0 : return (err);
3353 :
3354 0 : mq->id = letoh16(cmd.params.rsp.mq_id);
3355 :
3356 0 : return (0);
3357 0 : }
3358 :
3359 : int
3360 0 : oce_new_eq(struct oce_softc *sc, struct oce_eq *eq)
3361 : {
3362 0 : struct mbx_create_common_eq cmd;
3363 : int err, npages;
3364 :
3365 0 : memset(&cmd, 0, sizeof(cmd));
3366 :
3367 0 : npages = oce_load_ring(sc, eq->ring, &cmd.params.req.pages[0],
3368 : nitems(cmd.params.req.pages));
3369 0 : if (!npages) {
3370 0 : printf("%s: failed to load the eq ring\n", __func__);
3371 0 : return (-1);
3372 : }
3373 :
3374 0 : cmd.params.req.ctx.num_pages = htole16(npages);
3375 0 : cmd.params.req.ctx.valid = 1;
3376 0 : cmd.params.req.ctx.size = (eq->isize == 4) ? 0 : 1;
3377 0 : cmd.params.req.ctx.count = ilog2(eq->nitems / 256);
3378 0 : cmd.params.req.ctx.armed = 0;
3379 0 : cmd.params.req.ctx.delay_mult = htole32(eq->delay);
3380 :
3381 0 : err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_EQ,
3382 : OCE_MBX_VER_V0, &cmd, sizeof(cmd));
3383 0 : if (err)
3384 0 : return (err);
3385 :
3386 0 : eq->id = letoh16(cmd.params.rsp.eq_id);
3387 :
3388 0 : return (0);
3389 0 : }
3390 :
3391 : int
3392 0 : oce_new_cq(struct oce_softc *sc, struct oce_cq *cq)
3393 : {
3394 0 : struct mbx_create_common_cq cmd;
3395 : union oce_cq_ctx *ctx;
3396 : int err, npages;
3397 :
3398 0 : memset(&cmd, 0, sizeof(cmd));
3399 :
3400 0 : npages = oce_load_ring(sc, cq->ring, &cmd.params.req.pages[0],
3401 : nitems(cmd.params.req.pages));
3402 0 : if (!npages) {
3403 0 : printf("%s: failed to load the cq ring\n", __func__);
3404 0 : return (-1);
3405 : }
3406 :
3407 0 : ctx = &cmd.params.req.cq_ctx;
3408 :
3409 0 : if (IS_XE201(sc)) {
3410 0 : ctx->v2.num_pages = htole16(npages);
3411 0 : ctx->v2.page_size = 1; /* for 4K */
3412 0 : ctx->v2.eventable = cq->eventable;
3413 0 : ctx->v2.valid = 1;
3414 0 : ctx->v2.count = ilog2(cq->nitems / 256);
3415 0 : ctx->v2.nodelay = cq->nodelay;
3416 0 : ctx->v2.coalesce_wm = cq->ncoalesce;
3417 0 : ctx->v2.armed = 0;
3418 0 : ctx->v2.eq_id = cq->eq->id;
3419 0 : if (ctx->v2.count == 3) {
3420 0 : if (cq->nitems > (4*1024)-1)
3421 0 : ctx->v2.cqe_count = (4*1024)-1;
3422 : else
3423 0 : ctx->v2.cqe_count = cq->nitems;
3424 : }
3425 : } else {
3426 0 : ctx->v0.num_pages = htole16(npages);
3427 0 : ctx->v0.eventable = cq->eventable;
3428 0 : ctx->v0.valid = 1;
3429 0 : ctx->v0.count = ilog2(cq->nitems / 256);
3430 0 : ctx->v0.nodelay = cq->nodelay;
3431 0 : ctx->v0.coalesce_wm = cq->ncoalesce;
3432 0 : ctx->v0.armed = 0;
3433 0 : ctx->v0.eq_id = cq->eq->id;
3434 : }
3435 :
3436 0 : err = oce_cmd(sc, SUBSYS_COMMON, OPCODE_COMMON_CREATE_CQ,
3437 0 : IS_XE201(sc) ? OCE_MBX_VER_V2 : OCE_MBX_VER_V0, &cmd,
3438 : sizeof(cmd));
3439 0 : if (err)
3440 0 : return (err);
3441 :
3442 0 : cq->id = letoh16(cmd.params.rsp.cq_id);
3443 :
3444 0 : return (0);
3445 0 : }
3446 :
3447 : int
3448 0 : oce_init_stats(struct oce_softc *sc)
3449 : {
3450 : union cmd {
3451 : struct mbx_get_nic_stats_v0 _be2;
3452 : struct mbx_get_nic_stats _be3;
3453 : struct mbx_get_pport_stats _xe201;
3454 : };
3455 :
3456 0 : sc->sc_statcmd = malloc(sizeof(union cmd), M_DEVBUF, M_ZERO | M_NOWAIT);
3457 0 : if (sc->sc_statcmd == NULL) {
3458 0 : printf("%s: failed to allocate statistics command block\n",
3459 0 : sc->sc_dev.dv_xname);
3460 0 : return (-1);
3461 : }
3462 0 : return (0);
3463 0 : }
3464 :
3465 : int
3466 0 : oce_update_stats(struct oce_softc *sc)
3467 : {
3468 0 : struct ifnet *ifp = &sc->sc_ac.ac_if;
3469 0 : uint64_t rxe, txe;
3470 : int err;
3471 :
3472 0 : if (ISSET(sc->sc_flags, OCE_F_BE2))
3473 0 : err = oce_stats_be2(sc, &rxe, &txe);
3474 0 : else if (ISSET(sc->sc_flags, OCE_F_BE3))
3475 0 : err = oce_stats_be3(sc, &rxe, &txe);
3476 : else
3477 0 : err = oce_stats_xe(sc, &rxe, &txe);
3478 0 : if (err)
3479 0 : return (err);
3480 :
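	/*
	 * The firmware counters are cumulative; fold the per-poll change
	 * into the interface counters.  Taking the absolute difference
	 * avoids a huge unsigned wrap-around in case the hardware
	 * counters were reset underneath us.
	 */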
3481 0 : ifp->if_ierrors += (rxe > sc->sc_rx_errors) ?
3482 0 : rxe - sc->sc_rx_errors : sc->sc_rx_errors - rxe;
3483 0 : sc->sc_rx_errors = rxe;
3484 0 : ifp->if_oerrors += (txe > sc->sc_tx_errors) ?
3485 0 : txe - sc->sc_tx_errors : sc->sc_tx_errors - txe;
3486 0 : sc->sc_tx_errors = txe;
3487 :
3488 0 : return (0);
3489 0 : }
3490 :
3491 : int
3492 0 : oce_stats_be2(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3493 : {
3494 0 : struct mbx_get_nic_stats_v0 *cmd = sc->sc_statcmd;
3495 : struct oce_pmem_stats *ms;
3496 : struct oce_rxf_stats_v0 *rs;
3497 : struct oce_port_rxf_stats_v0 *ps;
3498 : int err;
3499 :
3500 0 : memset(cmd, 0, sizeof(*cmd));
3501 :
3502 0 : err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V0,
3503 : cmd, sizeof(*cmd));
3504 0 : if (err)
3505 0 : return (err);
3506 :
3507 0 : ms = &cmd->params.rsp.stats.pmem;
3508 0 : rs = &cmd->params.rsp.stats.rxf;
3509 0 : ps = &rs->port[sc->sc_port];
3510 :
3511 0 : *rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3512 0 : ps->rx_frame_too_long + ps->rx_dropped_runt +
3513 0 : ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3514 0 : ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3515 0 : ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3516 0 : ps->rx_dropped_too_short + ps->rx_out_range_errors +
3517 0 : ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3518 0 : ps->rx_alignment_symbol_errors;
3519 0 : if (sc->sc_if_id)
3520 0 : *rxe += rs->port1_jabber_events;
3521 : else
3522 0 : *rxe += rs->port0_jabber_events;
3523 0 : *rxe += ms->eth_red_drops;
3524 :
3525 0 : *txe = 0; /* hardware doesn't provide any extra tx error statistics */
3526 :
3527 0 : return (0);
3528 0 : }
3529 :
3530 : int
3531 0 : oce_stats_be3(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3532 : {
3533 0 : struct mbx_get_nic_stats *cmd = sc->sc_statcmd;
3534 : struct oce_pmem_stats *ms;
3535 : struct oce_rxf_stats_v1 *rs;
3536 : struct oce_port_rxf_stats_v1 *ps;
3537 : int err;
3538 :
3539 0 : memset(cmd, 0, sizeof(*cmd));
3540 :
3541 0 : err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_STATS, OCE_MBX_VER_V1,
3542 : cmd, sizeof(*cmd));
3543 0 : if (err)
3544 0 : return (err);
3545 :
3546 0 : ms = &cmd->params.rsp.stats.pmem;
3547 0 : rs = &cmd->params.rsp.stats.rxf;
3548 0 : ps = &rs->port[sc->sc_port];
3549 :
3550 0 : *rxe = ps->rx_crc_errors + ps->rx_in_range_errors +
3551 0 : ps->rx_frame_too_long + ps->rx_dropped_runt +
3552 0 : ps->rx_ip_checksum_errs + ps->rx_tcp_checksum_errs +
3553 0 : ps->rx_udp_checksum_errs + ps->rxpp_fifo_overflow_drop +
3554 0 : ps->rx_dropped_tcp_length + ps->rx_dropped_too_small +
3555 0 : ps->rx_dropped_too_short + ps->rx_out_range_errors +
3556 0 : ps->rx_dropped_header_too_small + ps->rx_input_fifo_overflow_drop +
3557 0 : ps->rx_alignment_symbol_errors + ps->jabber_events;
3558 0 : *rxe += ms->eth_red_drops;
3559 :
3560 0 : *txe = 0; /* hardware doesn't provide any extra tx error statistics */
3561 :
3562 0 : return (0);
3563 0 : }
3564 :
3565 : int
3566 0 : oce_stats_xe(struct oce_softc *sc, uint64_t *rxe, uint64_t *txe)
3567 : {
3568 0 : struct mbx_get_pport_stats *cmd = sc->sc_statcmd;
3569 : struct oce_pport_stats *pps;
3570 : int err;
3571 :
3572 0 : memset(cmd, 0, sizeof(*cmd));
3573 :
3574 0 : cmd->params.req.reset_stats = 0;
3575 0 : cmd->params.req.port_number = sc->sc_if_id;
3576 :
3577 0 : err = oce_cmd(sc, SUBSYS_NIC, OPCODE_NIC_GET_PPORT_STATS,
3578 : OCE_MBX_VER_V0, cmd, sizeof(*cmd));
3579 0 : if (err)
3580 0 : return (err);
3581 :
3582 0 : pps = &cmd->params.rsp.pps;
3583 :
3584 0 : *rxe = pps->rx_discards + pps->rx_errors + pps->rx_crc_errors +
3585 0 : pps->rx_alignment_errors + pps->rx_symbol_errors +
3586 0 : pps->rx_frames_too_long + pps->rx_internal_mac_errors +
3587 0 : pps->rx_undersize_pkts + pps->rx_oversize_pkts + pps->rx_jabbers +
3588 0 : pps->rx_control_frames_unknown_opcode + pps->rx_in_range_errors +
3589 0 : pps->rx_out_of_range_errors + pps->rx_ip_checksum_errors +
3590 0 : pps->rx_tcp_checksum_errors + pps->rx_udp_checksum_errors +
3591 0 : pps->rx_fifo_overflow + pps->rx_input_fifo_overflow +
3592 0 : pps->rx_drops_too_many_frags + pps->rx_drops_mtu;
3593 :
3594 0 : *txe = pps->tx_discards + pps->tx_errors + pps->tx_internal_mac_errors;
3595 :
3596 0 : return (0);
3597 0 : }
|