/*	$OpenBSD: ubsec.c,v 1.164 2018/04/28 15:44:59 jasper Exp $	*/

/*
 * Copyright (c) 2000 Jason L. Wright (jason@thought.net)
 * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org)
 * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#undef UBSEC_DEBUG

/*
 * uBsec 5[56]01, 58xx hardware crypto accelerator
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <crypto/cryptodev.h>
#include <crypto/cryptosoft.h>
#include <dev/rndvar.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/ubsecreg.h>
#include <dev/pci/ubsecvar.h>

/*
 * Prototypes and count for the pci_device structure
 */
int	ubsec_probe(struct device *, void *, void *);
void	ubsec_attach(struct device *, struct device *, void *);
void	ubsec_reset_board(struct ubsec_softc *);
void	ubsec_init_board(struct ubsec_softc *);
void	ubsec_init_pciregs(struct pci_attach_args *pa);
void	ubsec_cleanchip(struct ubsec_softc *);
void	ubsec_totalreset(struct ubsec_softc *);
int	ubsec_free_q(struct ubsec_softc *, struct ubsec_q *);

struct cfattach ubsec_ca = {
	sizeof(struct ubsec_softc), ubsec_probe, ubsec_attach,
};

struct cfdriver ubsec_cd = {
	0, "ubsec", DV_DULL
};

int	ubsec_intr(void *);
int	ubsec_newsession(u_int32_t *, struct cryptoini *);
int	ubsec_freesession(u_int64_t);
int	ubsec_process(struct cryptop *);
void	ubsec_callback(struct ubsec_softc *, struct ubsec_q *);
void	ubsec_feed(struct ubsec_softc *);
void	ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *);
void	ubsec_feed2(struct ubsec_softc *);
void	ubsec_feed4(struct ubsec_softc *);
void	ubsec_rng(void *);
int	ubsec_dma_malloc(struct ubsec_softc *, bus_size_t,
    struct ubsec_dma_alloc *, int);
void	ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *);
int	ubsec_dmamap_aligned(bus_dmamap_t);

#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))

#define WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

#define	SWAP32(x) (x) = htole32(ntohl((x)))
#define	HTOLE32(x) (x) = htole32(x)
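
/*
 * Byte-order notes: the board is run in little-endian mode (see
 * BS_CTRL_LITTLE_ENDIAN in ubsec_init_board()), so every 32-bit word
 * handed to the chip must be little-endian.  SWAP32() rewrites, in
 * place, a word that is in big-endian (network) order -- e.g. key
 * material or MD5/SHA1 state -- into little-endian; HTOLE32() does the
 * same for a word in host order.  Register access is a plain 32-bit
 * bus_space window, used like this in ubsec_intr() below:
 *
 *	stat = READ_REG(sc, BS_STAT);
 *	WRITE_REG(sc, BS_STAT, stat);	(seemingly acks the handled bits)
 */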


struct ubsec_stats ubsecstats;

const struct pci_matchid ubsec_devices[] = {
	{ PCI_VENDOR_BLUESTEEL, PCI_PRODUCT_BLUESTEEL_5501 },
	{ PCI_VENDOR_BLUESTEEL, PCI_PRODUCT_BLUESTEEL_5601 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5801 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5802 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5805 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5820 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5821 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5822 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5823 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5825 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5860 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5861 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5862 },
	{ PCI_VENDOR_SUN, PCI_PRODUCT_SUN_SCA1K },
	{ PCI_VENDOR_SUN, PCI_PRODUCT_SUN_5821 },
};

int
ubsec_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, ubsec_devices,
	    nitems(ubsec_devices)));
}

void
ubsec_attach(struct device *parent, struct device *self, void *aux)
{
	struct ubsec_softc *sc = (struct ubsec_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	pcireg_t memtype;
	const char *intrstr = NULL;
	struct ubsec_dma *dmap;
	bus_size_t iosize;
	u_int32_t i;
	int algs[CRYPTO_ALGORITHM_MAX + 1];

	SIMPLEQ_INIT(&sc->sc_queue);
	SIMPLEQ_INIT(&sc->sc_qchip);
	SIMPLEQ_INIT(&sc->sc_queue2);
	SIMPLEQ_INIT(&sc->sc_qchip2);
	SIMPLEQ_INIT(&sc->sc_queue4);
	SIMPLEQ_INIT(&sc->sc_qchip4);

	sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR;
	sc->sc_maxaggr = UBS_MIN_AGGR;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BLUESTEEL &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BLUESTEEL_5601)
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5802 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5805))
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5820 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5822))
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY;

	if ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5821) ||
	    (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SUN &&
	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SUN_SCA1K ||
	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SUN_5821))) {
		sc->sc_statmask |= BS_STAT_MCR1_ALLEMPTY |
		    BS_STAT_MCR2_ALLEMPTY;
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY;
	}

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5823 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5825))
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY |
		    UBS_FLAGS_AES;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5860 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5861 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5862)) {
		sc->sc_maxaggr = UBS_MAX_AGGR;
		sc->sc_statmask |=
		    BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY |
		    BS_STAT_MCR3_ALLEMPTY | BS_STAT_MCR4_ALLEMPTY;
		sc->sc_flags |= UBS_FLAGS_MULTIMCR | UBS_FLAGS_HWNORM |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_AES |
		    UBS_FLAGS_KEY | UBS_FLAGS_BIGKEY;
#if 0
		/* The RNG is not yet supported */
		sc->sc_flags |= UBS_FLAGS_RNG | UBS_FLAGS_RNG4;
#endif
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BS_BAR);
	if (pci_mapreg_map(pa, BS_BAR, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &iosize, 0)) {
		printf(": can't find mem space\n");
		return;
	}
	sc->sc_dmat = pa->pa_dmat;

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		bus_space_unmap(sc->sc_st, sc->sc_sh, iosize);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ubsec_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		bus_space_unmap(sc->sc_st, sc->sc_sh, iosize);
		return;
	}

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		pci_intr_disestablish(pc, sc->sc_ih);
		bus_space_unmap(sc->sc_st, sc->sc_sh, iosize);
		return;
	}

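	/*
	 * Preallocate the request pool: one ubsec_q plus one DMA-able
	 * ubsec_dmachunk per possible outstanding request, all parked on
	 * sc_freequeue.  ubsec_process() only ever dequeues from this
	 * pool, so no allocations happen on the per-packet fast path.
	 */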
	SIMPLEQ_INIT(&sc->sc_freequeue);
	dmap = sc->sc_dmaa;
	for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
		struct ubsec_q *q;

		q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q),
		    M_DEVBUF, M_NOWAIT);
		if (q == NULL) {
			printf(": can't allocate queue buffers\n");
			break;
		}

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk),
		    &dmap->d_alloc, 0)) {
			printf(": can't allocate dma buffers\n");
			free(q, M_DEVBUF, 0);
			break;
		}
		dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;

		q->q_dma = dmap;
		sc->sc_queuea[i] = q;

		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	}

	bzero(algs, sizeof(algs));
	algs[CRYPTO_3DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	if (sc->sc_flags & UBS_FLAGS_AES)
		algs[CRYPTO_AES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	crypto_register(sc->sc_cid, algs, ubsec_newsession,
	    ubsec_freesession, ubsec_process);

	/*
	 * Reset Broadcom chip
	 */
	ubsec_reset_board(sc);

	/*
	 * Init Broadcom specific PCI settings
	 */
	ubsec_init_pciregs(pa);

	/*
	 * Init Broadcom chip
	 */
	ubsec_init_board(sc);

	printf(": 3DES MD5 SHA1");
	if (sc->sc_flags & UBS_FLAGS_AES)
		printf(" AES");

#ifndef UBSEC_NO_RNG
	if (sc->sc_flags & UBS_FLAGS_RNG) {
		if (sc->sc_flags & UBS_FLAGS_RNG4)
			sc->sc_statmask |= BS_STAT_MCR4_DONE;
		else
			sc->sc_statmask |= BS_STAT_MCR2_DONE;

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
		    &sc->sc_rng.rng_q.q_mcr, 0))
			goto skip_rng;

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass),
		    &sc->sc_rng.rng_q.q_ctx, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		if (ubsec_dma_malloc(sc, sizeof(u_int32_t) *
		    UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		timeout_set(&sc->sc_rngto, ubsec_rng, sc);
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		timeout_add(&sc->sc_rngto, sc->sc_rnghz);
		printf(" RNG");
skip_rng:
		;
	}
#endif /* UBSEC_NO_RNG */

	if (sc->sc_flags & UBS_FLAGS_KEY) {
		sc->sc_statmask |= BS_STAT_MCR2_DONE;
	}

	printf(", %s\n", intrstr);
}

/*
 * UBSEC Interrupt routine
 */
int
ubsec_intr(void *arg)
{
	struct ubsec_softc *sc = arg;
	volatile u_int32_t stat;
	struct ubsec_q *q;
	struct ubsec_dma *dmap;
	u_int16_t flags;
	int npkts = 0, i;

	stat = READ_REG(sc, BS_STAT);

	if ((stat & (BS_STAT_MCR1_DONE|BS_STAT_MCR2_DONE|BS_STAT_MCR4_DONE|
	    BS_STAT_DMAERR)) == 0)
		return (0);

	stat &= sc->sc_statmask;
	WRITE_REG(sc, BS_STAT, stat);		/* IACK */

	/*
	 * Check to see if we have any packets waiting for us
	 */
	if ((stat & BS_STAT_MCR1_DONE)) {
		while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
			q = SIMPLEQ_FIRST(&sc->sc_qchip);
			dmap = q->q_dma;

			if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0)
				break;

			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);

			npkts = q->q_nstacked_mcrs;
			/*
			 * Search for further sc_qchip ubsec_q's that share
			 * the same MCR and complete them too; they must be
			 * at the top.
			 */
			for (i = 0; i < npkts; i++) {
				if (q->q_stacked_mcr[i])
					ubsec_callback(sc, q->q_stacked_mcr[i]);
				else
					break;
			}
			ubsec_callback(sc, q);
		}

		/*
		 * Don't send any more packets to the chip if there has
		 * been a DMAERR.
		 */
		if (!(stat & BS_STAT_DMAERR))
			ubsec_feed(sc);
	}

	/*
	 * Check to see if we have any key setups/rng's waiting for us
	 */
	if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) &&
	    (stat & BS_STAT_MCR2_DONE)) {
		struct ubsec_q2 *q2;
		struct ubsec_mcr *mcr;

		while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) {
			q2 = SIMPLEQ_FIRST(&sc->sc_qchip2);

			bus_dmamap_sync(sc->sc_dmat, q2->q_mcr.dma_map,
			    0, q2->q_mcr.dma_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr;

			/* A bug in newer devices requires swapping this field */
			if (sc->sc_flags & UBS_FLAGS_MULTIMCR)
				flags = swap16(mcr->mcr_flags);
			else
				flags = mcr->mcr_flags;
			if ((flags & htole16(UBS_MCR_DONE)) == 0) {
				bus_dmamap_sync(sc->sc_dmat,
				    q2->q_mcr.dma_map, 0,
				    q2->q_mcr.dma_map->dm_mapsize,
				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
				break;
			}
			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, q_next);
			ubsec_callback2(sc, q2);
			/*
			 * Don't send any more packets to the chip if there
			 * has been a DMAERR.
			 */
			if (!(stat & BS_STAT_DMAERR))
				ubsec_feed2(sc);
		}
	}
	if ((sc->sc_flags & UBS_FLAGS_RNG4) && (stat & BS_STAT_MCR4_DONE)) {
		struct ubsec_q2 *q2;
		struct ubsec_mcr *mcr;

		while (!SIMPLEQ_EMPTY(&sc->sc_qchip4)) {
			q2 = SIMPLEQ_FIRST(&sc->sc_qchip4);

			bus_dmamap_sync(sc->sc_dmat, q2->q_mcr.dma_map,
			    0, q2->q_mcr.dma_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr;

			/* A bug in newer devices requires swapping this field */
			flags = swap16(mcr->mcr_flags);

			if ((flags & htole16(UBS_MCR_DONE)) == 0) {
				bus_dmamap_sync(sc->sc_dmat,
				    q2->q_mcr.dma_map, 0,
				    q2->q_mcr.dma_map->dm_mapsize,
				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
				break;
			}
			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip4, q_next);
			ubsec_callback2(sc, q2);
			/*
			 * Don't send any more packets to the chip if there
			 * has been a DMAERR.
			 */
			if (!(stat & BS_STAT_DMAERR))
				ubsec_feed4(sc);
		}
	}

	/*
	 * Check to see if we got any DMA errors
	 */
	if (stat & BS_STAT_DMAERR) {
#ifdef UBSEC_DEBUG
		volatile u_int32_t a = READ_REG(sc, BS_ERR);

		printf("%s: dmaerr %s@%08x\n", sc->sc_dv.dv_xname,
		    (a & BS_ERR_READ) ? "read" : "write", a & BS_ERR_ADDR);
#endif /* UBSEC_DEBUG */
		ubsecstats.hst_dmaerr++;
		ubsec_totalreset(sc);
		ubsec_feed(sc);
	}

	return (1);
}

/*
 * ubsec_feed() - aggregate and post requests to the chip.
 * It is assumed that the caller is at splnet().
 */
void
ubsec_feed(struct ubsec_softc *sc)
{
#ifdef UBSEC_DEBUG
	static int max;
#endif /* UBSEC_DEBUG */
	struct ubsec_q *q, *q2;
	int npkts, i;
	void *v;
	u_int32_t stat;

	npkts = sc->sc_nqueue;
	if (npkts > sc->sc_maxaggr)
		npkts = sc->sc_maxaggr;
	if (npkts < 2)
		goto feed1;

	if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
		if (stat & BS_STAT_DMAERR) {
			ubsec_totalreset(sc);
			ubsecstats.hst_dmaerr++;
		}
		return;
	}

#ifdef UBSEC_DEBUG
	printf("merging %d records\n", npkts);

	/* XXX temporary aggregation statistics reporting code */
	if (max < npkts) {
		max = npkts;
		printf("%s: new max aggregate %d\n", sc->sc_dv.dv_xname, max);
	}
#endif /* UBSEC_DEBUG */

	q = SIMPLEQ_FIRST(&sc->sc_queue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
	--sc->sc_nqueue;

	bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
	    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	if (q->q_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
		    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	q->q_nstacked_mcrs = npkts - 1;		/* Number of packets stacked */

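	/*
	 * Stack the remaining requests behind the first one: for each
	 * extra packet, everything past the common MCR header (a struct
	 * ubsec_mcr_add) is copied into the leader's d_mcradd[] array, so
	 * that a single write of the leader's physical address to BS_MCR1
	 * posts all npkts packets to the chip at once.
	 */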
	for (i = 0; i < q->q_nstacked_mcrs; i++) {
		q2 = SIMPLEQ_FIRST(&sc->sc_queue);
		bus_dmamap_sync(sc->sc_dmat, q2->q_src_map,
		    0, q2->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		if (q2->q_dst_map != NULL)
			bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map,
			    0, q2->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
		--sc->sc_nqueue;

		v = ((char *)&q2->q_dma->d_dma->d_mcr) + sizeof(struct ubsec_mcr) -
		    sizeof(struct ubsec_mcr_add);
		bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add));
		q->q_stacked_mcr[i] = q2;
	}
	q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
	SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
	bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
	    0, q->q_dma->d_alloc.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_mcr));
	return;

feed1:
	while (!SIMPLEQ_EMPTY(&sc->sc_queue)) {
		if ((stat = READ_REG(sc, BS_STAT)) &
		    (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
			if (stat & BS_STAT_DMAERR) {
				ubsec_totalreset(sc);
				ubsecstats.hst_dmaerr++;
			}
			break;
		}

		q = SIMPLEQ_FIRST(&sc->sc_queue);

		bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
		    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		if (q->q_dst_map != NULL)
			bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
			    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
		    0, q->q_dma->d_alloc.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
		    offsetof(struct ubsec_dmachunk, d_mcr));
#ifdef UBSEC_DEBUG
		printf("feed: q->chip %p %08x\n", q,
		    (u_int32_t)q->q_dma->d_alloc.dma_paddr);
#endif /* UBSEC_DEBUG */
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
		--sc->sc_nqueue;
		SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
	}
}

/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
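/*
 * Session bookkeeping sketch: sessions live in a flat, per-device array.
 * A free slot (ses_used == 0) is reused when possible; otherwise the
 * array is grown by one with mallocarray(), the old contents are copied
 * over and the old array is zeroed before being freed.  The returned id
 * packs the device unit and the slot number via UBSEC_SID().
 */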
int
ubsec_newsession(u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct ubsec_softc *sc = NULL;
	struct ubsec_session *ses = NULL;
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i, sesn;

	if (sidp == NULL || cri == NULL)
		return (EINVAL);

	for (i = 0; i < ubsec_cd.cd_ndevs; i++) {
		sc = ubsec_cd.cd_devs[i];
		if (sc == NULL || sc->sc_cid == (*sidp))
			break;
	}
	if (sc == NULL)
		return (EINVAL);

	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC ||
		    c->cri_alg == CRYPTO_SHA1_HMAC) {
			if (macini)
				return (EINVAL);
			macini = c;
		} else if (c->cri_alg == CRYPTO_3DES_CBC ||
		    c->cri_alg == CRYPTO_AES_CBC) {
			if (encini)
				return (EINVAL);
			encini = c;
		} else
			return (EINVAL);
	}
	if (encini == NULL && macini == NULL)
		return (EINVAL);

	if (encini && encini->cri_alg == CRYPTO_AES_CBC) {
		switch (encini->cri_klen) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (EINVAL);
		}
	}

	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = (struct ubsec_session *)malloc(
		    sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
		if (ses == NULL)
			return (ENOMEM);
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = mallocarray((sesn + 1),
			    sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
			if (ses == NULL)
				return (ENOMEM);
			bcopy(sc->sc_sessions, ses, sesn *
			    sizeof(struct ubsec_session));
			explicit_bzero(sc->sc_sessions, sesn *
			    sizeof(struct ubsec_session));
			free(sc->sc_sessions, M_DEVBUF, 0);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	bzero(ses, sizeof(struct ubsec_session));
	ses->ses_used = 1;
	if (encini) {
		/* Go ahead and compute key in ubsec's byte order */
		if (encini->cri_alg == CRYPTO_AES_CBC) {
			bcopy(encini->cri_key, ses->ses_key,
			    encini->cri_klen / 8);
		} else
			bcopy(encini->cri_key, ses->ses_key, 24);

		SWAP32(ses->ses_key[0]);
		SWAP32(ses->ses_key[1]);
		SWAP32(ses->ses_key[2]);
		SWAP32(ses->ses_key[3]);
		SWAP32(ses->ses_key[4]);
		SWAP32(ses->ses_key[5]);
		SWAP32(ses->ses_key[6]);
		SWAP32(ses->ses_key[7]);
	}

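	/*
	 * Standard HMAC precomputation: hash key^ipad and key^opad once
	 * here and store the resulting intermediate digest states in the
	 * session (ses_hminner/ses_hmouter).  The chip continues from
	 * these states, so per-packet it only has to hash the payload.
	 * The key is XORed back to its original value afterwards.
	 */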
	if (macini) {
		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_IPAD_VAL;

		if (macini->cri_alg == CRYPTO_MD5_HMAC) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_ipad_buffer,
			    HMAC_MD5_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(md5ctx.state, ses->ses_hminner,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_ipad_buffer,
			    HMAC_SHA1_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(sha1ctx.state, ses->ses_hminner,
			    sizeof(sha1ctx.state));
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		if (macini->cri_alg == CRYPTO_MD5_HMAC) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_opad_buffer,
			    HMAC_MD5_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(md5ctx.state, ses->ses_hmouter,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_opad_buffer,
			    HMAC_SHA1_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(sha1ctx.state, ses->ses_hmouter,
			    sizeof(sha1ctx.state));
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_OPAD_VAL;
	}

	*sidp = UBSEC_SID(sc->sc_dv.dv_unit, sesn);
	return (0);
}

/*
 * Deallocate a session.
 */
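/*
 * The 64-bit tid carries the 32-bit session id in its low word;
 * UBSEC_CARD() and UBSEC_SESSION() undo the packing done by UBSEC_SID()
 * in ubsec_newsession().  Note that the slot is only zeroed, not freed,
 * so the session array never shrinks.
 */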
int
ubsec_freesession(u_int64_t tid)
{
	struct ubsec_softc *sc;
	int card, session;
	u_int32_t sid = ((u_int32_t)tid) & 0xffffffff;

	card = UBSEC_CARD(sid);
	if (card >= ubsec_cd.cd_ndevs || ubsec_cd.cd_devs[card] == NULL)
		return (EINVAL);
	sc = ubsec_cd.cd_devs[card];
	session = UBSEC_SESSION(sid);
	explicit_bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
	return (0);
}

int
ubsec_process(struct cryptop *crp)
{
	struct ubsec_q *q = NULL;
	int card, err = 0, i, j, s, nicealign;
	struct ubsec_softc *sc;
	struct cryptodesc *crd1, *crd2 = NULL, *maccrd, *enccrd;
	int encoffset = 0, macoffset = 0, cpskip, cpoffset;
	int sskip, dskip, stheend, dtheend;
	int16_t coffset;
	struct ubsec_session *ses, key;
	struct ubsec_dma *dmap = NULL;
	u_int16_t flags = 0;
	int ivlen = 0, keylen = 0;

	if (crp == NULL || crp->crp_callback == NULL) {
		ubsecstats.hst_invalid++;
		return (EINVAL);
	}
	card = UBSEC_CARD(crp->crp_sid);
	if (card >= ubsec_cd.cd_ndevs || ubsec_cd.cd_devs[card] == NULL) {
		ubsecstats.hst_invalid++;
		return (EINVAL);
	}

	sc = ubsec_cd.cd_devs[card];

	s = splnet();

	if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
		ubsecstats.hst_queuefull++;
		splx(s);
		err = ENOMEM;
		goto errout2;
	}

	q = SIMPLEQ_FIRST(&sc->sc_freequeue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next);
	splx(s);

	dmap = q->q_dma;		/* Save dma pointer */
	bzero(q, sizeof(struct ubsec_q));
	bzero(&key, sizeof(key));

	q->q_sesn = UBSEC_SESSION(crp->crp_sid);
	q->q_dma = dmap;
	ses = &sc->sc_sessions[q->q_sesn];

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		q->q_src_m = (struct mbuf *)crp->crp_buf;
		q->q_dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		q->q_src_io = (struct uio *)crp->crp_buf;
		q->q_dst_io = (struct uio *)crp->crp_buf;
	} else {
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous blocks! */
	}

	bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr));

	dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
	dmap->d_dma->d_mcr.mcr_flags = 0;
	q->q_crp = crp;

	if (crp->crp_ndesc < 1) {
		err = EINVAL;
		goto errout;
	}
	crd1 = &crp->crp_desc[0];
	if (crp->crp_ndesc >= 2)
		crd2 = &crp->crp_desc[1];

	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) {
			maccrd = NULL;
			enccrd = crd1;
		} else {
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC) &&
		    (crd2->crd_alg == CRYPTO_3DES_CBC ||
		    crd2->crd_alg == CRYPTO_AES_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the ubsec as requested
			 */
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		if (enccrd->crd_alg == CRYPTO_AES_CBC) {
			if ((sc->sc_flags & UBS_FLAGS_AES) == 0) {
				err = EINVAL;
				goto errout;
			}
			flags |= htole16(UBS_PKTCTX_ENC_AES);
			switch (enccrd->crd_klen) {
			case 128:
			case 192:
			case 256:
				keylen = enccrd->crd_klen / 8;
				break;
			default:
				err = EINVAL;
				goto errout;
			}
			ivlen = 16;
		} else {
			flags |= htole16(UBS_PKTCTX_ENC_3DES);
			ivlen = 8;
			keylen = 24;
		}

		encoffset = enccrd->crd_skip;

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				bcopy(enccrd->crd_iv, key.ses_iv, ivlen);
			else
				arc4random_buf(key.ses_iv, ivlen);

			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				if (crp->crp_flags & CRYPTO_F_IMBUF)
					err = m_copyback(q->q_src_m,
					    enccrd->crd_inject,
					    ivlen, key.ses_iv, M_NOWAIT);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copyback(q->q_src_io,
					    enccrd->crd_inject,
					    ivlen, key.ses_iv);
				if (err)
					goto errout;
			}
		} else {
			flags |= htole16(UBS_PKTCTX_INBOUND);

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				bcopy(enccrd->crd_iv, key.ses_iv, ivlen);
			else if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata(q->q_src_m, enccrd->crd_inject,
				    ivlen, (caddr_t)key.ses_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copydata(q->q_src_io,
				    enccrd->crd_inject, ivlen,
				    (caddr_t)key.ses_iv);
		}

		for (i = 0; i < (keylen / 4); i++)
			key.ses_key[i] = ses->ses_key[i];
		for (i = 0; i < (ivlen / 4); i++)
			SWAP32(key.ses_iv[i]);
	}

	if (maccrd) {
		macoffset = maccrd->crd_skip;

		if (maccrd->crd_alg == CRYPTO_MD5_HMAC)
			flags |= htole16(UBS_PKTCTX_AUTH_MD5);
		else
			flags |= htole16(UBS_PKTCTX_AUTH_SHA1);

		for (i = 0; i < 5; i++) {
			key.ses_hminner[i] = ses->ses_hminner[i];
			key.ses_hmouter[i] = ses->ses_hmouter[i];

			HTOLE32(key.ses_hminner[i]);
			HTOLE32(key.ses_hmouter[i]);
		}
	}

	if (enccrd && maccrd) {
		/*
		 * ubsec cannot handle packets where the end of encryption
		 * and authentication are not the same, or where the
		 * encrypted part begins before the authenticated part.
		 */
		if (((encoffset + enccrd->crd_len) !=
		    (macoffset + maccrd->crd_len)) ||
		    (enccrd->crd_skip < maccrd->crd_skip)) {
			err = EINVAL;
			goto errout;
		}
		sskip = maccrd->crd_skip;
		cpskip = dskip = enccrd->crd_skip;
		stheend = maccrd->crd_len;
		dtheend = enccrd->crd_len;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
		cpoffset = cpskip + dtheend;
#ifdef UBSEC_DEBUG
		printf("mac: skip %d, len %d, inject %d\n",
		    maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject);
		printf("enc: skip %d, len %d, inject %d\n",
		    enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject);
		printf("src: skip %d, len %d\n", sskip, stheend);
		printf("dst: skip %d, len %d\n", dskip, dtheend);
		printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
		    coffset, stheend, cpskip, cpoffset);
#endif
	} else {
		cpskip = dskip = sskip = macoffset + encoffset;
		dtheend = stheend = (enccrd) ? enccrd->crd_len : maccrd->crd_len;
		cpoffset = cpskip + dtheend;
		coffset = 0;
	}

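	/*
	 * At this point (judging from the debug printfs above):
	 * sskip/dskip are the byte offsets into the source/destination at
	 * which the chip starts reading/writing, stheend/dtheend are the
	 * corresponding lengths, and coffset is the distance from the
	 * start of authentication to the start of encryption, which the
	 * context apparently wants in 32-bit words (hence the ">> 2" when
	 * ph_offset/pc_offset are filled in further down).
	 */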
	if (bus_dmamap_create(sc->sc_dmat, 0xfff0, UBS_MAX_SCATTER,
	    0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) {
		err = ENOMEM;
		goto errout;
	}
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
		    q->q_src_m, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			err = ENOMEM;
			goto errout;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
		    q->q_src_io, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			err = ENOMEM;
			goto errout;
		}
	}
	nicealign = ubsec_dmamap_aligned(q->q_src_map);

	dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);

#ifdef UBSEC_DEBUG
	printf("src skip: %d\n", sskip);
#endif
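	/*
	 * Build the input particle chain: the first fragment goes in the
	 * MCR's mcr_ipktbuf, subsequent fragments in d_sbuf[], linked by
	 * physical address through pb_next.  sskip bytes are trimmed off
	 * the front and each particle is capped at 0xfffc bytes.
	 */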
	for (i = j = 0; i < q->q_src_map->dm_nsegs; i++) {
		struct ubsec_pktbuf *pb;
		bus_size_t packl = q->q_src_map->dm_segs[i].ds_len;
		bus_addr_t packp = q->q_src_map->dm_segs[i].ds_addr;

		if (sskip >= packl) {
			sskip -= packl;
			continue;
		}

		packl -= sskip;
		packp += sskip;
		sskip = 0;

		if (packl > 0xfffc) {
			err = EIO;
			goto errout;
		}

		if (j == 0)
			pb = &dmap->d_dma->d_mcr.mcr_ipktbuf;
		else
			pb = &dmap->d_dma->d_sbuf[j - 1];

		pb->pb_addr = htole32(packp);

		if (stheend) {
			if (packl > stheend) {
				pb->pb_len = htole32(stheend);
				stheend = 0;
			} else {
				pb->pb_len = htole32(packl);
				stheend -= packl;
			}
		} else
			pb->pb_len = htole32(packl);

		if ((i + 1) == q->q_src_map->dm_nsegs)
			pb->pb_next = 0;
		else
			pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
			    offsetof(struct ubsec_dmachunk, d_sbuf[j]));
		j++;
	}

	if (enccrd == NULL && maccrd != NULL) {
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_next =
		    htole32(dmap->d_alloc.dma_paddr +
		    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
#ifdef UBSEC_DEBUG
		printf("opkt: %x %x %x\n",
		    dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr,
		    dmap->d_dma->d_mcr.mcr_opktbuf.pb_len,
		    dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);
#endif
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			if (!nicealign) {
				err = EINVAL;
				goto errout;
			}
			if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
			    UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
			    &q->q_dst_map) != 0) {
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
			    q->q_dst_io, BUS_DMA_NOWAIT) != 0) {
				bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
				q->q_dst_map = NULL;
				err = ENOMEM;	/* was missing; mirrors the mbuf path */
				goto errout;
			}
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (nicealign) {
				q->q_dst_m = q->q_src_m;
				q->q_dst_map = q->q_src_map;
			} else {
				q->q_dst_m = m_dup_pkt(q->q_src_m, 0,
				    M_NOWAIT);
				if (q->q_dst_m == NULL) {
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
				    UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
				    &q->q_dst_map) != 0) {
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    q->q_dst_map, q->q_dst_m,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dmat,
					    q->q_dst_map);
					q->q_dst_map = NULL;
					err = ENOMEM;
					goto errout;
				}
			}
		} else {
			err = EINVAL;
			goto errout;
		}

#ifdef UBSEC_DEBUG
		printf("dst skip: %d\n", dskip);
#endif
		for (i = j = 0; i < q->q_dst_map->dm_nsegs; i++) {
			struct ubsec_pktbuf *pb;
			bus_size_t packl = q->q_dst_map->dm_segs[i].ds_len;
			bus_addr_t packp = q->q_dst_map->dm_segs[i].ds_addr;

			if (dskip >= packl) {
				dskip -= packl;
				continue;
			}

			packl -= dskip;
			packp += dskip;
			dskip = 0;

			if (packl > 0xfffc) {
				err = EIO;
				goto errout;
			}

			if (j == 0)
				pb = &dmap->d_dma->d_mcr.mcr_opktbuf;
			else
				pb = &dmap->d_dma->d_dbuf[j - 1];

			pb->pb_addr = htole32(packp);

			if (dtheend) {
				if (packl > dtheend) {
					pb->pb_len = htole32(dtheend);
					dtheend = 0;
				} else {
					pb->pb_len = htole32(packl);
					dtheend -= packl;
				}
			} else
				pb->pb_len = htole32(packl);

			if ((i + 1) == q->q_dst_map->dm_nsegs) {
				if (maccrd)
					pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
					    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
				else
					pb->pb_next = 0;
			} else
				pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
				    offsetof(struct ubsec_dmachunk, d_dbuf[j]));
			j++;
		}
	}

	dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_ctx));

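	/*
	 * Three command-context layouts follow: the AES contexts (sized
	 * by key length, each led by a ubsec_pktctx_hdr), the long-form
	 * 3DES context used by UBS_FLAGS_LONGCTX devices, and the
	 * original short ubsec_pktctx for older parts.
	 */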
	if (enccrd && enccrd->crd_alg == CRYPTO_AES_CBC) {
		struct ubsec_pktctx_aes128 *aes128;
		struct ubsec_pktctx_aes192 *aes192;
		struct ubsec_pktctx_aes256 *aes256;
		struct ubsec_pktctx_hdr *ph;
		u_int8_t *ctx;

		ctx = (u_int8_t *)(dmap->d_alloc.dma_vaddr +
		    offsetof(struct ubsec_dmachunk, d_ctx));

		ph = (struct ubsec_pktctx_hdr *)ctx;
		ph->ph_type = htole16(UBS_PKTCTX_TYPE_IPSEC_AES);
		ph->ph_flags = flags;
		ph->ph_offset = htole16(coffset >> 2);

		switch (enccrd->crd_klen) {
		case 128:
			aes128 = (struct ubsec_pktctx_aes128 *)ctx;
			ph->ph_len = htole16(sizeof(*aes128));
			ph->ph_flags |= htole16(UBS_PKTCTX_KEYSIZE_128);
			for (i = 0; i < 4; i++)
				aes128->pc_aeskey[i] = key.ses_key[i];
			for (i = 0; i < 5; i++)
				aes128->pc_hminner[i] = key.ses_hminner[i];
			for (i = 0; i < 5; i++)
				aes128->pc_hmouter[i] = key.ses_hmouter[i];
			for (i = 0; i < 4; i++)
				aes128->pc_iv[i] = key.ses_iv[i];
			break;
		case 192:
			aes192 = (struct ubsec_pktctx_aes192 *)ctx;
			ph->ph_len = htole16(sizeof(*aes192));
			ph->ph_flags |= htole16(UBS_PKTCTX_KEYSIZE_192);
			for (i = 0; i < 6; i++)
				aes192->pc_aeskey[i] = key.ses_key[i];
			for (i = 0; i < 5; i++)
				aes192->pc_hminner[i] = key.ses_hminner[i];
			for (i = 0; i < 5; i++)
				aes192->pc_hmouter[i] = key.ses_hmouter[i];
			for (i = 0; i < 4; i++)
				aes192->pc_iv[i] = key.ses_iv[i];
			break;
		case 256:
			aes256 = (struct ubsec_pktctx_aes256 *)ctx;
			ph->ph_len = htole16(sizeof(*aes256));
			ph->ph_flags |= htole16(UBS_PKTCTX_KEYSIZE_256);
			for (i = 0; i < 8; i++)
				aes256->pc_aeskey[i] = key.ses_key[i];
			for (i = 0; i < 5; i++)
				aes256->pc_hminner[i] = key.ses_hminner[i];
			for (i = 0; i < 5; i++)
				aes256->pc_hmouter[i] = key.ses_hmouter[i];
			for (i = 0; i < 4; i++)
				aes256->pc_iv[i] = key.ses_iv[i];
			break;
		}
	} else if (sc->sc_flags & UBS_FLAGS_LONGCTX) {
		struct ubsec_pktctx_3des *ctx;
		struct ubsec_pktctx_hdr *ph;

		ctx = (struct ubsec_pktctx_3des *)
		    (dmap->d_alloc.dma_vaddr +
		    offsetof(struct ubsec_dmachunk, d_ctx));

		ph = (struct ubsec_pktctx_hdr *)ctx;
		ph->ph_len = htole16(sizeof(*ctx));
		ph->ph_type = htole16(UBS_PKTCTX_TYPE_IPSEC_3DES);
		ph->ph_flags = flags;
		ph->ph_offset = htole16(coffset >> 2);

		for (i = 0; i < 6; i++)
			ctx->pc_deskey[i] = key.ses_key[i];
		for (i = 0; i < 5; i++)
			ctx->pc_hminner[i] = key.ses_hminner[i];
		for (i = 0; i < 5; i++)
			ctx->pc_hmouter[i] = key.ses_hmouter[i];
		for (i = 0; i < 2; i++)
			ctx->pc_iv[i] = key.ses_iv[i];
	} else {
		struct ubsec_pktctx *ctx = (struct ubsec_pktctx *)
		    (dmap->d_alloc.dma_vaddr +
		    offsetof(struct ubsec_dmachunk, d_ctx));

		ctx->pc_flags = flags;
		ctx->pc_offset = htole16(coffset >> 2);
		for (i = 0; i < 6; i++)
			ctx->pc_deskey[i] = key.ses_key[i];
		for (i = 0; i < 5; i++)
			ctx->pc_hminner[i] = key.ses_hminner[i];
		for (i = 0; i < 5; i++)
			ctx->pc_hmouter[i] = key.ses_hmouter[i];
		for (i = 0; i < 2; i++)
			ctx->pc_iv[i] = key.ses_iv[i];
	}

	s = splnet();
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
	sc->sc_nqueue++;
	ubsecstats.hst_ipackets++;
	ubsecstats.hst_ibytes += dmap->d_alloc.dma_map->dm_mapsize;
	ubsec_feed(sc);
	splx(s);
	explicit_bzero(&key, sizeof(key));
	return (0);

errout:
	if (q != NULL) {
		if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
			m_freem(q->q_dst_m);

		if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
			bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
			bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
		}
		if (q->q_src_map != NULL) {
			bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
		}

		s = splnet();
		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
		splx(s);
	}
	if (err == EINVAL)
		ubsecstats.hst_invalid++;
	else
		ubsecstats.hst_nomem++;
errout2:
	crp->crp_etype = err;
	crypto_done(crp);
	explicit_bzero(&key, sizeof(key));
	return (0);
}

void
ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q)
{
	struct cryptop *crp = (struct cryptop *)q->q_crp;
	struct cryptodesc *crd;
	struct ubsec_dma *dmap = q->q_dma;
	u_int8_t *ctx = (u_int8_t *)(dmap->d_alloc.dma_vaddr +
	    offsetof(struct ubsec_dmachunk, d_ctx));
	struct ubsec_pktctx_hdr *ph = (struct ubsec_pktctx_hdr *)ctx;
	int i;

	ubsecstats.hst_opackets++;
	ubsecstats.hst_obytes += dmap->d_alloc.dma_size;

	bus_dmamap_sync(sc->sc_dmat, dmap->d_alloc.dma_map, 0,
	    dmap->d_alloc.dma_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
		    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
		bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
	}
	bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
	    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
	bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);

	explicit_bzero(ctx, ph->ph_len);

	if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) {
		m_freem(q->q_src_m);
		crp->crp_buf = (caddr_t)q->q_dst_m;
	}

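	/*
	 * Copy out the (truncated, 96-bit) HMAC: 12 bytes from d_macbuf
	 * into the mbuf at crd_inject, or into crp_mac for iovecs.
	 */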
	for (i = 0; i < crp->crp_ndesc; i++) {
		crd = &crp->crp_desc[i];
		if (crd->crd_alg != CRYPTO_MD5_HMAC &&
		    crd->crd_alg != CRYPTO_SHA1_HMAC)
			continue;
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			crp->crp_etype = m_copyback((struct mbuf *)crp->crp_buf,
			    crd->crd_inject, 12,
			    dmap->d_dma->d_macbuf, M_NOWAIT);
		else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac)
			bcopy((caddr_t)dmap->d_dma->d_macbuf,
			    crp->crp_mac, 12);
		break;
	}
	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	crypto_done(crp);
}

/*
 * Feed the key generator; must be called at splnet() or higher.
 */
void
ubsec_feed2(struct ubsec_softc *sc)
{
	struct ubsec_q2 *q;

	while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) {
		if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL)
			break;
		q = SIMPLEQ_FIRST(&sc->sc_queue2);

		bus_dmamap_sync(sc->sc_dmat, q->q_mcr.dma_map, 0,
		    q->q_mcr.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0,
		    q->q_ctx.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, q_next);
		--sc->sc_nqueue2;
		SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next);
	}
}

/*
 * Feed the RNG (used instead of ubsec_feed2() on 5827+ devices).
 */
void
ubsec_feed4(struct ubsec_softc *sc)
{
	struct ubsec_q2 *q;

	while (!SIMPLEQ_EMPTY(&sc->sc_queue4)) {
		if (READ_REG(sc, BS_STAT) & BS_STAT_MCR4_FULL)
			break;
		q = SIMPLEQ_FIRST(&sc->sc_queue4);

		bus_dmamap_sync(sc->sc_dmat, q->q_mcr.dma_map, 0,
		    q->q_mcr.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0,
		    q->q_ctx.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		WRITE_REG(sc, BS_MCR4, q->q_mcr.dma_paddr);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue4, q_next);
		--sc->sc_nqueue4;
		SIMPLEQ_INSERT_TAIL(&sc->sc_qchip4, q, q_next);
	}
}

/*
 * Callback for handling random numbers
 */
void
ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q)
{
	struct ubsec_ctx_keyop *ctx;

	ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr;
	bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0,
	    q->q_ctx.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	switch (q->q_type) {
#ifndef UBSEC_NO_RNG
	case UBS_CTXOP_RNGSHA1:
	case UBS_CTXOP_RNGBYPASS: {
		struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q;
		u_int32_t *p;
		int i;

		bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map, 0,
		    rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		p = (u_int32_t *)rng->rng_buf.dma_vaddr;
		for (i = 0; i < UBSEC_RNG_BUFSIZ; p++, i++)
			enqueue_randomness(*p);
		rng->rng_used = 0;
		timeout_add(&sc->sc_rngto, sc->sc_rnghz);
		break;
	}
#endif
	default:
		printf("%s: unknown ctx op: %x\n", sc->sc_dv.dv_xname,
		    letoh16(ctx->ctx_op));
		break;
	}
}

#ifndef UBSEC_NO_RNG
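/*
 * Queue one RNG request: the context asks for a UBS_CTXOP_RNGSHA1 op
 * whose output lands in rng_buf (UBSEC_RNG_BUFSIZ 32-bit words);
 * ubsec_callback2() above then feeds the words to enqueue_randomness()
 * and re-arms the sc_rngto timeout.
 */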
void
ubsec_rng(void *vsc)
{
	struct ubsec_softc *sc = vsc;
	struct ubsec_q2_rng *rng = &sc->sc_rng;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_rngbypass *ctx;
	int s, *nqueue;

	s = splnet();
	if (rng->rng_used) {
		splx(s);
		return;
	}
	if (sc->sc_flags & UBS_FLAGS_RNG4)
		nqueue = &sc->sc_nqueue4;
	else
		nqueue = &sc->sc_nqueue2;

	(*nqueue)++;
	if (*nqueue >= UBS_MAX_NQUEUE)
		goto out;

	mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr;
	ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr;

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr);
	mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0;
	mcr->mcr_ipktbuf.pb_len = 0;
	mcr->mcr_reserved = mcr->mcr_pktlen = 0;
	mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr);
	mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) &
	    UBS_PKTBUF_LEN);
	mcr->mcr_opktbuf.pb_next = 0;

	ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass));
	ctx->rbp_op = htole16(UBS_CTXOP_RNGSHA1);
	rng->rng_q.q_type = UBS_CTXOP_RNGSHA1;

	bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map, 0,
	    rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (sc->sc_flags & UBS_FLAGS_RNG4) {
		SIMPLEQ_INSERT_TAIL(&sc->sc_queue4, &rng->rng_q, q_next);
		rng->rng_used = 1;
		ubsec_feed4(sc);
	} else {
		SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next);
		rng->rng_used = 1;
		ubsec_feed2(sc);
	}
	splx(s);

	return;

out:
	/*
	 * Something weird happened, generate our own callback.
	 */
	(*nqueue)--;
	splx(s);
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
}
#endif /* UBSEC_NO_RNG */

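/*
 * The usual four-step bus_dma dance (alloc, map, create, load), with a
 * fail_* unwind ladder mirroring it in reverse.  Allocations are
 * single-segment and page-aligned, so dma_paddr below is valid for the
 * whole chunk.
 */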
int
ubsec_dma_malloc(struct ubsec_softc *sc, bus_size_t size,
    struct ubsec_dma_alloc *dma, int mapflags)
{
	int r;

	if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &dma->dma_seg, 1, &dma->dma_nseg, BUS_DMA_NOWAIT)) != 0)
		goto fail_0;

	if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
	    size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0)
		goto fail_1;

	if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &dma->dma_map)) != 0)
		goto fail_2;

	if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
	    size, NULL, BUS_DMA_NOWAIT)) != 0)
		goto fail_3;

	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
	dma->dma_size = size;
	return (0);

fail_3:
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
fail_1:
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
fail_0:
	dma->dma_map = NULL;
	return (r);
}

void
ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma)
{
	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_size);
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
}

/*
 * Resets the board.  Values in the registers are left as-is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
void
ubsec_reset_board(struct ubsec_softc *sc)
{
	volatile u_int32_t ctrl;

	/* Reset the device */
	ctrl = READ_REG(sc, BS_CTRL);
	ctrl |= BS_CTRL_RESET;
	WRITE_REG(sc, BS_CTRL, ctrl);

	/*
	 * Wait approx. 30 PCI clocks = 900 ns = 0.9 us
	 */
	DELAY(10);

	/* Enable RNG and interrupts on newer devices */
	if (sc->sc_flags & UBS_FLAGS_MULTIMCR) {
		WRITE_REG(sc, BS_CFG, BS_CFG_RNG);
		WRITE_REG(sc, BS_INT, BS_INT_DMAINT);
	}
}

/*
 * Init Broadcom registers
 */
void
ubsec_init_board(struct ubsec_softc *sc)
{
	u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
	ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT;

	if (sc->sc_flags & UBS_FLAGS_KEY)
		ctrl |= BS_CTRL_MCR2INT;
	else
		ctrl &= ~BS_CTRL_MCR2INT;

	if (sc->sc_flags & UBS_FLAGS_HWNORM)
		ctrl &= ~BS_CTRL_SWNORM;

	if (sc->sc_flags & UBS_FLAGS_MULTIMCR) {
		ctrl |= BS_CTRL_BSIZE240;
		ctrl &= ~BS_CTRL_MCR3INT;	/* MCR3 is reserved for SSL */

		if (sc->sc_flags & UBS_FLAGS_RNG4)
			ctrl |= BS_CTRL_MCR4INT;
		else
			ctrl &= ~BS_CTRL_MCR4INT;
	}

	WRITE_REG(sc, BS_CTRL, ctrl);
}

/*
 * Init Broadcom PCI registers
 */
void
ubsec_init_pciregs(struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc = pa->pa_pc;
	u_int32_t misc;

	/*
	 * This sets the cache line size to 1, which forces the BCM58xx
	 * chip to do only burst reads/writes.  Cache line reads/writes
	 * are too slow.
	 */
	misc = pci_conf_read(pc, pa->pa_tag, PCI_BHLC_REG);
	misc = (misc & ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT))
	    | ((UBS_DEF_CACHELINE & 0xff) << PCI_CACHELINE_SHIFT);
	pci_conf_write(pc, pa->pa_tag, PCI_BHLC_REG, misc);
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is at splnet().
 */
void
ubsec_cleanchip(struct ubsec_softc *sc)
{
	struct ubsec_q *q;

	while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
		q = SIMPLEQ_FIRST(&sc->sc_qchip);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);
		ubsec_free_q(sc, q);
	}
}

/*
 * Free a ubsec_q.
 * It is assumed that the caller is within splnet().
 */
int
ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
{
	struct ubsec_q *q2;
	struct cryptop *crp;
	int npkts;
	int i;

	npkts = q->q_nstacked_mcrs;

	for (i = 0; i < npkts; i++) {
		if (q->q_stacked_mcr[i]) {
			q2 = q->q_stacked_mcr[i];

			if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m))
				m_freem(q2->q_dst_m);

			crp = (struct cryptop *)q2->q_crp;

			SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next);

			crp->crp_etype = EFAULT;
			crypto_done(crp);
		} else {
			break;
		}
	}

	/*
	 * Free header MCR
	 */
	if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
		m_freem(q->q_dst_m);

	crp = (struct cryptop *)q->q_crp;

	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return (0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is at splnet().
 */
void
ubsec_totalreset(struct ubsec_softc *sc)
{
	ubsec_reset_board(sc);
	ubsec_init_board(sc);
	ubsec_cleanchip(sc);
}

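/*
 * The chip wants 32-bit aligned scatter entries: every segment must
 * start on a 4-byte boundary, and every segment except the last must
 * also have a length that is a multiple of 4.
 */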
int
ubsec_dmamap_aligned(bus_dmamap_t map)
{
	int i;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (map->dm_segs[i].ds_addr & 3)
			return (0);
		if ((i != (map->dm_nsegs - 1)) &&
		    (map->dm_segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}