Line data Source code
1 : /* $OpenBSD: isa_machdep.c,v 1.29 2017/10/14 04:44:43 jsg Exp $ */
2 : /* $NetBSD: isa_machdep.c,v 1.22 1997/06/12 23:57:32 thorpej Exp $ */
3 :
4 : #define ISA_DMA_STATS
5 :
6 : /*-
7 : * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
8 : * All rights reserved.
9 : *
10 : * This code is derived from software contributed to The NetBSD Foundation
11 : * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
12 : * NASA Ames Research Center.
13 : *
14 : * Redistribution and use in source and binary forms, with or without
15 : * modification, are permitted provided that the following conditions
16 : * are met:
17 : * 1. Redistributions of source code must retain the above copyright
18 : * notice, this list of conditions and the following disclaimer.
19 : * 2. Redistributions in binary form must reproduce the above copyright
20 : * notice, this list of conditions and the following disclaimer in the
21 : * documentation and/or other materials provided with the distribution.
22 : *
23 : * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
24 : * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25 : * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26 : * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
27 : * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 : * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 : * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 : * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 : * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 : * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 : * POSSIBILITY OF SUCH DAMAGE.
34 : */
35 :
36 : /*-
37 : * Copyright (c) 1993, 1994, 1996, 1997
38 : * Charles M. Hannum. All rights reserved.
39 : * Copyright (c) 1991 The Regents of the University of California.
40 : * All rights reserved.
41 : *
42 : * This code is derived from software contributed to Berkeley by
43 : * William Jolitz.
44 : *
45 : * Redistribution and use in source and binary forms, with or without
46 : * modification, are permitted provided that the following conditions
47 : * are met:
48 : * 1. Redistributions of source code must retain the above copyright
49 : * notice, this list of conditions and the following disclaimer.
50 : * 2. Redistributions in binary form must reproduce the above copyright
51 : * notice, this list of conditions and the following disclaimer in the
52 : * documentation and/or other materials provided with the distribution.
53 : * 3. Neither the name of the University nor the names of its contributors
54 : * may be used to endorse or promote products derived from this software
55 : * without specific prior written permission.
56 : *
57 : * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
58 : * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 : * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 : * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
61 : * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
62 : * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
63 : * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
64 : * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
65 : * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
66 : * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
67 : * SUCH DAMAGE.
68 : *
69 : * @(#)isa.c 7.2 (Berkeley) 5/13/91
70 : */
71 :
72 : #include <sys/param.h>
73 : #include <sys/systm.h>
74 : #include <sys/syslog.h>
75 : #include <sys/malloc.h>
76 : #include <sys/proc.h>
77 :
78 : #include <uvm/uvm_extern.h>
79 :
80 : #include "ioapic.h"
81 :
82 : #if NIOAPIC > 0
83 : #include <machine/i82093var.h>
84 : #include <machine/mpbiosvar.h>
85 : #endif
86 :
87 : #include <machine/intr.h>
88 : #include <machine/i8259.h>
89 :
90 : #include <dev/isa/isavar.h>
91 :
92 : #include "isadma.h"
93 :
94 : extern paddr_t avail_end;
95 :
96 : #define IDTVEC(name) __CONCAT(X,name)
97 : /* default interrupt vector table entries */
98 : typedef int (*vector)(void);
99 : extern vector IDTVEC(intr)[];
100 : void isa_strayintr(int);
101 : int fakeintr(void *);
102 :
103 : #if NISADMA > 0
104 : int _isa_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
105 : bus_size_t, bus_size_t, int, bus_dmamap_t *);
106 : void _isa_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
107 : int _isa_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
108 : bus_size_t, struct proc *, int);
109 : int _isa_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
110 : struct mbuf *, int);
111 : int _isa_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
112 : struct uio *, int);
113 : int _isa_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
114 : bus_dma_segment_t *, int, bus_size_t, int);
115 : void _isa_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
116 : void _isa_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
117 : bus_addr_t, bus_size_t, int);
118 :
119 : int _isa_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
120 : bus_size_t, bus_dma_segment_t *, int, int *, int);
121 :
122 : int _isa_dma_check_buffer(void *, bus_size_t, int, bus_size_t,
123 : struct proc *);
124 : int _isa_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
125 : bus_size_t, int);
126 : void _isa_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);
127 :
/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 *
 * Positional initializer -- the order must match struct bus_dma_tag
 * exactly.  The _isa_* entries add 16M bounce-buffer handling; the
 * plain _bus_* entries are the generic implementations used as-is.
 */
struct bus_dma_tag isa_bus_dma_tag = {
	NULL,			/* _cookie */
	_isa_bus_dmamap_create,		/* creates map + isa_dma_cookie */
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,		/* bounces if pages are above 16M */
	_isa_bus_dmamap_load_mbuf,	/* unimplemented: panics */
	_isa_bus_dmamap_load_uio,	/* unimplemented: panics */
	_isa_bus_dmamap_load_raw,	/* unimplemented: panics */
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,		/* copies to/from bounce buffer */
	_isa_bus_dmamem_alloc,		/* prefers ISA-reachable memory */
	_bus_dmamem_alloc_range,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};
150 : #endif /* NISADMA > 0 */
151 :
152 : #define GICODE_SEL 10
153 :
154 : u_long intrstray[ICU_LEN];
155 :
/*
 * Caught a stray interrupt, notify.
 *
 * Logs the first five stray interrupts per irq line, then stops so a
 * wedged line cannot flood the console/log.
 */
void
isa_strayintr(int irq)
{
	/*
	 * Stray interrupts on irq 7 occur when an interrupt line is raised
	 * and then lowered before the CPU acknowledges it.  This generally
	 * means either the device is screwed or something is cli'ing too
	 * long and it's timing out.
	 *
	 * NOTE(review): irq indexes intrstray[ICU_LEN] without a bounds
	 * check -- assumes callers only pass valid ISA irq numbers.
	 */
	if (++intrstray[irq] <= 5)
		log(LOG_ERR, "stray interrupt %d%s\n", irq,
		    intrstray[irq] >= 5 ? "; stopped logging" : "");
}
172 :
173 : int intrtype[ICU_LEN], intrmask[ICU_LEN], intrlevel[ICU_LEN];
174 : int iminlevel[ICU_LEN], imaxlevel[ICU_LEN];
175 : struct intrhand *intrhand[ICU_LEN];
176 :
/*
 * Placeholder interrupt handler: claims nothing and does nothing.
 * Installed temporarily while the real handler is being hooked up.
 */
int
fakeintr(void *arg)
{
	(void)arg;
	return 0;
}
182 :
183 : #define LEGAL_IRQ(x) ((x) >= 0 && (x) < ICU_LEN && (x) != 2)
184 :
185 : int
186 0 : isa_intr_alloc(isa_chipset_tag_t ic, int mask, int type, int *irq)
187 : {
188 : int i, bestirq, count;
189 : int tmp;
190 : struct intrhand **p, *q;
191 :
192 0 : if (type == IST_NONE)
193 0 : panic("intr_alloc: bogus type");
194 :
195 : bestirq = -1;
196 : count = -1;
197 :
198 : /* some interrupts should never be dynamically allocated */
199 0 : mask &= 0xdef8;
200 :
201 : /*
202 : * XXX some interrupts will be used later (6 for fdc, 12 for pms).
203 : * the right answer is to do "breadth-first" searching of devices.
204 : */
205 0 : mask &= 0xefbf;
206 :
207 0 : for (i = 0; i < ICU_LEN; i++) {
208 0 : if (LEGAL_IRQ(i) == 0 || (mask & (1<<i)) == 0)
209 : continue;
210 :
211 0 : switch(intrtype[i]) {
212 : case IST_NONE:
213 : /*
214 : * if nothing's using the irq, just return it
215 : */
216 0 : *irq = i;
217 0 : return (0);
218 :
219 : case IST_EDGE:
220 : case IST_LEVEL:
221 0 : if (type != intrtype[i])
222 : continue;
223 : /*
224 : * if the irq is shareable, count the number of other
225 : * handlers, and if it's smaller than the last irq like
226 : * this, remember it
227 : *
228 : * XXX We should probably also consider the
229 : * interrupt level and stick IPL_TTY with other
230 : * IPL_TTY, etc.
231 : */
232 0 : for (p = &intrhand[i], tmp = 0; (q = *p) != NULL;
233 0 : p = &q->ih_next, tmp++)
234 : ;
235 0 : if ((bestirq == -1) || (count > tmp)) {
236 : bestirq = i;
237 : count = tmp;
238 0 : }
239 : break;
240 :
241 : case IST_PULSE:
242 : /* this just isn't shareable */
243 : continue;
244 : }
245 : }
246 :
247 0 : if (bestirq == -1)
248 0 : return (1);
249 :
250 0 : *irq = bestirq;
251 :
252 0 : return (0);
253 0 : }
254 :
255 : /*
256 : * Just check to see if an IRQ is available/can be shared.
257 : * 0 = interrupt not available
258 : * 1 = interrupt shareable
259 : * 2 = interrupt all to ourself
260 : */
261 : int
262 0 : isa_intr_check(isa_chipset_tag_t ic, int irq, int type)
263 : {
264 0 : if (!LEGAL_IRQ(irq) || type == IST_NONE)
265 0 : return (0);
266 :
267 0 : switch (intrtype[irq]) {
268 : case IST_NONE:
269 0 : return (2);
270 : break;
271 : case IST_LEVEL:
272 0 : if (type != intrtype[irq])
273 0 : return (0);
274 0 : return (1);
275 : break;
276 : case IST_EDGE:
277 : case IST_PULSE:
278 0 : if (type != IST_NONE)
279 0 : return (0);
280 : }
281 0 : return (1);
282 0 : }
283 :
/*
 * Set up an interrupt handler to start being called.
 * XXX PRONE TO RACE CONDITIONS, UGLY, 'INTERESTING' INSERTION ALGORITHM.
 *
 * Returns the opaque handler cookie from intr_establish(), to be passed
 * to isa_intr_disestablish() later.
 */
void *
isa_intr_establish(isa_chipset_tag_t ic, int irq, int type, int level,
    int (*ih_fun)(void *), void *ih_arg, char *ih_what)
{
	/* Default routing: the legacy i8259, pin == irq. */
	struct pic *pic = &i8259_pic;
	int pin = irq;

#if NIOAPIC > 0
	struct mp_intr_map *mip;

	/*
	 * If MP BIOS tables are present, the ISA irq may be redirected to
	 * an I/O APIC pin; look it up and route through that ioapic's pic
	 * instead of the i8259.
	 */
	if (mp_busses != NULL) {
		if (mp_isa_bus == NULL)
			panic("no isa bus");

		for (mip = mp_isa_bus->mb_intrs; mip != NULL;
		    mip = mip->next) {
			if (mip->bus_pin == pin) {
				pin = APIC_IRQ_PIN(mip->ioapic_ih);
				pic = &mip->ioapic->sc_pic;
				break;
			}
		}
	}
#endif

	KASSERT(pic);

	/* Hand off to the common interrupt code with the chosen pic/pin. */
	return intr_establish(irq, pic, pin, type, level, ih_fun,
	    ih_arg, ih_what);
}
318 :
319 : /*
320 : * Deregister an interrupt handler.
321 : */
322 : void
323 0 : isa_intr_disestablish(isa_chipset_tag_t ic, void *arg)
324 : {
325 0 : intr_disestablish(arg);
326 0 : return;
327 : }
328 :
/*
 * Machine-dependent hook run when the ISA bus attaches.
 * Only a single ISA bus may exist; record that it has been seen and
 * panic on a second attach attempt.
 */
void
isa_attach_hook(struct device *parent, struct device *self,
    struct isabus_attach_args *iba)
{
	extern int isa_has_been_seen;

	if (isa_has_been_seen)
		panic("isaattach: ISA bus already seen!");

	/* Notify others that the ISA bus has now been attached. */
	isa_has_been_seen = 1;
}
343 :
344 : #if NISADMA > 0
345 : /**********************************************************************
346 : * bus.h dma interface entry points
347 : **********************************************************************/
348 :
349 : #ifdef ISA_DMA_STATS
350 : #define STAT_INCR(v) (v)++
351 : #define STAT_DECR(v) do { \
352 : if ((v) == 0) \
353 : printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
354 : else \
355 : (v)--; \
356 : } while (0)
357 : u_long isa_dma_stats_loads;
358 : u_long isa_dma_stats_bounces;
359 : u_long isa_dma_stats_nbouncebufs;
360 : #else
361 : #define STAT_INCR(v)
362 : #define STAT_DECR(v)
363 : #endif
364 :
/*
 * Create an ISA DMA map.
 *
 * Wraps _bus_dmamap_create() and attaches an isa_dma_cookie to the map.
 * The cookie records whether transfers on this map might need to be
 * bounced below the 16M ISA limit and, if so, provides storage for a
 * private segment list used while bouncing.
 */
int
_isa_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct isa_dma_cookie);

	/*
	 * ISA only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / NBPG) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	cookieflags = 0;
	if ((avail_end > ISA_DMA_BOUNCE_THRESHOLD &&
	    (flags & ISABUS_DMA_32BIT) == 0) ||
	    ((map->_dm_size / NBPG) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		/* Room for the private segment list after the cookie. */
		cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ?
	    (M_NOWAIT|M_ZERO) : (M_WAITOK|M_ZERO))) == NULL) {
		error = ENOMEM;
		goto out;
	}
	cookie = (struct isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;	/* error is still 0: success */

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		/* free(NULL) is a no-op, so this is safe if malloc failed. */
		free(map->_dm_cookie, M_DEVBUF, cookiesize);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
450 :
/*
 * Destroy an ISA DMA map.
 *
 * Releases any bounce pages still attached to the map, the cookie
 * allocated by _isa_bus_dmamap_create(), and finally the map itself.
 */
void
_isa_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	/*
	 * The cookie is variable-sized (it may carry a trailing segment
	 * array); size 0 tells free() the size is not known here.
	 */
	free(cookie, M_DEVBUF, 0);
	_bus_dmamap_destroy(t, map);
}
468 :
/*
 * Load an ISA DMA map with a linear buffer.
 *
 * If the map was flagged as possibly needing to bounce and the buffer
 * contains pages above the 16M ISA limit, the map is loaded with the
 * bounce buffer instead; the caller's buffer is remembered so
 * _isa_bus_dmamap_sync() can copy data in/out.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Check to see if we might need to bounce the transfer.
	 */
	if (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Check if all pages are below the bounce
		 * threshold.  If they are, don't bother bouncing.
		 */
		if (_isa_dma_check_buffer(buf, buflen,
		    map->_dm_segcnt, map->_dm_boundary, p) == 0)
			return (_bus_dmamap_load(t, map, buf, buflen,
			    p, flags));

		STAT_INCR(isa_dma_stats_bounces);

		/*
		 * Allocate bounce pages, if necessary.
		 */
		if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
			error = _isa_dma_alloc_bouncebuf(t, map, buflen,
			    flags);
			if (error)
				return (error);
		}

		/*
		 * Cache a pointer to the caller's buffer and
		 * load the DMA map with the bounce buffer.
		 */
		cookie->id_origbuf = buf;
		cookie->id_origbuflen = buflen;
		error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
		    buflen, p, flags);

		if (error) {
			/*
			 * Free the bounce pages, unless our resources
			 * are reserved for our exclusive use.
			 */
			if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
				_isa_dma_free_bouncebuf(t, map);
		}

		/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
		/*
		 * NOTE(review): the flag is set even when the load above
		 * failed; unload clears it.  Presumably callers never
		 * sync a map whose load returned an error -- confirm.
		 */
		cookie->id_flags |= ID_IS_BOUNCING;
	} else {
		/*
		 * Just use the generic load function.
		 */
		error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	}

	return (error);
}
535 :
/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 *
 * Not implemented on this platform; any call is a fatal
 * programming error.
 */
int
_isa_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
    int flags)
{

	panic("_isa_bus_dmamap_load_mbuf: not implemented");
}
546 :
/*
 * Like _isa_bus_dmamap_load(), but for uios.
 *
 * Not implemented on this platform; any call is a fatal
 * programming error.
 */
int
_isa_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}
557 :
/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 *
 * Not implemented on this platform; any call is a fatal
 * programming error.
 */
int
_isa_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}
569 :
570 : /*
571 : * Unload an ISA DMA map.
572 : */
573 : void
574 0 : _isa_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
575 : {
576 0 : struct isa_dma_cookie *cookie = map->_dm_cookie;
577 :
578 : /*
579 : * If we have bounce pages, free them, unless they're
580 : * reserved for our exclusive use.
581 : */
582 0 : if ((cookie->id_flags & ID_HAS_BOUNCE) &&
583 0 : (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
584 0 : _isa_dma_free_bouncebuf(t, map);
585 :
586 0 : cookie->id_flags &= ~ID_IS_BOUNCING;
587 :
588 : /*
589 : * Do the generic bits of the unload.
590 : */
591 0 : _bus_dmamap_unload(t, map);
592 0 : }
593 :
/*
 * Synchronize an ISA DMA map.
 *
 * For bouncing maps this is where data actually moves: before a device
 * read of memory (PREWRITE) the caller's data is copied out to the
 * bounce pages; after a device write to memory (POSTREAD) it is copied
 * back.  PREREAD and POSTWRITE require no copying here.
 */
void
_isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int op)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

#ifdef DEBUG
	if ((op & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif
#ifdef DIAGNOSTIC
	/* A single call may not mix PRE and POST operations. */
	if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0 &&
	    (op & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");
#endif /* DIAGNOSTIC */

	/* PREREAD and POSTWRITE are no-ops */
	if (op & BUS_DMASYNC_PREWRITE) {
		/*
		 * If we're bouncing this transfer, copy the
		 * caller's buffer to the bounce buffer.
		 */
		if (cookie->id_flags & ID_IS_BOUNCING)
			memcpy(cookie->id_bouncebuf + offset,
			    cookie->id_origbuf + offset, len);
	}

	/* Let the generic code do any machine-level sync work. */
	_bus_dmamap_sync(t, map, offset, len, op);

	if (op & BUS_DMASYNC_POSTREAD) {
		/*
		 * If we're bouncing this transfer, copy the
		 * bounce buffer to the caller's buffer.
		 */
		if (cookie->id_flags & ID_IS_BOUNCING)
			memcpy(cookie->id_origbuf + offset,
			    cookie->id_bouncebuf + offset, len);
	}
}
640 :
641 : /*
642 : * Allocate memory safe for ISA DMA.
643 : */
644 : int
645 0 : _isa_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
646 : bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
647 : int flags)
648 : {
649 : int error;
650 :
651 : /* Try in ISA addressable region first */
652 0 : error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
653 : segs, nsegs, rsegs, flags, 0, ISA_DMA_BOUNCE_THRESHOLD);
654 0 : if (!error)
655 0 : return (error);
656 :
657 : /* Otherwise try anywhere (we'll bounce later) */
658 0 : error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
659 : segs, nsegs, rsegs, flags, (bus_addr_t)0, (bus_addr_t)-1);
660 0 : return (error);
661 0 : }
662 :
663 : /**********************************************************************
664 : * ISA DMA utility functions
665 : **********************************************************************/
666 :
/*
 * Return 0 if all pages in the passed buffer lie within the DMA'able
 * range RAM.
 *
 * Returns EINVAL when a page is above ISA_DMA_BOUNCE_THRESHOLD or a
 * segment would violate the boundary restriction, and EFBIG when the
 * buffer would need more than segcnt physically-contiguous segments.
 */
int
_isa_dma_check_buffer(void *buf, bus_size_t buflen, int segcnt,
    bus_size_t boundary, struct proc *p)
{
	vaddr_t vaddr = (vaddr_t)buf;
	vaddr_t endva;
	paddr_t pa, lastpa;
	/* assumes boundary is 0 or a power of two -- TODO confirm */
	u_long pagemask = ~(boundary - 1);
	pmap_t pmap;
	int nsegs;

	endva = round_page(vaddr + buflen);

	nsegs = 1;
	lastpa = 0;

	/* User buffers translate through the owning process' pmap. */
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; vaddr < endva; vaddr += NBPG) {
		/*
		 * Get physical address for this segment.
		 * NOTE(review): pmap_extract()'s return value is ignored;
		 * assumes the buffer is fully resident and mapped.
		 */
		pmap_extract(pmap, (vaddr_t)vaddr, &pa);
		pa = trunc_page(pa);

		/*
		 * Is it below the DMA'able threshold?
		 */
		if (pa > ISA_DMA_BOUNCE_THRESHOLD)
			return (EINVAL);

		if (lastpa) {
			/*
			 * Check excessive segment count.
			 * (A new segment starts wherever consecutive pages
			 * are not physically contiguous.)
			 */
			if (lastpa + NBPG != pa) {
				if (++nsegs > segcnt)
					return (EFBIG);
			}

			/*
			 * Check boundary restriction.
			 */
			if (boundary) {
				if ((lastpa ^ pa) & pagemask)
					return (EINVAL);
			}
		}
		lastpa = pa;
	}

	return (0);
}
727 :
728 : int
729 0 : _isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size,
730 : int flags)
731 : {
732 0 : struct isa_dma_cookie *cookie = map->_dm_cookie;
733 : int error = 0;
734 :
735 0 : cookie->id_bouncebuflen = round_page(size);
736 0 : error = _bus_dmamem_alloc_range(t, cookie->id_bouncebuflen,
737 0 : NBPG, map->_dm_boundary, cookie->id_bouncesegs,
738 0 : map->_dm_segcnt, &cookie->id_nbouncesegs, flags,
739 : 0, ISA_DMA_BOUNCE_THRESHOLD);
740 0 : if (error)
741 : goto out;
742 0 : error = _bus_dmamem_map(t, cookie->id_bouncesegs,
743 0 : cookie->id_nbouncesegs, cookie->id_bouncebuflen,
744 0 : (caddr_t *)&cookie->id_bouncebuf, flags);
745 :
746 : out:
747 0 : if (error) {
748 0 : _bus_dmamem_free(t, cookie->id_bouncesegs,
749 0 : cookie->id_nbouncesegs);
750 0 : cookie->id_bouncebuflen = 0;
751 0 : cookie->id_nbouncesegs = 0;
752 0 : } else {
753 0 : cookie->id_flags |= ID_HAS_BOUNCE;
754 0 : STAT_INCR(isa_dma_stats_nbouncebufs);
755 : }
756 :
757 0 : return (error);
758 : }
759 :
760 : void
761 0 : _isa_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
762 : {
763 0 : struct isa_dma_cookie *cookie = map->_dm_cookie;
764 :
765 0 : STAT_DECR(isa_dma_stats_nbouncebufs);
766 :
767 0 : _bus_dmamem_unmap(t, cookie->id_bouncebuf,
768 0 : cookie->id_bouncebuflen);
769 0 : _bus_dmamem_free(t, cookie->id_bouncesegs,
770 0 : cookie->id_nbouncesegs);
771 0 : cookie->id_bouncebuflen = 0;
772 0 : cookie->id_nbouncesegs = 0;
773 0 : cookie->id_flags &= ~ID_HAS_BOUNCE;
774 0 : }
775 : #endif /* NISADMA > 0 */
|