Line data Source code
1 : /* $OpenBSD: qlw.c,v 1.31 2017/01/24 02:28:17 visa Exp $ */
2 :
3 : /*
4 : * Copyright (c) 2011 David Gwynne <dlg@openbsd.org>
5 : * Copyright (c) 2013, 2014 Jonathan Matthew <jmatthew@openbsd.org>
6 : * Copyright (c) 2014 Mark Kettenis <kettenis@openbsd.org>
7 : *
8 : * Permission to use, copy, modify, and distribute this software for any
9 : * purpose with or without fee is hereby granted, provided that the above
10 : * copyright notice and this permission notice appear in all copies.
11 : *
12 : * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 : * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 : * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 : * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 : * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 : * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 : * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 : */
20 :
21 : #include <sys/param.h>
22 : #include <sys/systm.h>
23 : #include <sys/atomic.h>
24 : #include <sys/device.h>
25 : #include <sys/ioctl.h>
26 : #include <sys/malloc.h>
27 : #include <sys/kernel.h>
28 : #include <sys/mutex.h>
29 : #include <sys/rwlock.h>
30 : #include <sys/sensors.h>
31 : #include <sys/queue.h>
32 :
33 : #include <machine/bus.h>
34 :
35 : #include <scsi/scsi_all.h>
36 : #include <scsi/scsiconf.h>
37 :
38 : #include <dev/ic/qlwreg.h>
39 : #include <dev/ic/qlwvar.h>
40 :
#ifndef SMALL_KERNEL
#define QLW_DEBUG
#endif

#ifdef QLW_DEBUG
/* Print only when every bit in mask (m) is enabled in qlwdebug. */
#define DPRINTF(m, f...) do { if ((qlwdebug & (m)) == (m)) printf(f); } \
    while (0)
#define QLW_D_MBOX 0x01		/* mailbox command traffic */
#define QLW_D_INTR 0x02		/* interrupt/completion handling */
#define QLW_D_PORT 0x04		/* port/bus events */
#define QLW_D_IO 0x08		/* command submission/completion */
#define QLW_D_IOCB 0x10		/* raw IOCB dumps */
/* Default: log port events, interrupts and mailbox traffic. */
int qlwdebug = QLW_D_PORT | QLW_D_INTR | QLW_D_MBOX;
#else
#define DPRINTF(m, f...)
#endif
57 :
/* Autoconf glue: driver definition for qlw(4). */
struct cfdriver qlw_cd = {
	NULL,
	"qlw",
	DV_DULL
};
63 :
64 : void qlw_scsi_cmd(struct scsi_xfer *);
65 : int qlw_scsi_probe(struct scsi_link *);
66 :
67 : u_int16_t qlw_read(struct qlw_softc *, bus_size_t);
68 : void qlw_write(struct qlw_softc *, bus_size_t, u_int16_t);
69 : void qlw_host_cmd(struct qlw_softc *sc, u_int16_t);
70 :
71 : int qlw_mbox(struct qlw_softc *, int, int);
72 : void qlw_mbox_putaddr(u_int16_t *, struct qlw_dmamem *);
73 : u_int16_t qlw_read_mbox(struct qlw_softc *, int);
74 : void qlw_write_mbox(struct qlw_softc *, int, u_int16_t);
75 :
76 : int qlw_config_bus(struct qlw_softc *, int);
77 : int qlw_config_target(struct qlw_softc *, int, int);
78 : void qlw_update_bus(struct qlw_softc *, int);
79 : void qlw_update_target(struct qlw_softc *, int, int);
80 : void qlw_update_task(void *);
81 :
82 : void qlw_handle_intr(struct qlw_softc *, u_int16_t, u_int16_t);
83 : void qlw_set_ints(struct qlw_softc *, int);
84 : int qlw_read_isr(struct qlw_softc *, u_int16_t *, u_int16_t *);
85 : void qlw_clear_isr(struct qlw_softc *, u_int16_t);
86 :
87 : void qlw_update(struct qlw_softc *, int);
88 : void qlw_put_marker(struct qlw_softc *, int, void *);
89 : void qlw_put_cmd(struct qlw_softc *, void *, struct scsi_xfer *,
90 : struct qlw_ccb *);
91 : void qlw_put_cont(struct qlw_softc *, void *, struct scsi_xfer *,
92 : struct qlw_ccb *, int);
93 : struct qlw_ccb *qlw_handle_resp(struct qlw_softc *, u_int16_t);
94 : void qlw_get_header(struct qlw_softc *, struct qlw_iocb_hdr *,
95 : int *, int *);
96 : void qlw_put_header(struct qlw_softc *, struct qlw_iocb_hdr *,
97 : int, int);
98 : void qlw_put_data_seg(struct qlw_softc *, struct qlw_iocb_seg *,
99 : bus_dmamap_t, int);
100 :
101 : int qlw_softreset(struct qlw_softc *);
102 : void qlw_dma_burst_enable(struct qlw_softc *);
103 :
104 : int qlw_async(struct qlw_softc *, u_int16_t);
105 :
106 : int qlw_load_firmware_words(struct qlw_softc *, const u_int16_t *,
107 : u_int16_t);
108 : int qlw_load_firmware(struct qlw_softc *);
109 : int qlw_read_nvram(struct qlw_softc *);
110 : void qlw_parse_nvram_1040(struct qlw_softc *, int);
111 : void qlw_parse_nvram_1080(struct qlw_softc *, int);
112 : void qlw_init_defaults(struct qlw_softc *, int);
113 :
114 : struct qlw_dmamem *qlw_dmamem_alloc(struct qlw_softc *, size_t);
115 : void qlw_dmamem_free(struct qlw_softc *, struct qlw_dmamem *);
116 :
117 : int qlw_alloc_ccbs(struct qlw_softc *);
118 : void qlw_free_ccbs(struct qlw_softc *);
119 : void *qlw_get_ccb(void *);
120 : void qlw_put_ccb(void *, void *);
121 :
122 : #ifdef QLW_DEBUG
123 : void qlw_dump_iocb(struct qlw_softc *, void *, int);
124 : void qlw_dump_iocb_segs(struct qlw_softc *, void *, int);
125 : #else
126 : #define qlw_dump_iocb(sc, h, fl) do { /* nothing */ } while (0)
127 : #define qlw_dump_iocb_segs(sc, h, fl) do { /* nothing */ } while (0)
128 : #endif
129 :
130 : static inline int
131 0 : qlw_xs_bus(struct qlw_softc *sc, struct scsi_xfer *xs)
132 : {
133 0 : return ((xs->sc_link->scsibus == sc->sc_link[0].scsibus) ? 0 : 1);
134 : }
135 :
136 : static inline u_int16_t
137 0 : qlw_swap16(struct qlw_softc *sc, u_int16_t value)
138 : {
139 0 : if (sc->sc_isp_gen == QLW_GEN_ISP1000)
140 0 : return htobe16(value);
141 : else
142 0 : return htole16(value);
143 0 : }
144 :
145 : static inline u_int32_t
146 0 : qlw_swap32(struct qlw_softc *sc, u_int32_t value)
147 : {
148 0 : if (sc->sc_isp_gen == QLW_GEN_ISP1000)
149 0 : return htobe32(value);
150 : else
151 0 : return htole32(value);
152 0 : }
153 :
154 : static inline u_int16_t
155 0 : qlw_queue_read(struct qlw_softc *sc, bus_size_t offset)
156 : {
157 0 : return qlw_read(sc, sc->sc_mbox_base + offset);
158 : }
159 :
160 : static inline void
161 0 : qlw_queue_write(struct qlw_softc *sc, bus_size_t offset, u_int16_t value)
162 : {
163 0 : qlw_write(sc, sc->sc_mbox_base + offset, value);
164 0 : }
165 :
/* Adapter entry points handed to the SCSI midlayer at scsibus attach. */
struct scsi_adapter qlw_switch = {
	qlw_scsi_cmd,
	scsi_minphys,
	qlw_scsi_probe,
	NULL, /* scsi_free */
	NULL /* ioctl */
};
173 :
/*
 * One-time controller bring-up: identify the ISP generation, (soft)reset
 * the chip, load defaults and NVRAM settings, load and start firmware,
 * size and initialize the request/response rings, program the global
 * SCSI parameters, reset the busses, and attach a scsibus per bus.
 * Returns 0 on success or an errno on any initialization failure.
 */
int
qlw_attach(struct qlw_softc *sc)
{
	struct scsibus_attach_args saa;
	void (*parse_nvram)(struct qlw_softc *, int);
	int reset_delay;
	int bus;

	task_set(&sc->sc_update_task, qlw_update_task, sc);

	switch (sc->sc_isp_gen) {
	case QLW_GEN_ISP1000:
		/*
		 * No NVRAM on the ISP1000; parse_nvram stays unset here.
		 * NOTE(review): relies on qlw_read_nvram() failing when
		 * sc_nvram_size is 0 so parse_nvram is never called --
		 * confirm against qlw_read_nvram().
		 */
		sc->sc_nvram_size = 0;
		break;
	case QLW_GEN_ISP1040:
		sc->sc_nvram_size = 128;
		sc->sc_nvram_minversion = 2;
		parse_nvram = qlw_parse_nvram_1040;
		break;
	case QLW_GEN_ISP1080:
	case QLW_GEN_ISP12160:
		sc->sc_nvram_size = 256;
		sc->sc_nvram_minversion = 1;
		parse_nvram = qlw_parse_nvram_1080;
		break;

	default:
		printf("unknown isp type\n");
		return (ENXIO);
	}

	/* after reset, mbox registers 1-3 should contain the string "ISP " */
	if (qlw_read_mbox(sc, 1) != 0x4953 ||
	    qlw_read_mbox(sc, 2) != 0x5020 ||
	    qlw_read_mbox(sc, 3) != 0x2020) {
		/* try releasing the risc processor */
		qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE);
	}

	qlw_host_cmd(sc, QLW_HOST_CMD_PAUSE);
	if (qlw_softreset(sc) != 0) {
		printf("softreset failed\n");
		return (ENXIO);
	}

	for (bus = 0; bus < sc->sc_numbusses; bus++)
		qlw_init_defaults(sc, bus);

	/* NVRAM settings, when readable, override the defaults. */
	if (qlw_read_nvram(sc) == 0) {
		for (bus = 0; bus < sc->sc_numbusses; bus++)
			parse_nvram(sc, bus);
	}

#ifndef ISP_NOFIRMWARE
	if (sc->sc_firmware && qlw_load_firmware(sc)) {
		printf("firmware load failed\n");
		return (ENXIO);
	}
#endif

	/* execute firmware */
	sc->sc_mbox[0] = QLW_MBOX_EXEC_FIRMWARE;
	sc->sc_mbox[1] = QLW_CODE_ORG;
	if (qlw_mbox(sc, 0x0003, 0x0001)) {
		printf("ISP couldn't exec firmware: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	delay(250000);	/* from isp(4) */

	sc->sc_mbox[0] = QLW_MBOX_ABOUT_FIRMWARE;
	if (qlw_mbox(sc, QLW_MBOX_ABOUT_FIRMWARE_IN,
	    QLW_MBOX_ABOUT_FIRMWARE_OUT)) {
		printf("ISP not talking after firmware exec: %x\n",
		    sc->sc_mbox[0]);
		return (ENXIO);
	}
	/* The ISP1000 firmware we use doesn't return a version number. */
	if (sc->sc_isp_gen == QLW_GEN_ISP1000 && sc->sc_firmware) {
		sc->sc_mbox[1] = 1;
		sc->sc_mbox[2] = 37;
		sc->sc_mbox[3] = 0;
		sc->sc_mbox[6] = 0;
	}
	printf("%s: firmware rev %d.%d.%d, attrs 0x%x\n", DEVNAME(sc),
	    sc->sc_mbox[1], sc->sc_mbox[2], sc->sc_mbox[3], sc->sc_mbox[6]);

	/* work out how many ccbs to allocate */
	sc->sc_mbox[0] = QLW_MBOX_GET_FIRMWARE_STATUS;
	if (qlw_mbox(sc, 0x0001, 0x0007)) {
		printf("couldn't get firmware status: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}
	sc->sc_maxrequests = sc->sc_mbox[2];
	if (sc->sc_maxrequests > 512)
		sc->sc_maxrequests = 512;
	/* No bus may queue more commands than the request ring holds. */
	for (bus = 0; bus < sc->sc_numbusses; bus++) {
		if (sc->sc_max_queue_depth[bus] > sc->sc_maxrequests)
			sc->sc_max_queue_depth[bus] = sc->sc_maxrequests;
	}

	/*
	 * On some 1020/1040 variants the response queue is limited to
	 * 256 entries. We don't really need all that many anyway.
	 */
	sc->sc_maxresponses = sc->sc_maxrequests / 2;
	if (sc->sc_maxresponses < 64)
		sc->sc_maxresponses = 64;

	/* We may need up to 3 request entries per SCSI command. */
	sc->sc_maxccbs = sc->sc_maxrequests / 3;

	/* Allegedly the FIFO is busted on the 1040A. */
	if (sc->sc_isp_type == QLW_ISP1040A)
		sc->sc_isp_config &= ~QLW_PCI_FIFO_MASK;
	qlw_write(sc, QLW_CFG1, sc->sc_isp_config);

	if (sc->sc_isp_config & QLW_BURST_ENABLE)
		qlw_dma_burst_enable(sc);

	sc->sc_mbox[0] = QLW_MBOX_SET_FIRMWARE_FEATURES;
	sc->sc_mbox[1] = 0;
	if (sc->sc_fw_features & QLW_FW_FEATURE_LVD_NOTIFY)
		sc->sc_mbox[1] |= QLW_FW_FEATURE_LVD_NOTIFY;
	/* Only issue the command when there is a feature bit to set. */
	if (sc->sc_mbox[1] != 0 && qlw_mbox(sc, 0x0003, 0x0001)) {
		printf("couldn't set firmware features: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_CLOCK_RATE;
	sc->sc_mbox[1] = sc->sc_clock;
	if (qlw_mbox(sc, 0x0003, 0x0001)) {
		printf("couldn't set clock rate: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	/* Per-bus values for bus 0 and bus 1 are passed together below. */
	sc->sc_mbox[0] = QLW_MBOX_SET_RETRY_COUNT;
	sc->sc_mbox[1] = sc->sc_retry_count[0];
	sc->sc_mbox[2] = sc->sc_retry_delay[0];
	sc->sc_mbox[6] = sc->sc_retry_count[1];
	sc->sc_mbox[7] = sc->sc_retry_delay[1];
	if (qlw_mbox(sc, 0x00c7, 0x0001)) {
		printf("couldn't set retry count: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_ASYNC_DATA_SETUP;
	sc->sc_mbox[1] = sc->sc_async_data_setup[0];
	sc->sc_mbox[2] = sc->sc_async_data_setup[1];
	if (qlw_mbox(sc, 0x0007, 0x0001)) {
		printf("couldn't set async data setup: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_ACTIVE_NEGATION;
	sc->sc_mbox[1] = sc->sc_req_ack_active_neg[0] << 5;
	sc->sc_mbox[1] |= sc->sc_data_line_active_neg[0] << 4;
	sc->sc_mbox[2] = sc->sc_req_ack_active_neg[1] << 5;
	sc->sc_mbox[2] |= sc->sc_data_line_active_neg[1] << 4;
	if (qlw_mbox(sc, 0x0007, 0x0001)) {
		printf("couldn't set active negation: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_TAG_AGE_LIMIT;
	sc->sc_mbox[1] = sc->sc_tag_age_limit[0];
	sc->sc_mbox[2] = sc->sc_tag_age_limit[1];
	if (qlw_mbox(sc, 0x0007, 0x0001)) {
		printf("couldn't set tag age limit: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_SELECTION_TIMEOUT;
	sc->sc_mbox[1] = sc->sc_selection_timeout[0];
	sc->sc_mbox[2] = sc->sc_selection_timeout[1];
	if (qlw_mbox(sc, 0x0007, 0x0001)) {
		printf("couldn't set selection timeout: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	for (bus = 0; bus < sc->sc_numbusses; bus++) {
		if (qlw_config_bus(sc, bus))
			return (ENXIO);
	}

	if (qlw_alloc_ccbs(sc)) {
		/* error already printed */
		return (ENOMEM);
	}

	sc->sc_mbox[0] = QLW_MBOX_INIT_REQ_QUEUE;
	sc->sc_mbox[1] = sc->sc_maxrequests;
	qlw_mbox_putaddr(sc->sc_mbox, sc->sc_requests);
	sc->sc_mbox[4] = 0;
	if (qlw_mbox(sc, 0x00df, 0x0001)) {
		printf("couldn't init request queue: %x\n", sc->sc_mbox[0]);
		goto free_ccbs;
	}

	sc->sc_mbox[0] = QLW_MBOX_INIT_RSP_QUEUE;
	sc->sc_mbox[1] = sc->sc_maxresponses;
	qlw_mbox_putaddr(sc->sc_mbox, sc->sc_responses);
	sc->sc_mbox[5] = 0;
	if (qlw_mbox(sc, 0x00ef, 0x0001)) {
		printf("couldn't init response queue: %x\n", sc->sc_mbox[0]);
		goto free_ccbs;
	}

	reset_delay = 0;
	for (bus = 0; bus < sc->sc_numbusses; bus++) {
		sc->sc_mbox[0] = QLW_MBOX_BUS_RESET;
		sc->sc_mbox[1] = sc->sc_reset_delay[bus];
		sc->sc_mbox[2] = bus;
		if (qlw_mbox(sc, 0x0007, 0x0001)) {
			printf("couldn't reset bus: %x\n", sc->sc_mbox[0]);
			goto free_ccbs;
		}
		/* After a bus reset a sync marker must precede new I/O. */
		sc->sc_marker_required[bus] = 1;
		sc->sc_update_required[bus] = 0xffff;

		if (sc->sc_reset_delay[bus] > reset_delay)
			reset_delay = sc->sc_reset_delay[bus];
	}

	/* wait for the busses to settle */
	delay(reset_delay * 1000000);

	/* we should be good to go now, attach scsibus */
	for (bus = 0; bus < sc->sc_numbusses; bus++) {
		sc->sc_link[bus].adapter = &qlw_switch;
		sc->sc_link[bus].adapter_softc = sc;
		sc->sc_link[bus].adapter_target = sc->sc_initiator[bus];
		sc->sc_link[bus].adapter_buswidth = QLW_MAX_TARGETS;
		sc->sc_link[bus].openings = sc->sc_max_queue_depth[bus];
		sc->sc_link[bus].pool = &sc->sc_iopool;

		memset(&saa, 0, sizeof(saa));
		saa.saa_sc_link = &sc->sc_link[bus];

		/* config_found() returns the scsibus attached to us */
		sc->sc_scsibus[bus] = (struct scsibus_softc *)
		    config_found(&sc->sc_dev, &saa, scsiprint);

		qlw_update_bus(sc, bus);
	}

	sc->sc_running = 1;
	return(0);

free_ccbs:
	qlw_free_ccbs(sc);
	return (ENXIO);
}
427 :
/* Detach hook: currently a no-op, nothing is torn down. */
int
qlw_detach(struct qlw_softc *sc, int flags)
{
	return (0);
}
433 :
434 : int
435 0 : qlw_config_bus(struct qlw_softc *sc, int bus)
436 : {
437 : int target, err;
438 :
439 0 : sc->sc_mbox[0] = QLW_MBOX_SET_INITIATOR_ID;
440 0 : sc->sc_mbox[1] = (bus << 7) | sc->sc_initiator[bus];
441 :
442 0 : if (qlw_mbox(sc, 0x0003, 0x0001)) {
443 0 : printf("couldn't set initiator id: %x\n", sc->sc_mbox[0]);
444 0 : return (ENXIO);
445 : }
446 :
447 0 : for (target = 0; target < QLW_MAX_TARGETS; target++) {
448 0 : err = qlw_config_target(sc, bus, target);
449 0 : if (err)
450 0 : return (err);
451 : }
452 :
453 0 : return (0);
454 0 : }
455 :
456 : int
457 0 : qlw_config_target(struct qlw_softc *sc, int bus, int target)
458 : {
459 : int lun;
460 :
461 0 : sc->sc_mbox[0] = QLW_MBOX_SET_TARGET_PARAMETERS;
462 0 : sc->sc_mbox[1] = (((bus << 7) | target) << 8);
463 0 : sc->sc_mbox[2] = sc->sc_target[bus][target].qt_params;
464 0 : sc->sc_mbox[2] &= QLW_TARGET_SAFE;
465 0 : sc->sc_mbox[2] |= QLW_TARGET_NARROW | QLW_TARGET_ASYNC;
466 0 : sc->sc_mbox[3] = 0;
467 :
468 0 : if (qlw_mbox(sc, 0x000f, 0x0001)) {
469 0 : printf("couldn't set target parameters: %x\n", sc->sc_mbox[0]);
470 0 : return (ENXIO);
471 : }
472 :
473 0 : for (lun = 0; lun < QLW_MAX_LUNS; lun++) {
474 0 : sc->sc_mbox[0] = QLW_MBOX_SET_DEVICE_QUEUE;
475 0 : sc->sc_mbox[1] = (((bus << 7) | target) << 8) | lun;
476 0 : sc->sc_mbox[2] = sc->sc_max_queue_depth[bus];
477 0 : sc->sc_mbox[3] = sc->sc_target[bus][target].qt_exec_throttle;
478 0 : if (qlw_mbox(sc, 0x000f, 0x0001)) {
479 0 : printf("couldn't set lun parameters: %x\n",
480 0 : sc->sc_mbox[0]);
481 0 : return (ENXIO);
482 : }
483 : }
484 :
485 0 : return (0);
486 0 : }
487 :
488 : void
489 0 : qlw_update_bus(struct qlw_softc *sc, int bus)
490 : {
491 : int target;
492 :
493 0 : for (target = 0; target < QLW_MAX_TARGETS; target++)
494 0 : qlw_update_target(sc, bus, target);
495 0 : }
496 :
/*
 * Push updated (quirk-adjusted) transfer parameters for one target to
 * the firmware.  Only acts when the target's bit is set in
 * sc_update_required; the bit is cleared first so completions can
 * re-arm the update while we work.
 */
void
qlw_update_target(struct qlw_softc *sc, int bus, int target)
{
	struct scsi_link *link;
	int lun;

	if ((sc->sc_update_required[bus] & (1 << target)) == 0)
		return;
	atomic_clearbits_int(&sc->sc_update_required[bus], (1 << target));

	/* No device attached at this target: nothing to program. */
	link = scsi_get_link(sc->sc_scsibus[bus], target, 0);
	if (link == NULL)
		return;

	sc->sc_mbox[0] = QLW_MBOX_SET_TARGET_PARAMETERS;
	sc->sc_mbox[1] = (((bus << 7) | target) << 8);
	sc->sc_mbox[2] = sc->sc_target[bus][target].qt_params;
	sc->sc_mbox[2] |= QLW_TARGET_RENEG;
	sc->sc_mbox[2] &= ~QLW_TARGET_QFRZ;
	/* Strip capabilities the midlayer has disabled for this device. */
	if (link->quirks & SDEV_NOSYNC)
		sc->sc_mbox[2] &= ~QLW_TARGET_SYNC;
	if (link->quirks & SDEV_NOWIDE)
		sc->sc_mbox[2] &= ~QLW_TARGET_WIDE;
	if (link->quirks & SDEV_NOTAGS)
		sc->sc_mbox[2] &= ~QLW_TARGET_TAGS;

	/* mbox3: sync period in the low byte, sync offset in the high. */
	sc->sc_mbox[3] = sc->sc_target[bus][target].qt_sync_period;
	sc->sc_mbox[3] |= (sc->sc_target[bus][target].qt_sync_offset << 8);

	if (qlw_mbox(sc, 0x000f, 0x0001)) {
		printf("couldn't set target parameters: %x\n", sc->sc_mbox[0]);
		return;
	}

	/* XXX do PPR detection */

	for (lun = 0; lun < QLW_MAX_LUNS; lun++) {
		sc->sc_mbox[0] = QLW_MBOX_SET_DEVICE_QUEUE;
		sc->sc_mbox[1] = (((bus << 7) | target) << 8) | lun;
		sc->sc_mbox[2] = sc->sc_max_queue_depth[bus];
		sc->sc_mbox[3] = sc->sc_target[bus][target].qt_exec_throttle;
		if (qlw_mbox(sc, 0x000f, 0x0001)) {
			printf("couldn't set lun parameters: %x\n",
			    sc->sc_mbox[0]);
			return;
		}
	}
}
545 :
546 : void
547 0 : qlw_update_task(void *xsc)
548 : {
549 0 : struct qlw_softc *sc = xsc;
550 : int bus;
551 :
552 0 : for (bus = 0; bus < sc->sc_numbusses; bus++)
553 0 : qlw_update_bus(sc, bus);
554 0 : }
555 :
556 : struct qlw_ccb *
557 0 : qlw_handle_resp(struct qlw_softc *sc, u_int16_t id)
558 : {
559 : struct qlw_ccb *ccb;
560 : struct qlw_iocb_hdr *hdr;
561 : struct qlw_iocb_status *status;
562 : struct scsi_xfer *xs;
563 : u_int32_t handle;
564 0 : int entry_type;
565 0 : int flags;
566 : int bus;
567 :
568 : ccb = NULL;
569 0 : hdr = QLW_DMA_KVA(sc->sc_responses) + (id * QLW_QUEUE_ENTRY_SIZE);
570 :
571 0 : bus_dmamap_sync(sc->sc_dmat,
572 : QLW_DMA_MAP(sc->sc_responses), id * QLW_QUEUE_ENTRY_SIZE,
573 : QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTREAD);
574 :
575 0 : qlw_get_header(sc, hdr, &entry_type, &flags);
576 0 : switch (entry_type) {
577 : case QLW_IOCB_STATUS:
578 0 : status = (struct qlw_iocb_status *)hdr;
579 0 : handle = qlw_swap32(sc, status->handle);
580 0 : if (handle > sc->sc_maxccbs) {
581 0 : panic("bad completed command handle: %d (> %d)",
582 : handle, sc->sc_maxccbs);
583 : }
584 :
585 0 : ccb = &sc->sc_ccbs[handle];
586 0 : xs = ccb->ccb_xs;
587 0 : if (xs == NULL) {
588 0 : DPRINTF(QLW_D_INTR, "%s: got status for inactive"
589 : " ccb %d\n", DEVNAME(sc), handle);
590 0 : qlw_dump_iocb(sc, hdr, QLW_D_INTR);
591 : ccb = NULL;
592 0 : break;
593 : }
594 0 : if (xs->io != ccb) {
595 0 : panic("completed command handle doesn't match xs "
596 : "(handle %d, ccb %p, xs->io %p)", handle, ccb,
597 : xs->io);
598 : }
599 :
600 0 : if (xs->datalen > 0) {
601 0 : bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
602 : ccb->ccb_dmamap->dm_mapsize,
603 : (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
604 : BUS_DMASYNC_POSTWRITE);
605 0 : bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
606 0 : }
607 :
608 0 : bus = qlw_xs_bus(sc, xs);
609 0 : xs->status = qlw_swap16(sc, status->scsi_status);
610 0 : switch (qlw_swap16(sc, status->completion)) {
611 : case QLW_IOCB_STATUS_COMPLETE:
612 0 : if (qlw_swap16(sc, status->scsi_status) &
613 : QLW_SCSI_STATUS_SENSE_VALID) {
614 0 : memcpy(&xs->sense, status->sense_data,
615 : sizeof(xs->sense));
616 0 : xs->error = XS_SENSE;
617 0 : } else {
618 0 : xs->error = XS_NOERROR;
619 : }
620 0 : xs->resid = 0;
621 0 : break;
622 :
623 : case QLW_IOCB_STATUS_INCOMPLETE:
624 0 : if (flags & QLW_STATE_GOT_TARGET) {
625 0 : xs->error = XS_DRIVER_STUFFUP;
626 0 : } else {
627 0 : xs->error = XS_SELTIMEOUT;
628 : }
629 : break;
630 :
631 : case QLW_IOCB_STATUS_DMA_ERROR:
632 0 : DPRINTF(QLW_D_INTR, "%s: dma error\n", DEVNAME(sc));
633 : /* set resid apparently? */
634 : break;
635 :
636 : case QLW_IOCB_STATUS_RESET:
637 0 : DPRINTF(QLW_D_INTR, "%s: reset destroyed command\n",
638 : DEVNAME(sc));
639 0 : sc->sc_marker_required[bus] = 1;
640 0 : xs->error = XS_RESET;
641 0 : break;
642 :
643 : case QLW_IOCB_STATUS_ABORTED:
644 0 : DPRINTF(QLW_D_INTR, "%s: aborted\n", DEVNAME(sc));
645 0 : sc->sc_marker_required[bus] = 1;
646 0 : xs->error = XS_DRIVER_STUFFUP;
647 0 : break;
648 :
649 : case QLW_IOCB_STATUS_TIMEOUT:
650 0 : DPRINTF(QLW_D_INTR, "%s: command timed out\n",
651 : DEVNAME(sc));
652 0 : xs->error = XS_TIMEOUT;
653 0 : break;
654 :
655 : case QLW_IOCB_STATUS_DATA_OVERRUN:
656 : case QLW_IOCB_STATUS_DATA_UNDERRUN:
657 0 : xs->resid = qlw_swap32(sc, status->resid);
658 0 : xs->error = XS_NOERROR;
659 0 : break;
660 :
661 : case QLW_IOCB_STATUS_QUEUE_FULL:
662 0 : DPRINTF(QLW_D_INTR, "%s: queue full\n", DEVNAME(sc));
663 0 : xs->error = XS_BUSY;
664 0 : break;
665 :
666 : case QLW_IOCB_STATUS_WIDE_FAILED:
667 0 : DPRINTF(QLW_D_INTR, "%s: wide failed\n", DEVNAME(sc));
668 0 : sc->sc_link->quirks |= SDEV_NOWIDE;
669 0 : atomic_setbits_int(&sc->sc_update_required[bus],
670 0 : 1 << xs->sc_link->target);
671 0 : task_add(systq, &sc->sc_update_task);
672 0 : xs->resid = qlw_swap32(sc, status->resid);
673 0 : xs->error = XS_NOERROR;
674 0 : break;
675 :
676 : case QLW_IOCB_STATUS_SYNCXFER_FAILED:
677 0 : DPRINTF(QLW_D_INTR, "%s: sync failed\n", DEVNAME(sc));
678 0 : sc->sc_link->quirks |= SDEV_NOSYNC;
679 0 : atomic_setbits_int(&sc->sc_update_required[bus],
680 0 : 1 << xs->sc_link->target);
681 0 : task_add(systq, &sc->sc_update_task);
682 0 : xs->resid = qlw_swap32(sc, status->resid);
683 0 : xs->error = XS_NOERROR;
684 0 : break;
685 :
686 : default:
687 0 : DPRINTF(QLW_D_INTR, "%s: unexpected completion"
688 : " status %x\n", DEVNAME(sc),
689 : qlw_swap16(sc, status->completion));
690 0 : qlw_dump_iocb(sc, hdr, QLW_D_INTR);
691 0 : xs->error = XS_DRIVER_STUFFUP;
692 0 : break;
693 : }
694 : break;
695 :
696 : default:
697 0 : DPRINTF(QLW_D_INTR, "%s: unexpected response entry type %x\n",
698 : DEVNAME(sc), entry_type);
699 0 : qlw_dump_iocb(sc, hdr, QLW_D_INTR);
700 0 : break;
701 : }
702 :
703 0 : return (ccb);
704 0 : }
705 :
/*
 * Dispatch one chip interrupt; isr/info come from qlw_read_isr().
 * Handles async events, response-queue completions and mailbox
 * completions.
 */
void
qlw_handle_intr(struct qlw_softc *sc, u_int16_t isr, u_int16_t info)
{
	int i;
	u_int16_t rspin;
	struct qlw_ccb *ccb;

	switch (isr) {
	case QLW_INT_TYPE_ASYNC:
		qlw_async(sc, info);
		qlw_clear_isr(sc, isr);
		break;

	case QLW_INT_TYPE_IO:
		qlw_clear_isr(sc, isr);
		rspin = qlw_queue_read(sc, QLW_RESP_IN);
		if (rspin == sc->sc_last_resp_id) {
			/* seems to happen a lot on 2200s when mbox commands
			 * complete but it doesn't want to give us the register
			 * semaphore, or something.
			 *
			 * if we're waiting on a mailbox command, don't ack
			 * the interrupt yet.
			 */
			if (sc->sc_mbox_pending) {
				DPRINTF(QLW_D_MBOX, "%s: ignoring premature"
				    " mbox int\n", DEVNAME(sc));
				return;
			}

			break;
		}

		/* Response ring not allocated yet (early in attach). */
		if (sc->sc_responses == NULL)
			break;

		DPRINTF(QLW_D_IO, "%s: response queue %x=>%x\n",
		    DEVNAME(sc), sc->sc_last_resp_id, rspin);

		/* Consume entries up to the chip's producer index. */
		do {
			ccb = qlw_handle_resp(sc, sc->sc_last_resp_id);
			if (ccb)
				scsi_done(ccb->ccb_xs);

			sc->sc_last_resp_id++;
			sc->sc_last_resp_id %= sc->sc_maxresponses;
		} while (sc->sc_last_resp_id != rspin);

		qlw_queue_write(sc, QLW_RESP_OUT, rspin);
		break;

	case QLW_INT_TYPE_MBOX:
		if (sc->sc_mbox_pending) {
			/* On success, latch all output mailbox registers. */
			if (info == QLW_MBOX_COMPLETE) {
				for (i = 1; i < nitems(sc->sc_mbox); i++) {
					sc->sc_mbox[i] = qlw_read_mbox(sc, i);
				}
			} else {
				sc->sc_mbox[0] = info;
			}
			/* Wake the thread sleeping in qlw_mbox(). */
			wakeup(sc->sc_mbox);
		} else {
			DPRINTF(QLW_D_MBOX, "%s: unexpected mbox interrupt:"
			    " %x\n", DEVNAME(sc), info);
		}
		qlw_clear_isr(sc, isr);
		break;

	default:
		/* maybe log something? */
		break;
	}
}
779 :
780 : int
781 0 : qlw_intr(void *xsc)
782 : {
783 0 : struct qlw_softc *sc = xsc;
784 0 : u_int16_t isr;
785 0 : u_int16_t info;
786 :
787 0 : if (qlw_read_isr(sc, &isr, &info) == 0)
788 0 : return (0);
789 :
790 0 : qlw_handle_intr(sc, isr, info);
791 0 : return (1);
792 0 : }
793 :
794 : int
795 0 : qlw_scsi_probe(struct scsi_link *link)
796 : {
797 0 : if (link->lun >= QLW_MAX_LUNS)
798 0 : return (EINVAL);
799 :
800 0 : return (0);
801 0 : }
802 :
/*
 * Queue a SCSI command: build a command IOCB (plus continuation IOCBs
 * for DMA segments beyond the first QLW_IOCB_SEGS_PER_CMD) on the
 * request ring.  For SCSI_POLL xfers, spin here until the command
 * completes instead of returning to the midlayer.
 */
void
qlw_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct qlw_softc *sc = link->adapter_softc;
	struct qlw_ccb *ccb;
	struct qlw_iocb_req0 *iocb;
	struct qlw_ccb_list list;
	u_int16_t req, rspin;
	int offset, error, done;
	bus_dmamap_t dmap;
	int bus;
	int seg;

	/* CDB won't fit in the IOCB: fail with ILLEGAL REQUEST sense. */
	if (xs->cmdlen > sizeof(iocb->cdb)) {
		DPRINTF(QLW_D_IO, "%s: cdb too big (%d)\n", DEVNAME(sc),
		    xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	ccb = xs->io;
	dmap = ccb->ccb_dmamap;
	if (xs->datalen > 0) {
		/* Map the data buffer for DMA before taking the ring lock. */
		error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data,
		    xs->datalen, NULL, (xs->flags & SCSI_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		if (error) {
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return;
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0,
		    dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);
	}

	mtx_enter(&sc->sc_queue_mtx);

	/* put in a sync marker if required */
	bus = qlw_xs_bus(sc, xs);
	if (sc->sc_marker_required[bus]) {
		req = sc->sc_next_req_id++;
		if (sc->sc_next_req_id == sc->sc_maxrequests)
			sc->sc_next_req_id = 0;

		DPRINTF(QLW_D_IO, "%s: writing marker at request %d\n",
		    DEVNAME(sc), req);
		offset = (req * QLW_QUEUE_ENTRY_SIZE);
		iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);
		qlw_put_marker(sc, bus, iocb);
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
		qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id);
		sc->sc_marker_required[bus] = 0;
	}

	/* Claim the ring slot for the command IOCB itself. */
	req = sc->sc_next_req_id++;
	if (sc->sc_next_req_id == sc->sc_maxrequests)
		sc->sc_next_req_id = 0;

	offset = (req * QLW_QUEUE_ENTRY_SIZE);
	iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
	bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

	ccb->ccb_xs = xs;

	DPRINTF(QLW_D_IO, "%s: writing cmd at request %d\n", DEVNAME(sc), req);
	qlw_put_cmd(sc, iocb, xs, ccb);
	seg = QLW_IOCB_SEGS_PER_CMD;

	bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);

	/* Remaining DMA segments go into continuation IOCBs. */
	while (seg < ccb->ccb_dmamap->dm_nsegs) {
		req = sc->sc_next_req_id++;
		if (sc->sc_next_req_id == sc->sc_maxrequests)
			sc->sc_next_req_id = 0;

		offset = (req * QLW_QUEUE_ENTRY_SIZE);
		iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
		    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

		DPRINTF(QLW_D_IO, "%s: writing cont at request %d\n", DEVNAME(sc), req);
		qlw_put_cont(sc, iocb, xs, ccb, seg);
		seg += QLW_IOCB_SEGS_PER_CONT;

		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
		    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
	}

	/* Hand the new producer index to the chip. */
	qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id);

	if (!ISSET(xs->flags, SCSI_POLL)) {
		mtx_leave(&sc->sc_queue_mtx);
		return;
	}

	/* Polled command: spin on the ISR until our own ccb completes. */
	done = 0;
	SIMPLEQ_INIT(&list);
	do {
		u_int16_t isr, info;

		delay(100);

		if (qlw_read_isr(sc, &isr, &info) == 0) {
			continue;
		}

		if (isr != QLW_INT_TYPE_IO) {
			qlw_handle_intr(sc, isr, info);
			continue;
		}

		qlw_clear_isr(sc, isr);

		rspin = qlw_queue_read(sc, QLW_RESP_IN);
		while (rspin != sc->sc_last_resp_id) {
			ccb = qlw_handle_resp(sc, sc->sc_last_resp_id);

			sc->sc_last_resp_id++;
			if (sc->sc_last_resp_id == sc->sc_maxresponses)
				sc->sc_last_resp_id = 0;

			/* Defer scsi_done() until the mutex is dropped. */
			if (ccb != NULL)
				SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_link);
			if (ccb == xs->io)
				done = 1;
		}
		qlw_queue_write(sc, QLW_RESP_OUT, rspin);
	} while (done == 0);

	mtx_leave(&sc->sc_queue_mtx);

	while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&list, ccb_link);
		scsi_done(ccb->ccb_xs);
	}
}
953 :
954 : u_int16_t
955 0 : qlw_read(struct qlw_softc *sc, bus_size_t offset)
956 : {
957 : u_int16_t v;
958 0 : v = bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
959 0 : bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 2,
960 : BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
961 0 : return (v);
962 : }
963 :
/*
 * Write a 16-bit chip register, followed by a full read/write barrier
 * so register accesses are not reordered around it.
 */
void
qlw_write(struct qlw_softc *sc, bus_size_t offset, u_int16_t value)
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, value);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
971 :
972 : u_int16_t
973 0 : qlw_read_mbox(struct qlw_softc *sc, int mbox)
974 : {
975 : /* could range-check mboxes according to chip type? */
976 0 : return (qlw_read(sc, sc->sc_mbox_base + (mbox * 2)));
977 : }
978 :
979 : void
980 0 : qlw_write_mbox(struct qlw_softc *sc, int mbox, u_int16_t value)
981 : {
982 0 : qlw_write(sc, sc->sc_mbox_base + (mbox * 2), value);
983 0 : }
984 :
985 : void
986 0 : qlw_host_cmd(struct qlw_softc *sc, u_int16_t cmd)
987 : {
988 0 : qlw_write(sc, sc->sc_host_cmd_ctrl, cmd << QLW_HOST_CMD_SHIFT);
989 0 : }
990 :
/* Poll iterations for mailbox completion, at 100us each (~400ms). */
#define MBOX_COMMAND_TIMEOUT 4000

/*
 * Issue a mailbox command.  maskin selects which sc_mbox[] registers
 * to write to the chip, maskout which to read back on success.
 * Before interrupts run (sc_running == 0) the completion is polled;
 * afterwards we sleep until qlw_handle_intr() wakes us.
 * Returns 0 on success, non-zero (1 on timeout, otherwise the chip's
 * status code) on failure; sc_mbox[0] holds the chip status.
 */
int
qlw_mbox(struct qlw_softc *sc, int maskin, int maskout)
{
	int i;
	int result = 0;
	int rv;

	sc->sc_mbox_pending = 1;
	for (i = 0; i < nitems(sc->sc_mbox); i++) {
		if (maskin & (1 << i)) {
			qlw_write_mbox(sc, i, sc->sc_mbox[i]);
		}
	}
	/* Ring the doorbell: tell the chip a command is ready. */
	qlw_host_cmd(sc, QLW_HOST_CMD_SET_HOST_INT);

	if (sc->sc_running == 0) {
		/* Interrupts not enabled yet: poll for the mbox interrupt. */
		for (i = 0; i < MBOX_COMMAND_TIMEOUT && result == 0; i++) {
			u_int16_t isr, info;

			delay(100);

			if (qlw_read_isr(sc, &isr, &info) == 0)
				continue;

			switch (isr) {
			case QLW_INT_TYPE_MBOX:
				result = info;
				break;

			default:
				/* Not ours; dispatch it and keep waiting. */
				qlw_handle_intr(sc, isr, info);
				break;
			}
		}
	} else {
		/* qlw_handle_intr() stores the status and wakes us. */
		tsleep(sc->sc_mbox, PRIBIO, "qlw_mbox", 0);
		result = sc->sc_mbox[0];
	}

	switch (result) {
	case QLW_MBOX_COMPLETE:
		/* Latch requested output registers; zero the rest. */
		for (i = 1; i < nitems(sc->sc_mbox); i++) {
			sc->sc_mbox[i] = (maskout & (1 << i)) ?
			    qlw_read_mbox(sc, i) : 0;
		}
		rv = 0;
		break;

	case 0:
		/* timed out; do something? */
		DPRINTF(QLW_D_MBOX, "%s: mbox timed out\n", DEVNAME(sc));
		rv = 1;
		break;

	default:
		sc->sc_mbox[0] = result;
		rv = result;
		break;
	}

	qlw_clear_isr(sc, QLW_INT_TYPE_MBOX);
	sc->sc_mbox_pending = 0;
	return (rv);
}
1057 :
1058 : void
1059 0 : qlw_mbox_putaddr(u_int16_t *mbox, struct qlw_dmamem *mem)
1060 : {
1061 0 : mbox[2] = (QLW_DMA_DVA(mem) >> 16) & 0xffff;
1062 0 : mbox[3] = (QLW_DMA_DVA(mem) >> 0) & 0xffff;
1063 0 : mbox[6] = (QLW_DMA_DVA(mem) >> 48) & 0xffff;
1064 0 : mbox[7] = (QLW_DMA_DVA(mem) >> 32) & 0xffff;
1065 0 : }
1066 :
1067 : void
1068 0 : qlw_set_ints(struct qlw_softc *sc, int enabled)
1069 : {
1070 0 : u_int16_t v = enabled ? (QLW_INT_REQ | QLW_RISC_INT_REQ) : 0;
1071 0 : qlw_write(sc, QLW_INT_CTRL, v);
1072 0 : }
1073 :
/*
 * Classify a pending interrupt.  Returns 1 with *isr (and, for
 * mailbox/async interrupts, *info) filled in, or 0 if nothing is
 * pending.
 */
int
qlw_read_isr(struct qlw_softc *sc, u_int16_t *isr, u_int16_t *info)
{
	u_int16_t int_status;

	if (qlw_read(sc, QLW_SEMA) & QLW_SEMA_LOCK) {
		/* semaphore held: mailbox 0 carries the event code */
		*info = qlw_read_mbox(sc, 0);
		if (*info & QLW_MBOX_HAS_STATUS)
			*isr = QLW_INT_TYPE_MBOX;
		else
			*isr = QLW_INT_TYPE_ASYNC;
	} else {
		int_status = qlw_read(sc, QLW_INT_STATUS);
		if ((int_status & (QLW_INT_REQ | QLW_RISC_INT_REQ)) == 0)
			return (0);

		/* no semaphore: response queue (I/O completion) interrupt */
		*isr = QLW_INT_TYPE_IO;
	}

	return (1);
}
1095 :
1096 : void
1097 0 : qlw_clear_isr(struct qlw_softc *sc, u_int16_t isr)
1098 : {
1099 0 : qlw_host_cmd(sc, QLW_HOST_CMD_CLR_RISC_INT);
1100 0 : switch (isr) {
1101 : case QLW_INT_TYPE_MBOX:
1102 : case QLW_INT_TYPE_ASYNC:
1103 0 : qlw_write(sc, QLW_SEMA, 0);
1104 0 : break;
1105 : default:
1106 : break;
1107 : }
1108 0 : }
1109 :
/*
 * Soft-reset the ISP and bring it back to a state where it answers
 * mailbox commands.  Returns 0 on success, ENXIO if the reset never
 * clears or the chip fails a NOP mailbox command afterwards.
 */
int
qlw_softreset(struct qlw_softc *sc)
{
	int i;

	/* mask interrupts while the chip is down */
	qlw_set_ints(sc, 0);

	/* reset */
	qlw_write(sc, QLW_INT_CTRL, QLW_RESET);
	delay(100);
	/* clear data and control dma engines? */

	/* wait for soft reset to clear */
	for (i = 0; i < 1000; i++) {
		if ((qlw_read(sc, QLW_INT_CTRL) & QLW_RESET) == 0)
			break;

		delay(100);
	}

	if (i == 1000) {
		DPRINTF(QLW_D_INTR, "%s: reset didn't clear\n", DEVNAME(sc));
		qlw_set_ints(sc, 0);
		return (ENXIO);
	}

	qlw_write(sc, QLW_CFG1, 0);

	/* reset risc processor */
	qlw_host_cmd(sc, QLW_HOST_CMD_RESET);
	delay(100);
	qlw_write(sc, QLW_SEMA, 0);
	qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE);

	/* reset queue pointers */
	qlw_queue_write(sc, QLW_REQ_IN, 0);
	qlw_queue_write(sc, QLW_REQ_OUT, 0);
	qlw_queue_write(sc, QLW_RESP_IN, 0);
	qlw_queue_write(sc, QLW_RESP_OUT, 0);

	qlw_set_ints(sc, 1);
	qlw_host_cmd(sc, QLW_HOST_CMD_BIOS);

	/* do a basic mailbox operation to check we're alive */
	sc->sc_mbox[0] = QLW_MBOX_NOP;
	if (qlw_mbox(sc, 0x0001, 0x0001)) {
		DPRINTF(QLW_D_INTR, "%s: ISP not responding after reset\n",
		    DEVNAME(sc));
		return (ENXIO);
	}

	return (0);
}
1163 :
/*
 * Turn on burst mode in the command and data DMA engines.  The
 * ISP1000/1040 expose the DMA config registers directly; later chips
 * bank them behind the QLW_DMA_BANK bit in CFG1 and are accessed here
 * with the RISC paused while the bank is switched.
 */
void
qlw_dma_burst_enable(struct qlw_softc *sc)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000 ||
	    sc->sc_isp_gen == QLW_GEN_ISP1040) {
		qlw_write(sc, QLW_CDMA_CFG,
		    qlw_read(sc, QLW_CDMA_CFG) | QLW_DMA_BURST_ENABLE);
		qlw_write(sc, QLW_DDMA_CFG,
		    qlw_read(sc, QLW_DDMA_CFG) | QLW_DMA_BURST_ENABLE);
	} else {
		/* pause the RISC while the register bank is flipped */
		qlw_host_cmd(sc, QLW_HOST_CMD_PAUSE);
		qlw_write(sc, QLW_CFG1,
		    qlw_read(sc, QLW_CFG1) | QLW_DMA_BANK);
		qlw_write(sc, QLW_CDMA_CFG_1080,
		    qlw_read(sc, QLW_CDMA_CFG_1080) | QLW_DMA_BURST_ENABLE);
		qlw_write(sc, QLW_DDMA_CFG_1080,
		    qlw_read(sc, QLW_DDMA_CFG_1080) | QLW_DMA_BURST_ENABLE);
		qlw_write(sc, QLW_CFG1,
		    qlw_read(sc, QLW_CFG1) & ~QLW_DMA_BANK);
		qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE);
	}
}
1186 :
/*
 * Handle a deferred update request of the given task type.
 * Intentionally empty: no deferred tasks are implemented yet.
 */
void
qlw_update(struct qlw_softc *sc, int task)
{
	/* do things */
}
1192 :
1193 : int
1194 0 : qlw_async(struct qlw_softc *sc, u_int16_t info)
1195 : {
1196 : int bus;
1197 :
1198 0 : switch (info) {
1199 : case QLW_ASYNC_BUS_RESET:
1200 0 : DPRINTF(QLW_D_PORT, "%s: bus reset\n", DEVNAME(sc));
1201 0 : bus = qlw_read_mbox(sc, 6);
1202 0 : sc->sc_marker_required[bus] = 1;
1203 0 : break;
1204 :
1205 : #if 0
1206 : case QLW_ASYNC_SYSTEM_ERROR:
1207 : qla_update(sc, QLW_UPDATE_SOFTRESET);
1208 : break;
1209 :
1210 : case QLW_ASYNC_REQ_XFER_ERROR:
1211 : qla_update(sc, QLW_UPDATE_SOFTRESET);
1212 : break;
1213 :
1214 : case QLW_ASYNC_RSP_XFER_ERROR:
1215 : qla_update(sc, QLW_UPDATE_SOFTRESET);
1216 : break;
1217 : #endif
1218 :
1219 : case QLW_ASYNC_SCSI_CMD_COMPLETE:
1220 : /* shouldn't happen, we disable fast posting */
1221 : break;
1222 :
1223 : case QLW_ASYNC_CTIO_COMPLETE:
1224 : /* definitely shouldn't happen, we don't do target mode */
1225 : break;
1226 :
1227 : default:
1228 0 : DPRINTF(QLW_D_INTR, "%s: unknown async %x\n", DEVNAME(sc),
1229 : info);
1230 : break;
1231 : }
1232 0 : return (1);
1233 : }
1234 :
1235 : #ifdef QLW_DEBUG
1236 : void
1237 0 : qlw_dump_iocb(struct qlw_softc *sc, void *buf, int flags)
1238 : {
1239 : u_int8_t *iocb = buf;
1240 : int l;
1241 : int b;
1242 :
1243 0 : if ((qlwdebug & flags) == 0)
1244 0 : return;
1245 :
1246 0 : printf("%s: iocb:\n", DEVNAME(sc));
1247 0 : for (l = 0; l < 4; l++) {
1248 0 : for (b = 0; b < 16; b++) {
1249 0 : printf(" %2.2x", iocb[(l*16)+b]);
1250 : }
1251 0 : printf("\n");
1252 : }
1253 0 : }
1254 :
1255 : void
1256 0 : qlw_dump_iocb_segs(struct qlw_softc *sc, void *segs, int n)
1257 : {
1258 : u_int8_t *buf = segs;
1259 : int s, b;
1260 0 : if ((qlwdebug & QLW_D_IOCB) == 0)
1261 0 : return;
1262 :
1263 0 : printf("%s: iocb segs:\n", DEVNAME(sc));
1264 0 : for (s = 0; s < n; s++) {
1265 0 : for (b = 0; b < sizeof(struct qlw_iocb_seg); b++) {
1266 0 : printf(" %2.2x", buf[(s*(sizeof(struct qlw_iocb_seg)))
1267 0 : + b]);
1268 : }
1269 0 : printf("\n");
1270 : }
1271 0 : }
1272 : #endif
1273 :
1274 : /*
1275 : * The PCI bus is little-endian whereas SBus is big-endian. This
1276 : * leads to some differences in byte twisting of DMA transfers of
1277 : * request and response queue entries. Most fields can be treated as
1278 : * 16-bit or 32-bit with the endianness of the bus, but the header
1279 : * fields end up being swapped by the ISP1000's SBus interface.
1280 : */
1281 :
/*
 * Read the entry type and flags out of an IOCB header.  On the
 * SBus-attached ISP1000 the bus interface byte-swaps these 8-bit
 * header fields, so they are deliberately fetched from the
 * neighbouring (swapped) slots there.  Mirrors qlw_put_header().
 */
void
qlw_get_header(struct qlw_softc *sc, struct qlw_iocb_hdr *hdr,
    int *type, int *flags)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000) {
		*type = hdr->entry_count;
		*flags = hdr->seqno;
	} else {
		*type = hdr->entry_type;
		*flags = hdr->flags;
	}
}
1294 :
1295 : void
1296 0 : qlw_put_header(struct qlw_softc *sc, struct qlw_iocb_hdr *hdr,
1297 : int type, int count)
1298 : {
1299 0 : if (sc->sc_isp_gen == QLW_GEN_ISP1000) {
1300 0 : hdr->entry_type = count;
1301 0 : hdr->entry_count = type;
1302 0 : hdr->seqno = 0;
1303 0 : hdr->flags = 0;
1304 0 : } else {
1305 0 : hdr->entry_type = type;
1306 0 : hdr->entry_count = count;
1307 0 : hdr->seqno = 0;
1308 0 : hdr->flags = 0;
1309 : }
1310 0 : }
1311 :
1312 : void
1313 0 : qlw_put_data_seg(struct qlw_softc *sc, struct qlw_iocb_seg *seg,
1314 : bus_dmamap_t dmap, int num)
1315 : {
1316 0 : seg->seg_addr = qlw_swap32(sc, dmap->dm_segs[num].ds_addr);
1317 0 : seg->seg_len = qlw_swap32(sc, dmap->dm_segs[num].ds_len);
1318 0 : }
1319 :
/*
 * Build a marker IOCB into buf, telling the firmware to resync its
 * state for everything on the given bus.
 */
void
qlw_put_marker(struct qlw_softc *sc, int bus, void *buf)
{
	struct qlw_iocb_marker *marker = buf;

	qlw_put_header(sc, &marker->hdr, QLW_IOCB_MARKER, 1);

	/* could be more specific here; isp(4) isn't */
	marker->device = qlw_swap16(sc, (bus << 7) << 8);
	marker->modifier = qlw_swap16(sc, QLW_IOCB_MARKER_SYNC_ALL);
	qlw_dump_iocb(sc, buf, QLW_D_IOCB);
}
1332 :
/*
 * Fill in a type 0 command IOCB for the scsi_xfer xs.  The header's
 * entry count covers this IOCB plus one continuation IOCB (built by
 * qlw_put_cont()) for every further QLW_IOCB_SEGS_PER_CONT DMA
 * segments beyond the QLW_IOCB_SEGS_PER_CMD that fit here.
 */
void
qlw_put_cmd(struct qlw_softc *sc, void *buf, struct scsi_xfer *xs,
    struct qlw_ccb *ccb)
{
	struct qlw_iocb_req0 *req = buf;
	int entry_count = 1;
	u_int16_t dir;
	int seg, nsegs;
	int seg_count;
	int timeout = 0;
	int bus, target, lun;

	if (xs->datalen == 0) {
		dir = QLW_IOCB_CMD_NO_DATA;
		seg_count = 1;
	} else {
		dir = xs->flags & SCSI_DATA_IN ? QLW_IOCB_CMD_READ_DATA :
		    QLW_IOCB_CMD_WRITE_DATA;
		seg_count = ccb->ccb_dmamap->dm_nsegs;
		/* how many continuation IOCBs do the leftover segs need? */
		nsegs = ccb->ccb_dmamap->dm_nsegs - QLW_IOCB_SEGS_PER_CMD;
		while (nsegs > 0) {
			entry_count++;
			nsegs -= QLW_IOCB_SEGS_PER_CONT;
		}
		/* the command IOCB itself carries the first few segments */
		for (seg = 0; seg < ccb->ccb_dmamap->dm_nsegs; seg++) {
			if (seg >= QLW_IOCB_SEGS_PER_CMD)
				break;
			qlw_put_data_seg(sc, &req->segs[seg],
			    ccb->ccb_dmamap, seg);
		}
	}

	/* tagged queuing (simple tags) once running, unless quirked off */
	if (sc->sc_running && (xs->sc_link->quirks & SDEV_NOTAGS) == 0)
		dir |= QLW_IOCB_CMD_SIMPLE_QUEUE;

	qlw_put_header(sc, &req->hdr, QLW_IOCB_CMD_TYPE_0, entry_count);

	/*
	 * timeout is in seconds. make sure it's at least 1 if a timeout
	 * was specified in xs
	 */
	if (xs->timeout != 0)
		timeout = MAX(1, xs->timeout/1000);

	req->flags = qlw_swap16(sc, dir);
	req->seg_count = qlw_swap16(sc, seg_count);
	req->timeout = qlw_swap16(sc, timeout);

	bus = qlw_xs_bus(sc, xs);
	target = xs->sc_link->target;
	lun = xs->sc_link->lun;
	req->device = qlw_swap16(sc, (((bus << 7) | target) << 8) | lun);

	memcpy(req->cdb, xs->cmd, xs->cmdlen);
	req->ccblen = qlw_swap16(sc, xs->cmdlen);

	/* the handle lets the completion path find this ccb again */
	req->handle = qlw_swap32(sc, ccb->ccb_id);

	qlw_dump_iocb(sc, buf, QLW_D_IOCB);
}
1393 :
1394 : void
1395 0 : qlw_put_cont(struct qlw_softc *sc, void *buf, struct scsi_xfer *xs,
1396 : struct qlw_ccb *ccb, int seg0)
1397 : {
1398 0 : struct qlw_iocb_cont0 *cont = buf;
1399 : int seg;
1400 :
1401 0 : qlw_put_header(sc, &cont->hdr, QLW_IOCB_CONT_TYPE_0, 1);
1402 :
1403 0 : for (seg = seg0; seg < ccb->ccb_dmamap->dm_nsegs; seg++) {
1404 0 : if ((seg - seg0) >= QLW_IOCB_SEGS_PER_CONT)
1405 : break;
1406 0 : qlw_put_data_seg(sc, &cont->segs[seg - seg0],
1407 : ccb->ccb_dmamap, seg);
1408 : }
1409 0 : }
1410 :
1411 : #ifndef ISP_NOFIRMWARE
/*
 * Copy one firmware chunk into ISP RAM at dest, one word at a time
 * via QLW_MBOX_WRITE_RAM_WORD, then have the chip checksum what was
 * loaded.  The word count comes from src[3] (the chunk's header).
 * Returns 0 on success, 1 on any mailbox failure.
 */
int
qlw_load_firmware_words(struct qlw_softc *sc, const u_int16_t *src,
    u_int16_t dest)
{
	u_int16_t i;

	for (i = 0; i < src[3]; i++) {
		sc->sc_mbox[0] = QLW_MBOX_WRITE_RAM_WORD;
		sc->sc_mbox[1] = i + dest;
		sc->sc_mbox[2] = src[i];
		if (qlw_mbox(sc, 0x07, 0x01)) {
			printf("firmware load failed\n");
			return (1);
		}
	}

	/* ask the firmware to verify the checksum of the loaded chunk */
	sc->sc_mbox[0] = QLW_MBOX_VERIFY_CSUM;
	sc->sc_mbox[1] = dest;
	if (qlw_mbox(sc, 0x0003, 0x0003)) {
		printf("verification of chunk at %x failed: %x\n",
		    dest, sc->sc_mbox[1]);
		return (1);
	}

	return (0);
}
1438 :
1439 : int
1440 0 : qlw_load_firmware(struct qlw_softc *sc)
1441 : {
1442 0 : return qlw_load_firmware_words(sc, sc->sc_firmware, QLW_CODE_ORG);
1443 : }
1444 :
1445 : #endif /* !ISP_NOFIRMWARE */
1446 :
/*
 * Bit-bang the serial NVRAM out through the QLW_NVRAM register into
 * sc_nvram.  Returns 0 if the image looks valid (id "ISP ", version
 * at least sc_nvram_minversion, byte checksum zero), 1 otherwise.
 * Each qlw_write/delay/qlw_read triple clocks one transition on the
 * serial interface; the read-back flushes the write.
 */
int
qlw_read_nvram(struct qlw_softc *sc)
{
	u_int16_t data[sizeof(sc->sc_nvram) >> 1];
	u_int16_t req, cmd, val;
	u_int8_t csum;
	int i, bit;
	int reqcmd;
	int nbits;

	if (sc->sc_nvram_size == 0)
		return (1);

	/* 128-byte parts take a shorter address than the larger ones */
	if (sc->sc_nvram_size == 128) {
		reqcmd = (QLW_NVRAM_CMD_READ << 6);
		nbits = 8;
	} else {
		reqcmd = (QLW_NVRAM_CMD_READ << 8);
		nbits = 10;
	}

	/* select the chip and give it one clock */
	qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL);
	delay(10);
	qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL | QLW_NVRAM_CLOCK);
	delay(10);

	for (i = 0; i < (sc->sc_nvram_size >> 1); i++) {
		req = i | reqcmd;

		/* write each bit out through the nvram register */
		for (bit = nbits; bit >= 0; bit--) {
			cmd = QLW_NVRAM_CHIP_SEL;
			if ((req >> bit) & 1) {
				cmd |= QLW_NVRAM_DATA_OUT;
			}
			qlw_write(sc, QLW_NVRAM, cmd);
			delay(10);
			qlw_read(sc, QLW_NVRAM);

			qlw_write(sc, QLW_NVRAM, cmd | QLW_NVRAM_CLOCK);
			delay(10);
			qlw_read(sc, QLW_NVRAM);

			qlw_write(sc, QLW_NVRAM, cmd);
			delay(10);
			qlw_read(sc, QLW_NVRAM);
		}

		/* read the result back */
		val = 0;
		for (bit = 0; bit < 16; bit++) {
			val <<= 1;
			qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL |
			    QLW_NVRAM_CLOCK);
			delay(10);
			if (qlw_read(sc, QLW_NVRAM) & QLW_NVRAM_DATA_IN)
				val |= 1;
			delay(10);

			qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL);
			delay(10);
			qlw_read(sc, QLW_NVRAM);
		}

		/* deselect the chip between words */
		qlw_write(sc, QLW_NVRAM, 0);
		delay(10);
		qlw_read(sc, QLW_NVRAM);

		data[i] = letoh16(val);
	}

	/* sum every byte; a valid image sums to zero mod 256 */
	csum = 0;
	for (i = 0; i < (sc->sc_nvram_size >> 1); i++) {
		csum += data[i] & 0xff;
		csum += data[i] >> 8;
	}

	memcpy(&sc->sc_nvram, data, sizeof(sc->sc_nvram));
	/* id field should be 'ISP ', version should high enough */
	if (sc->sc_nvram.id[0] != 'I' || sc->sc_nvram.id[1] != 'S' ||
	    sc->sc_nvram.id[2] != 'P' || sc->sc_nvram.id[3] != ' ' ||
	    sc->sc_nvram.nvram_version < sc->sc_nvram_minversion ||
	    (csum != 0)) {
		printf("%s: nvram corrupt\n", DEVNAME(sc));
		return (1);
	}
	return (0);
}
1535 :
/*
 * Extract bus and per-target settings from an ISP1000/1040-style
 * NVRAM image into the softc.  These chips are single-bus, so bus
 * must be 0.
 */
void
qlw_parse_nvram_1040(struct qlw_softc *sc, int bus)
{
	struct qlw_nvram_1040 *nv = (struct qlw_nvram_1040 *)&sc->sc_nvram;
	int target;

	KASSERT(bus == 0);

	/* take the initiator id from nvram unless one is already forced */
	if (!ISSET(sc->sc_flags, QLW_FLAG_INITIATOR))
		sc->sc_initiator[0] = (nv->config1 >> 4);

	sc->sc_retry_count[0] = nv->retry_count;
	sc->sc_retry_delay[0] = nv->retry_delay;
	sc->sc_reset_delay[0] = nv->reset_delay;
	sc->sc_tag_age_limit[0] = nv->tag_age_limit;
	sc->sc_selection_timeout[0] = letoh16(nv->selection_timeout);
	sc->sc_max_queue_depth[0] = letoh16(nv->max_queue_depth);
	sc->sc_async_data_setup[0] = (nv->config2 & 0x0f);
	sc->sc_req_ack_active_neg[0] = ((nv->config2 & 0x10) >> 4);
	sc->sc_data_line_active_neg[0] = ((nv->config2 & 0x20) >> 5);

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[0][target];

		/* parameter byte lands in the high byte of qt_params */
		qt->qt_params = (nv->target[target].parameter << 8);
		qt->qt_exec_throttle = nv->target[target].execution_throttle;
		qt->qt_sync_period = nv->target[target].sync_period;
		qt->qt_sync_offset = nv->target[target].flags & 0x0f;
	}
}
1566 :
/*
 * Extract per-bus and per-target settings from an ISP1080/12160-style
 * NVRAM image into the softc for the given bus.  The 12160 allows a
 * wider sync offset field than the 1080.
 */
void
qlw_parse_nvram_1080(struct qlw_softc *sc, int bus)
{
	struct qlw_nvram_1080 *nvram = (struct qlw_nvram_1080 *)&sc->sc_nvram;
	struct qlw_nvram_bus *nv = &nvram->bus[bus];
	int target;

	sc->sc_isp_config = nvram->isp_config;
	sc->sc_fw_features = nvram->fw_features;

	/* take the initiator id from nvram unless one is already forced */
	if (!ISSET(sc->sc_flags, QLW_FLAG_INITIATOR))
		sc->sc_initiator[bus] = (nv->config1 & 0x0f);

	sc->sc_retry_count[bus] = nv->retry_count;
	sc->sc_retry_delay[bus] = nv->retry_delay;
	sc->sc_reset_delay[bus] = nv->reset_delay;
	sc->sc_selection_timeout[bus] = letoh16(nv->selection_timeout);
	sc->sc_max_queue_depth[bus] = letoh16(nv->max_queue_depth);
	sc->sc_async_data_setup[bus] = (nv->config2 & 0x0f);
	sc->sc_req_ack_active_neg[bus] = ((nv->config2 & 0x10) >> 4);
	sc->sc_data_line_active_neg[bus] = ((nv->config2 & 0x20) >> 5);

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[bus][target];

		/* parameter byte lands in the high byte of qt_params */
		qt->qt_params = (nv->target[target].parameter << 8);
		qt->qt_exec_throttle = nv->target[target].execution_throttle;
		qt->qt_sync_period = nv->target[target].sync_period;
		if (sc->sc_isp_gen == QLW_GEN_ISP12160)
			qt->qt_sync_offset = nv->target[target].flags & 0x1f;
		else
			qt->qt_sync_offset = nv->target[target].flags & 0x0f;
	}
}
1601 :
/*
 * Install default per-bus and per-target settings, for use when the
 * NVRAM is absent or corrupt.
 */
void
qlw_init_defaults(struct qlw_softc *sc, int bus)
{
	int target;

	/* chip-wide config bits; the ISP1000 takes none */
	switch (sc->sc_isp_gen) {
	case QLW_GEN_ISP1000:
		break;
	case QLW_GEN_ISP1040:
		sc->sc_isp_config = QLW_BURST_ENABLE | QLW_PCI_FIFO_64;
		break;
	case QLW_GEN_ISP1080:
	case QLW_GEN_ISP12160:
		sc->sc_isp_config = QLW_BURST_ENABLE | QLW_PCI_FIFO_128;
		sc->sc_fw_features = QLW_FW_FEATURE_LVD_NOTIFY;
		break;
	}

	sc->sc_retry_count[bus] = 0;
	sc->sc_retry_delay[bus] = 0;
	sc->sc_reset_delay[bus] = 3;
	sc->sc_tag_age_limit[bus] = 8;
	sc->sc_selection_timeout[bus] = 250;
	sc->sc_max_queue_depth[bus] = 32;
	/* higher clock rates get a larger async data setup count */
	if (sc->sc_clock > 40)
		sc->sc_async_data_setup[bus] = 9;
	else
		sc->sc_async_data_setup[bus] = 6;
	sc->sc_req_ack_active_neg[bus] = 1;
	sc->sc_data_line_active_neg[bus] = 1;

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[bus][target];

		qt->qt_params = QLW_TARGET_DEFAULT;
		qt->qt_exec_throttle = 16;
		qt->qt_sync_period = 10;
		qt->qt_sync_offset = 12;
	}
}
1642 :
/*
 * Allocate size bytes of DMA-safe memory: one contiguous, zeroed
 * segment, mapped into kernel va and loaded into its own dma map.
 * Returns NULL on failure; the goto labels unwind whatever was set
 * up, in reverse order.
 */
struct qlw_dmamem *
qlw_dmamem_alloc(struct qlw_softc *sc, size_t size)
{
	struct qlw_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->qdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->qdm_map) != 0)
		goto qdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->qdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->qdm_seg, nsegs, size, &m->qdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->qdm_map, m->qdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->qdm_map);
qdmfree:
	free(m, M_DEVBUF, sizeof(*m));

	return (NULL);
}
1684 :
/*
 * Release memory obtained from qlw_dmamem_alloc(), tearing it down
 * in the reverse order it was set up.
 */
void
qlw_dmamem_free(struct qlw_softc *sc, struct qlw_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->qdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->qdm_map);
	free(m, M_DEVBUF, sizeof(*m));
}
1694 :
1695 : int
1696 0 : qlw_alloc_ccbs(struct qlw_softc *sc)
1697 : {
1698 : struct qlw_ccb *ccb;
1699 : u_int8_t *cmd;
1700 : int i;
1701 :
1702 0 : SIMPLEQ_INIT(&sc->sc_ccb_free);
1703 0 : mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
1704 0 : mtx_init(&sc->sc_queue_mtx, IPL_BIO);
1705 :
1706 0 : sc->sc_ccbs = mallocarray(sc->sc_maxccbs, sizeof(struct qlw_ccb),
1707 : M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
1708 0 : if (sc->sc_ccbs == NULL) {
1709 0 : printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
1710 0 : return (1);
1711 : }
1712 :
1713 0 : sc->sc_requests = qlw_dmamem_alloc(sc, sc->sc_maxrequests *
1714 : QLW_QUEUE_ENTRY_SIZE);
1715 0 : if (sc->sc_requests == NULL) {
1716 0 : printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
1717 0 : goto free_ccbs;
1718 : }
1719 0 : sc->sc_responses = qlw_dmamem_alloc(sc, sc->sc_maxresponses *
1720 : QLW_QUEUE_ENTRY_SIZE);
1721 0 : if (sc->sc_responses == NULL) {
1722 0 : printf("%s: unable to allocate rcb dmamem\n", DEVNAME(sc));
1723 0 : goto free_req;
1724 : }
1725 :
1726 0 : cmd = QLW_DMA_KVA(sc->sc_requests);
1727 0 : memset(cmd, 0, QLW_QUEUE_ENTRY_SIZE * sc->sc_maxccbs);
1728 0 : for (i = 0; i < sc->sc_maxccbs; i++) {
1729 0 : ccb = &sc->sc_ccbs[i];
1730 :
1731 0 : if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
1732 : QLW_MAX_SEGS, MAXPHYS, 0,
1733 : BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1734 0 : &ccb->ccb_dmamap) != 0) {
1735 0 : printf("%s: unable to create dma map\n", DEVNAME(sc));
1736 : goto free_maps;
1737 : }
1738 :
1739 0 : ccb->ccb_sc = sc;
1740 0 : ccb->ccb_id = i;
1741 :
1742 0 : qlw_put_ccb(sc, ccb);
1743 : }
1744 :
1745 0 : scsi_iopool_init(&sc->sc_iopool, sc, qlw_get_ccb, qlw_put_ccb);
1746 0 : return (0);
1747 :
1748 : free_maps:
1749 0 : while ((ccb = qlw_get_ccb(sc)) != NULL)
1750 0 : bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1751 :
1752 0 : qlw_dmamem_free(sc, sc->sc_responses);
1753 : free_req:
1754 0 : qlw_dmamem_free(sc, sc->sc_requests);
1755 : free_ccbs:
1756 0 : free(sc->sc_ccbs, M_DEVBUF, 0);
1757 :
1758 0 : return (1);
1759 0 : }
1760 :
1761 : void
1762 0 : qlw_free_ccbs(struct qlw_softc *sc)
1763 : {
1764 : struct qlw_ccb *ccb;
1765 :
1766 0 : scsi_iopool_destroy(&sc->sc_iopool);
1767 0 : while ((ccb = qlw_get_ccb(sc)) != NULL)
1768 0 : bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1769 0 : qlw_dmamem_free(sc, sc->sc_responses);
1770 0 : qlw_dmamem_free(sc, sc->sc_requests);
1771 0 : free(sc->sc_ccbs, M_DEVBUF, 0);
1772 0 : }
1773 :
1774 : void *
1775 0 : qlw_get_ccb(void *xsc)
1776 : {
1777 0 : struct qlw_softc *sc = xsc;
1778 : struct qlw_ccb *ccb;
1779 :
1780 0 : mtx_enter(&sc->sc_ccb_mtx);
1781 0 : ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
1782 0 : if (ccb != NULL) {
1783 0 : SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
1784 : }
1785 0 : mtx_leave(&sc->sc_ccb_mtx);
1786 0 : return (ccb);
1787 : }
1788 :
1789 : void
1790 0 : qlw_put_ccb(void *xsc, void *io)
1791 : {
1792 0 : struct qlw_softc *sc = xsc;
1793 0 : struct qlw_ccb *ccb = io;
1794 :
1795 0 : ccb->ccb_xs = NULL;
1796 0 : mtx_enter(&sc->sc_ccb_mtx);
1797 0 : SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
1798 0 : mtx_leave(&sc->sc_ccb_mtx);
1799 0 : }
|