Line data Source code
1 : /* $OpenBSD: sd.c,v 1.275 2017/12/30 23:08:29 guenther Exp $ */
2 : /* $NetBSD: sd.c,v 1.111 1997/04/02 02:29:41 mycroft Exp $ */
3 :
4 : /*-
5 : * Copyright (c) 1998, 2003, 2004 The NetBSD Foundation, Inc.
6 : * All rights reserved.
7 : *
8 : * This code is derived from software contributed to The NetBSD Foundation
9 : * by Charles M. Hannum.
10 : *
11 : * Redistribution and use in source and binary forms, with or without
12 : * modification, are permitted provided that the following conditions
13 : * are met:
14 : * 1. Redistributions of source code must retain the above copyright
15 : * notice, this list of conditions and the following disclaimer.
16 : * 2. Redistributions in binary form must reproduce the above copyright
17 : * notice, this list of conditions and the following disclaimer in the
18 : * documentation and/or other materials provided with the distribution.
19 : *
20 : * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 : * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 : * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 : * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 : * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 : * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 : * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 : * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 : * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 : * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 : * POSSIBILITY OF SUCH DAMAGE.
31 : */
32 :
33 : /*
34 : * Originally written by Julian Elischer (julian@dialix.oz.au)
35 : * for TRW Financial Systems for use under the MACH(2.5) operating system.
36 : *
37 : * TRW Financial Systems, in accordance with their agreement with Carnegie
38 : * Mellon University, makes this software available to CMU to distribute
39 : * or use in any manner that they see fit as long as this message is kept with
40 : * the software. For this reason TFS also grants any other persons or
41 : * organisations permission to use or modify this software.
42 : *
43 : * TFS supplies this software to be publicly redistributed
44 : * on the understanding that TFS is not responsible for the correct
45 : * functioning of this software in any circumstances.
46 : *
47 : * Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
48 : */
49 :
50 : #include <sys/stdint.h>
51 : #include <sys/param.h>
52 : #include <sys/systm.h>
53 : #include <sys/timeout.h>
54 : #include <sys/fcntl.h>
55 : #include <sys/stat.h>
56 : #include <sys/ioctl.h>
57 : #include <sys/mtio.h>
58 : #include <sys/mutex.h>
59 : #include <sys/buf.h>
60 : #include <sys/uio.h>
61 : #include <sys/malloc.h>
62 : #include <sys/pool.h>
63 : #include <sys/errno.h>
64 : #include <sys/device.h>
65 : #include <sys/disklabel.h>
66 : #include <sys/disk.h>
67 : #include <sys/conf.h>
68 : #include <sys/scsiio.h>
69 : #include <sys/dkio.h>
70 : #include <sys/reboot.h>
71 :
72 : #include <scsi/scsi_all.h>
73 : #include <scsi/scsi_disk.h>
74 : #include <scsi/scsiconf.h>
75 : #include <scsi/sdvar.h>
76 :
77 : #include <ufs/ffs/fs.h> /* for BBSIZE and SBSIZE */
78 :
79 : #include <sys/vnode.h>
80 :
81 : int sdmatch(struct device *, void *, void *);
82 : void sdattach(struct device *, struct device *, void *);
83 : int sdactivate(struct device *, int);
84 : int sddetach(struct device *, int);
85 :
86 : void sdminphys(struct buf *);
87 : int sdgetdisklabel(dev_t, struct sd_softc *, struct disklabel *, int);
88 : void sdstart(struct scsi_xfer *);
89 : int sd_interpret_sense(struct scsi_xfer *);
90 : int sd_read_cap_10(struct sd_softc *, int);
91 : int sd_read_cap_16(struct sd_softc *, int);
92 : int sd_size(struct sd_softc *, int);
93 : int sd_thin_pages(struct sd_softc *, int);
94 : int sd_vpd_block_limits(struct sd_softc *, int);
95 : int sd_vpd_thin(struct sd_softc *, int);
96 : int sd_thin_params(struct sd_softc *, int);
97 : int sd_get_parms(struct sd_softc *, struct disk_parms *, int);
98 : int sd_flush(struct sd_softc *, int);
99 :
100 : void viscpy(u_char *, u_char *, int);
101 :
102 : int sd_ioctl_inquiry(struct sd_softc *, struct dk_inquiry *);
103 : int sd_ioctl_cache(struct sd_softc *, long, struct dk_cache *);
104 :
105 : void sd_cmd_rw6(struct scsi_xfer *, int, u_int64_t, u_int);
106 : void sd_cmd_rw10(struct scsi_xfer *, int, u_int64_t, u_int);
107 : void sd_cmd_rw12(struct scsi_xfer *, int, u_int64_t, u_int);
108 : void sd_cmd_rw16(struct scsi_xfer *, int, u_int64_t, u_int);
109 :
110 : void sd_buf_done(struct scsi_xfer *);
111 :
112 : struct cfattach sd_ca = {
113 : sizeof(struct sd_softc), sdmatch, sdattach,
114 : sddetach, sdactivate
115 : };
116 :
117 : struct cfdriver sd_cd = {
118 : NULL, "sd", DV_DISK
119 : };
120 :
121 : const struct scsi_inquiry_pattern sd_patterns[] = {
122 : {T_DIRECT, T_FIXED,
123 : "", "", ""},
124 : {T_DIRECT, T_REMOV,
125 : "", "", ""},
126 : {T_RDIRECT, T_FIXED,
127 : "", "", ""},
128 : {T_RDIRECT, T_REMOV,
129 : "", "", ""},
130 : {T_OPTICAL, T_FIXED,
131 : "", "", ""},
132 : {T_OPTICAL, T_REMOV,
133 : "", "", ""},
134 : };
135 :
136 : #define sdlookup(unit) (struct sd_softc *)disk_lookup(&sd_cd, (unit))
137 :
138 : int
139 0 : sdmatch(struct device *parent, void *match, void *aux)
140 : {
141 0 : struct scsi_attach_args *sa = aux;
142 0 : int priority;
143 :
144 0 : (void)scsi_inqmatch(sa->sa_inqbuf,
145 : sd_patterns, nitems(sd_patterns),
146 : sizeof(sd_patterns[0]), &priority);
147 :
148 0 : return (priority);
149 0 : }
150 :
151 : /*
152 : * The routine called by the low level scsi routine when it discovers
153 : * a device suitable for this driver.
154 : */
155 : void
156 0 : sdattach(struct device *parent, struct device *self, void *aux)
157 : {
158 0 : struct sd_softc *sc = (struct sd_softc *)self;
159 0 : struct scsi_attach_args *sa = aux;
160 0 : struct disk_parms *dp = &sc->params;
161 0 : struct scsi_link *link = sa->sa_sc_link;
162 0 : int sd_autoconf = scsi_autoconf | SCSI_SILENT |
163 0 : SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_IGNORE_MEDIA_CHANGE;
164 0 : struct dk_cache dkc;
165 : int error, result, sortby = BUFQ_DEFAULT;
166 :
167 : SC_DEBUG(link, SDEV_DB2, ("sdattach:\n"));
168 :
169 : /*
170 : * Store information needed to contact our base driver
171 : */
172 0 : sc->sc_link = link;
173 0 : link->interpret_sense = sd_interpret_sense;
174 0 : link->device_softc = sc;
175 :
176 0 : if ((link->flags & SDEV_ATAPI) && (link->flags & SDEV_REMOVABLE))
177 0 : link->quirks |= SDEV_NOSYNCCACHE;
178 :
179 0 : if (!(link->inqdata.flags & SID_RelAdr))
180 0 : link->quirks |= SDEV_ONLYBIG;
181 :
182 : /*
183 : * Note if this device is ancient. This is used in sdminphys().
184 : */
185 0 : if (!(link->flags & SDEV_ATAPI) &&
186 0 : SCSISPC(sa->sa_inqbuf->version) == 0)
187 0 : sc->flags |= SDF_ANCIENT;
188 :
189 : /*
190 : * Use the subdriver to request information regarding
191 : * the drive. We cannot use interrupts yet, so the
192 : * request must specify this.
193 : */
194 0 : printf("\n");
195 :
196 0 : scsi_xsh_set(&sc->sc_xsh, link, sdstart);
197 0 : timeout_set(&sc->sc_timeout, (void (*)(void *))scsi_xsh_add,
198 0 : &sc->sc_xsh);
199 :
200 : /* Spin up non-UMASS devices ready or not. */
201 0 : if ((link->flags & SDEV_UMASS) == 0)
202 0 : scsi_start(link, SSS_START, sd_autoconf);
203 :
204 : /*
205 : * Some devices (e.g. BlackBerry Pearl) won't admit they have
206 : * media loaded unless it's been locked in.
207 : */
208 0 : if ((link->flags & SDEV_REMOVABLE) != 0)
209 0 : scsi_prevent(link, PR_PREVENT, sd_autoconf);
210 :
211 : /* Check that it is still responding and ok. */
212 0 : error = scsi_test_unit_ready(sc->sc_link, TEST_READY_RETRIES * 3,
213 : sd_autoconf);
214 :
215 0 : if (error)
216 0 : result = SDGP_RESULT_OFFLINE;
217 : else
218 0 : result = sd_get_parms(sc, &sc->params, sd_autoconf);
219 :
220 0 : if ((link->flags & SDEV_REMOVABLE) != 0)
221 0 : scsi_prevent(link, PR_ALLOW, sd_autoconf);
222 :
223 0 : switch (result) {
224 : case SDGP_RESULT_OK:
225 0 : printf("%s: %lluMB, %lu bytes/sector, %llu sectors",
226 0 : sc->sc_dev.dv_xname,
227 0 : dp->disksize / (1048576 / dp->secsize), dp->secsize,
228 : dp->disksize);
229 0 : if (ISSET(sc->flags, SDF_THIN)) {
230 : sortby = BUFQ_FIFO;
231 0 : printf(", thin");
232 0 : }
233 0 : if (ISSET(link->flags, SDEV_READONLY)) {
234 0 : printf(", readonly");
235 0 : }
236 0 : printf("\n");
237 0 : break;
238 :
239 : case SDGP_RESULT_OFFLINE:
240 : break;
241 :
242 : #ifdef DIAGNOSTIC
243 : default:
244 0 : panic("sdattach: unknown result (%#x) from get_parms", result);
245 : break;
246 : #endif
247 : }
248 :
249 : /*
250 : * Initialize disk structures.
251 : */
252 0 : sc->sc_dk.dk_name = sc->sc_dev.dv_xname;
253 0 : bufq_init(&sc->sc_bufq, sortby);
254 :
255 : /*
256 : * Enable write cache by default.
257 : */
258 0 : memset(&dkc, 0, sizeof(dkc));
259 0 : if (sd_ioctl_cache(sc, DIOCGCACHE, &dkc) == 0 && dkc.wrcache == 0) {
260 0 : dkc.wrcache = 1;
261 0 : sd_ioctl_cache(sc, DIOCSCACHE, &dkc);
262 0 : }
263 :
264 : /* Attach disk. */
265 0 : disk_attach(&sc->sc_dev, &sc->sc_dk);
266 0 : }
267 :
268 : int
269 0 : sdactivate(struct device *self, int act)
270 : {
271 : struct scsi_link *link;
272 0 : struct sd_softc *sc = (struct sd_softc *)self;
273 :
274 0 : if (sc->flags & SDF_DYING)
275 0 : return (ENXIO);
276 0 : link = sc->sc_link;
277 :
278 0 : switch (act) {
279 : case DVACT_SUSPEND:
280 : /*
281 : * We flush the cache, since our next step before
282 : * DVACT_POWERDOWN might be a hibernate operation.
283 : */
284 0 : if ((sc->flags & SDF_DIRTY) != 0)
285 0 : sd_flush(sc, SCSI_AUTOCONF);
286 : break;
287 : case DVACT_POWERDOWN:
288 : /*
289 : * Stop the disk. Stopping the disk should flush the
290 : * cache, but we are paranoid so we flush the cache
291 : * first. We're cold at this point, so we poll for
292 : * completion.
293 : */
294 0 : if ((sc->flags & SDF_DIRTY) != 0)
295 0 : sd_flush(sc, SCSI_AUTOCONF);
296 0 : if (boothowto & RB_POWERDOWN)
297 0 : scsi_start(link, SSS_STOP,
298 : SCSI_IGNORE_ILLEGAL_REQUEST |
299 : SCSI_IGNORE_NOT_READY | SCSI_AUTOCONF);
300 : break;
301 : case DVACT_RESUME:
302 0 : scsi_start(link, SSS_START,
303 : SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_AUTOCONF);
304 0 : break;
305 : case DVACT_DEACTIVATE:
306 0 : sc->flags |= SDF_DYING;
307 0 : timeout_del(&sc->sc_timeout);
308 0 : scsi_xsh_del(&sc->sc_xsh);
309 0 : break;
310 : }
311 0 : return (0);
312 0 : }
313 :
314 : int
315 0 : sddetach(struct device *self, int flags)
316 : {
317 0 : struct sd_softc *sc = (struct sd_softc *)self;
318 :
319 0 : bufq_drain(&sc->sc_bufq);
320 :
321 0 : disk_gone(sdopen, self->dv_unit);
322 :
323 : /* Detach disk. */
324 0 : bufq_destroy(&sc->sc_bufq);
325 0 : disk_detach(&sc->sc_dk);
326 :
327 0 : return (0);
328 : }
329 :
330 : /*
331 : * Open the device. Make sure the partition info is as up-to-date as can be.
332 : */
333 : int
334 0 : sdopen(dev_t dev, int flag, int fmt, struct proc *p)
335 : {
336 : struct scsi_link *link;
337 : struct sd_softc *sc;
338 : int error = 0, part, rawopen, unit;
339 :
340 0 : unit = DISKUNIT(dev);
341 0 : part = DISKPART(dev);
342 :
343 0 : rawopen = (part == RAW_PART) && (fmt == S_IFCHR);
344 :
345 0 : sc = sdlookup(unit);
346 0 : if (sc == NULL)
347 0 : return (ENXIO);
348 0 : if (sc->flags & SDF_DYING) {
349 0 : device_unref(&sc->sc_dev);
350 0 : return (ENXIO);
351 : }
352 0 : link = sc->sc_link;
353 :
354 0 : if (ISSET(flag, FWRITE) && ISSET(link->flags, SDEV_READONLY)) {
355 0 : device_unref(&sc->sc_dev);
356 0 : return (EACCES);
357 : }
358 :
359 : SC_DEBUG(link, SDEV_DB1,
360 : ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
361 : sd_cd.cd_ndevs, part));
362 :
363 0 : if ((error = disk_lock(&sc->sc_dk)) != 0) {
364 0 : device_unref(&sc->sc_dev);
365 0 : return (error);
366 : }
367 :
368 0 : if (sc->sc_dk.dk_openmask != 0) {
369 : /*
370 : * If any partition is open, but the disk has been invalidated,
371 : * disallow further opens of non-raw partitions.
372 : */
373 0 : if (sc->flags & SDF_DYING) {
374 : error = ENXIO;
375 0 : goto die;
376 : }
377 0 : if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
378 : if (rawopen)
379 : goto out;
380 : error = EIO;
381 0 : goto bad;
382 : }
383 : } else {
384 : /* Spin up non-UMASS devices ready or not. */
385 0 : if (sc->flags & SDF_DYING) {
386 : error = ENXIO;
387 0 : goto die;
388 : }
389 0 : if ((link->flags & SDEV_UMASS) == 0)
390 0 : scsi_start(link, SSS_START, (rawopen ? SCSI_SILENT :
391 0 : 0) | SCSI_IGNORE_ILLEGAL_REQUEST |
392 : SCSI_IGNORE_MEDIA_CHANGE);
393 :
394 : /* Use sd_interpret_sense() for sense errors.
395 : *
396 : * But only after spinning the disk up! Just in case a broken
397 : * device returns "Initialization command required." and causes
398 : * a loop of scsi_start() calls.
399 : */
400 0 : if (sc->flags & SDF_DYING) {
401 : error = ENXIO;
402 0 : goto die;
403 : }
404 0 : link->flags |= SDEV_OPEN;
405 :
406 : /*
407 : * Try to prevent the unloading of a removable device while
408 : * it's open. But allow the open to proceed if the device can't
409 : * be locked in.
410 : */
411 0 : if ((link->flags & SDEV_REMOVABLE) != 0) {
412 0 : scsi_prevent(link, PR_PREVENT, SCSI_SILENT |
413 : SCSI_IGNORE_ILLEGAL_REQUEST |
414 : SCSI_IGNORE_MEDIA_CHANGE);
415 0 : }
416 :
417 : /* Check that it is still responding and ok. */
418 0 : if (sc->flags & SDF_DYING) {
419 : error = ENXIO;
420 0 : goto die;
421 : }
422 0 : error = scsi_test_unit_ready(link,
423 : TEST_READY_RETRIES, SCSI_SILENT |
424 : SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_IGNORE_MEDIA_CHANGE);
425 0 : if (error) {
426 0 : if (rawopen) {
427 : error = 0;
428 0 : goto out;
429 : } else
430 : goto bad;
431 : }
432 :
433 : /* Load the physical device parameters. */
434 0 : if (sc->flags & SDF_DYING) {
435 : error = ENXIO;
436 0 : goto die;
437 : }
438 0 : link->flags |= SDEV_MEDIA_LOADED;
439 0 : if (sd_get_parms(sc, &sc->params, (rawopen ? SCSI_SILENT : 0))
440 0 : == SDGP_RESULT_OFFLINE) {
441 0 : if (sc->flags & SDF_DYING) {
442 : error = ENXIO;
443 0 : goto die;
444 : }
445 0 : link->flags &= ~SDEV_MEDIA_LOADED;
446 : error = ENXIO;
447 0 : goto bad;
448 : }
449 : SC_DEBUG(link, SDEV_DB3, ("Params loaded\n"));
450 :
451 : /* Load the partition info if not already loaded. */
452 0 : error = sdgetdisklabel(dev, sc, sc->sc_dk.dk_label, 0);
453 0 : if (error == EIO || error == ENXIO)
454 : goto bad;
455 : SC_DEBUG(link, SDEV_DB3, ("Disklabel loaded\n"));
456 : }
457 :
458 : out:
459 0 : if ((error = disk_openpart(&sc->sc_dk, part, fmt, 1)) != 0)
460 0 : goto bad;
461 :
462 : SC_DEBUG(link, SDEV_DB3, ("open complete\n"));
463 :
464 : /* It's OK to fall through because dk_openmask is now non-zero. */
465 : bad:
466 0 : if (sc->sc_dk.dk_openmask == 0) {
467 0 : if (sc->flags & SDF_DYING) {
468 : error = ENXIO;
469 0 : goto die;
470 : }
471 0 : if ((link->flags & SDEV_REMOVABLE) != 0)
472 0 : scsi_prevent(link, PR_ALLOW, SCSI_SILENT |
473 : SCSI_IGNORE_ILLEGAL_REQUEST |
474 : SCSI_IGNORE_MEDIA_CHANGE);
475 0 : if (sc->flags & SDF_DYING) {
476 : error = ENXIO;
477 0 : goto die;
478 : }
479 0 : link->flags &= ~(SDEV_OPEN | SDEV_MEDIA_LOADED);
480 0 : }
481 :
482 : die:
483 0 : disk_unlock(&sc->sc_dk);
484 0 : device_unref(&sc->sc_dev);
485 0 : return (error);
486 0 : }
487 :
488 : /*
489 : * Close the device. Only called if we are the last occurrence of an open
490 : * device. Convenient now but usually a pain.
491 : */
492 : int
493 0 : sdclose(dev_t dev, int flag, int fmt, struct proc *p)
494 : {
495 : struct scsi_link *link;
496 : struct sd_softc *sc;
497 0 : int part = DISKPART(dev);
498 : int error = 0;
499 :
500 0 : sc = sdlookup(DISKUNIT(dev));
501 0 : if (sc == NULL)
502 0 : return (ENXIO);
503 0 : if (sc->flags & SDF_DYING) {
504 0 : device_unref(&sc->sc_dev);
505 0 : return (ENXIO);
506 : }
507 0 : link = sc->sc_link;
508 :
509 0 : disk_lock_nointr(&sc->sc_dk);
510 :
511 0 : disk_closepart(&sc->sc_dk, part, fmt);
512 :
513 0 : if (((flag & FWRITE) != 0 || sc->sc_dk.dk_openmask == 0) &&
514 0 : (sc->flags & SDF_DIRTY) != 0)
515 0 : sd_flush(sc, 0);
516 :
517 0 : if (sc->sc_dk.dk_openmask == 0) {
518 0 : if (sc->flags & SDF_DYING) {
519 : error = ENXIO;
520 0 : goto die;
521 : }
522 0 : if ((link->flags & SDEV_REMOVABLE) != 0)
523 0 : scsi_prevent(link, PR_ALLOW,
524 : SCSI_IGNORE_ILLEGAL_REQUEST |
525 : SCSI_IGNORE_NOT_READY | SCSI_SILENT);
526 0 : if (sc->flags & SDF_DYING) {
527 : error = ENXIO;
528 0 : goto die;
529 : }
530 0 : link->flags &= ~(SDEV_OPEN | SDEV_MEDIA_LOADED);
531 :
532 0 : if (link->flags & SDEV_EJECTING) {
533 0 : scsi_start(link, SSS_STOP|SSS_LOEJ, 0);
534 0 : if (sc->flags & SDF_DYING) {
535 : error = ENXIO;
536 0 : goto die;
537 : }
538 0 : link->flags &= ~SDEV_EJECTING;
539 0 : }
540 :
541 0 : timeout_del(&sc->sc_timeout);
542 0 : scsi_xsh_del(&sc->sc_xsh);
543 0 : }
544 :
545 : die:
546 0 : disk_unlock(&sc->sc_dk);
547 0 : device_unref(&sc->sc_dev);
548 0 : return (error);
549 0 : }
550 :
551 : /*
552 : * Actually translate the requested transfer into one the physical driver
553 : * can understand. The transfer is described by a buf and will include
554 : * only one physical transfer.
555 : */
556 : void
557 0 : sdstrategy(struct buf *bp)
558 : {
559 : struct scsi_link *link;
560 : struct sd_softc *sc;
561 : int s;
562 :
563 0 : sc = sdlookup(DISKUNIT(bp->b_dev));
564 0 : if (sc == NULL) {
565 0 : bp->b_error = ENXIO;
566 0 : goto bad;
567 : }
568 0 : if (sc->flags & SDF_DYING) {
569 0 : bp->b_error = ENXIO;
570 0 : goto bad;
571 : }
572 0 : link = sc->sc_link;
573 :
574 : SC_DEBUG(link, SDEV_DB2, ("sdstrategy: %ld bytes @ blk %lld\n",
575 : bp->b_bcount, (long long)bp->b_blkno));
576 : /*
577 : * If the device has been made invalid, error out
578 : */
579 0 : if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
580 0 : if (link->flags & SDEV_OPEN)
581 0 : bp->b_error = EIO;
582 : else
583 0 : bp->b_error = ENODEV;
584 : goto bad;
585 : }
586 :
587 : /* Validate the request. */
588 0 : if (bounds_check_with_label(bp, sc->sc_dk.dk_label) == -1)
589 : goto done;
590 :
591 : /* Place it in the queue of disk activities for this disk. */
592 0 : bufq_queue(&sc->sc_bufq, bp);
593 :
594 : /*
595 : * Tell the device to get going on the transfer if it's
596 : * not doing anything; otherwise just wait for completion.
597 : */
598 0 : scsi_xsh_add(&sc->sc_xsh);
599 :
600 0 : device_unref(&sc->sc_dev);
601 0 : return;
602 :
603 : bad:
604 0 : bp->b_flags |= B_ERROR;
605 0 : bp->b_resid = bp->b_bcount;
606 : done:
607 0 : s = splbio();
608 0 : biodone(bp);
609 0 : splx(s);
610 0 : if (sc != NULL)
611 0 : device_unref(&sc->sc_dev);
612 0 : }
613 :
614 : void
615 0 : sd_cmd_rw6(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
616 : {
617 0 : struct scsi_rw *cmd = (struct scsi_rw *)xs->cmd;
618 :
619 0 : cmd->opcode = read ? READ_COMMAND : WRITE_COMMAND;
620 0 : _lto3b(secno, cmd->addr);
621 0 : cmd->length = nsecs;
622 :
623 0 : xs->cmdlen = sizeof(*cmd);
624 0 : }
625 :
626 : void
627 0 : sd_cmd_rw10(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
628 : {
629 0 : struct scsi_rw_big *cmd = (struct scsi_rw_big *)xs->cmd;
630 :
631 0 : cmd->opcode = read ? READ_BIG : WRITE_BIG;
632 0 : _lto4b(secno, cmd->addr);
633 0 : _lto2b(nsecs, cmd->length);
634 :
635 0 : xs->cmdlen = sizeof(*cmd);
636 0 : }
637 :
638 : void
639 0 : sd_cmd_rw12(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
640 : {
641 0 : struct scsi_rw_12 *cmd = (struct scsi_rw_12 *)xs->cmd;
642 :
643 0 : cmd->opcode = read ? READ_12 : WRITE_12;
644 0 : _lto4b(secno, cmd->addr);
645 0 : _lto4b(nsecs, cmd->length);
646 :
647 0 : xs->cmdlen = sizeof(*cmd);
648 0 : }
649 :
650 : void
651 0 : sd_cmd_rw16(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
652 : {
653 0 : struct scsi_rw_16 *cmd = (struct scsi_rw_16 *)xs->cmd;
654 :
655 0 : cmd->opcode = read ? READ_16 : WRITE_16;
656 0 : _lto8b(secno, cmd->addr);
657 0 : _lto4b(nsecs, cmd->length);
658 :
659 0 : xs->cmdlen = sizeof(*cmd);
660 0 : }
661 :
662 : /*
663 : * sdstart looks to see if there is a buf waiting for the device
664 : * and that the device is not already busy. If both are true,
665 : * it dequeues the buf and creates a scsi command to perform the
666 : * transfer in the buf. The transfer request will call scsi_done
667 : * on completion, which will in turn call this routine again
668 : * so that the next queued transfer is performed.
669 : * The bufs are queued by the strategy routine (sdstrategy).
670 : *
671 : * This routine is also called after other non-queued requests
672 : * have been made of the scsi driver, to ensure that the queue
673 : * continues to be drained.
674 : */
675 : void
676 0 : sdstart(struct scsi_xfer *xs)
677 : {
678 0 : struct scsi_link *link = xs->sc_link;
679 0 : struct sd_softc *sc = link->device_softc;
680 : struct buf *bp;
681 : u_int64_t secno;
682 : int nsecs;
683 : int read;
684 : struct partition *p;
685 :
686 0 : if (sc->flags & SDF_DYING) {
687 0 : scsi_xs_put(xs);
688 0 : return;
689 : }
690 0 : if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
691 0 : bufq_drain(&sc->sc_bufq);
692 0 : scsi_xs_put(xs);
693 0 : return;
694 : }
695 :
696 0 : bp = bufq_dequeue(&sc->sc_bufq);
697 0 : if (bp == NULL) {
698 0 : scsi_xs_put(xs);
699 0 : return;
700 : }
701 :
702 0 : secno = DL_BLKTOSEC(sc->sc_dk.dk_label, bp->b_blkno);
703 :
704 0 : p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
705 0 : secno += DL_GETPOFFSET(p);
706 0 : nsecs = howmany(bp->b_bcount, sc->sc_dk.dk_label->d_secsize);
707 0 : read = bp->b_flags & B_READ;
708 :
709 : /*
710 : * Fill out the scsi command. If the transfer will
711 : * fit in a "small" cdb, use it.
712 : */
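	/*
	 * The mask tests below mirror the CDB limits: READ/WRITE(6) carries
	 * a 21-bit LBA and an 8-bit sector count, READ/WRITE(10) a 32-bit
	 * LBA and a 16-bit count, READ/WRITE(12) a 32-bit LBA and a 32-bit
	 * count, and READ/WRITE(16) a 64-bit LBA and a 32-bit count.
	 */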
713 0 : if (!(link->flags & SDEV_ATAPI) &&
714 0 : !(link->quirks & SDEV_ONLYBIG) &&
715 0 : ((secno & 0x1fffff) == secno) &&
716 0 : ((nsecs & 0xff) == nsecs))
717 0 : sd_cmd_rw6(xs, read, secno, nsecs);
718 0 : else if (((secno & 0xffffffff) == secno) &&
719 0 : ((nsecs & 0xffff) == nsecs))
720 0 : sd_cmd_rw10(xs, read, secno, nsecs);
721 0 : else if (((secno & 0xffffffff) == secno) &&
722 : ((nsecs & 0xffffffff) == nsecs))
723 0 : sd_cmd_rw12(xs, read, secno, nsecs);
724 : else
725 0 : sd_cmd_rw16(xs, read, secno, nsecs);
726 :
727 0 : xs->flags |= (read ? SCSI_DATA_IN : SCSI_DATA_OUT);
728 0 : xs->timeout = 60000;
729 0 : xs->data = bp->b_data;
730 0 : xs->datalen = bp->b_bcount;
731 :
732 0 : xs->done = sd_buf_done;
733 0 : xs->cookie = bp;
734 0 : xs->bp = bp;
735 :
736 : /* Instrumentation. */
737 0 : disk_busy(&sc->sc_dk);
738 :
739 : /* Mark disk as dirty. */
740 0 : if (!read)
741 0 : sc->flags |= SDF_DIRTY;
742 :
743 0 : scsi_xs_exec(xs);
744 :
745 : /* move on to the next io */
746 0 : if (ISSET(sc->flags, SDF_WAITING))
747 0 : CLR(sc->flags, SDF_WAITING);
748 0 : else if (bufq_peek(&sc->sc_bufq))
749 0 : scsi_xsh_add(&sc->sc_xsh);
750 0 : }
751 :
752 : void
753 0 : sd_buf_done(struct scsi_xfer *xs)
754 : {
755 0 : struct sd_softc *sc = xs->sc_link->device_softc;
756 0 : struct buf *bp = xs->cookie;
757 : int error, s;
758 :
759 0 : switch (xs->error) {
760 : case XS_NOERROR:
761 0 : bp->b_error = 0;
762 0 : bp->b_resid = xs->resid;
763 0 : break;
764 :
765 : case XS_SENSE:
766 : case XS_SHORTSENSE:
767 : #ifdef SCSIDEBUG
768 : scsi_sense_print_debug(xs);
769 : #endif
770 0 : error = sd_interpret_sense(xs);
771 0 : if (error == 0) {
772 0 : bp->b_error = 0;
773 0 : bp->b_resid = xs->resid;
774 0 : break;
775 : }
776 0 : if (error != ERESTART) {
777 0 : bp->b_error = error;
778 0 : xs->retries = 0;
779 0 : }
780 : goto retry;
781 :
782 : case XS_BUSY:
783 0 : if (xs->retries) {
784 0 : if (scsi_delay(xs, 1) != ERESTART)
785 0 : xs->retries = 0;
786 : }
787 : goto retry;
788 :
789 : case XS_TIMEOUT:
790 : retry:
791 0 : if (xs->retries--) {
792 0 : scsi_xs_exec(xs);
793 0 : return;
794 : }
795 : /* FALLTHROUGH */
796 :
797 : default:
798 0 : if (bp->b_error == 0)
799 0 : bp->b_error = EIO;
800 0 : bp->b_flags |= B_ERROR;
801 0 : bp->b_resid = bp->b_bcount;
802 0 : break;
803 : }
804 :
805 0 : disk_unbusy(&sc->sc_dk, bp->b_bcount - xs->resid, bp->b_blkno,
806 0 : bp->b_flags & B_READ);
807 :
808 0 : s = splbio();
809 0 : biodone(bp);
810 0 : splx(s);
811 0 : scsi_xs_put(xs);
812 0 : }
813 :
814 : void
815 0 : sdminphys(struct buf *bp)
816 : {
817 : struct scsi_link *link;
818 : struct sd_softc *sc;
819 : long max;
820 :
821 0 : sc = sdlookup(DISKUNIT(bp->b_dev));
822 0 : if (sc == NULL)
823 0 : return; /* XXX - right way to fail this? */
824 0 : if (sc->flags & SDF_DYING) {
825 0 : device_unref(&sc->sc_dev);
826 0 : return;
827 : }
828 0 : link = sc->sc_link;
829 :
830 : /*
831 : * If the device is ancient, we want to make sure that
832 : * the transfer fits into a 6-byte cdb.
833 : *
834 : * XXX Note that the SCSI-I spec says that 256-block transfers
835 : * are allowed in a 6-byte read/write, and are specified
836 : * by setting the "length" to 0. However, we're conservative
837 : * here, allowing only 255-block transfers in case an
838 : * ancient device gets confused by length == 0. A length of 0
839 : * in a 10-byte read/write actually means 0 blocks.
840 : */
841 0 : if (sc->flags & SDF_ANCIENT) {
842 0 : max = sc->sc_dk.dk_label->d_secsize * 0xff;
843 :
844 0 : if (bp->b_bcount > max)
845 0 : bp->b_bcount = max;
846 : }
847 :
848 0 : (*link->adapter->scsi_minphys)(bp, link);
849 :
850 0 : device_unref(&sc->sc_dev);
851 0 : }
852 :
853 : int
854 0 : sdread(dev_t dev, struct uio *uio, int ioflag)
855 : {
856 0 : return (physio(sdstrategy, dev, B_READ, sdminphys, uio));
857 : }
858 :
859 : int
860 0 : sdwrite(dev_t dev, struct uio *uio, int ioflag)
861 : {
862 0 : return (physio(sdstrategy, dev, B_WRITE, sdminphys, uio));
863 : }
864 :
865 : /*
866 : * Perform special action on behalf of the user
867 : * Knows about the internals of this device
868 : */
869 : int
870 0 : sdioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
871 : {
872 : struct scsi_link *link;
873 : struct sd_softc *sc;
874 : struct disklabel *lp;
875 : int error = 0;
876 0 : int part = DISKPART(dev);
877 :
878 0 : sc = sdlookup(DISKUNIT(dev));
879 0 : if (sc == NULL)
880 0 : return (ENXIO);
881 0 : if (sc->flags & SDF_DYING) {
882 0 : device_unref(&sc->sc_dev);
883 0 : return (ENXIO);
884 : }
885 0 : link = sc->sc_link;
886 :
887 : SC_DEBUG(link, SDEV_DB2, ("sdioctl 0x%lx\n", cmd));
888 :
889 : /*
890 : * If the device is not valid, abandon ship.
891 : */
892 0 : if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
893 0 : switch (cmd) {
894 : case DIOCLOCK:
895 : case DIOCEJECT:
896 : case SCIOCIDENTIFY:
897 : case SCIOCCOMMAND:
898 : case SCIOCDEBUG:
899 0 : if (part == RAW_PART)
900 : break;
901 : /* FALLTHROUGH */
902 : default:
903 0 : if ((link->flags & SDEV_OPEN) == 0) {
904 : error = ENODEV;
905 0 : goto exit;
906 : } else {
907 : error = EIO;
908 0 : goto exit;
909 : }
910 : }
911 : }
912 :
913 0 : switch (cmd) {
914 : case DIOCRLDINFO:
915 0 : lp = malloc(sizeof(*lp), M_TEMP, M_WAITOK);
916 0 : sdgetdisklabel(dev, sc, lp, 0);
917 0 : memcpy(sc->sc_dk.dk_label, lp, sizeof(*lp));
918 0 : free(lp, M_TEMP, sizeof(*lp));
919 0 : goto exit;
920 :
921 : case DIOCGPDINFO:
922 0 : sdgetdisklabel(dev, sc, (struct disklabel *)addr, 1);
923 0 : goto exit;
924 :
925 : case DIOCGDINFO:
926 0 : *(struct disklabel *)addr = *(sc->sc_dk.dk_label);
927 0 : goto exit;
928 :
929 : case DIOCGPART:
930 0 : ((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
931 0 : ((struct partinfo *)addr)->part =
932 0 : &sc->sc_dk.dk_label->d_partitions[DISKPART(dev)];
933 0 : goto exit;
934 :
935 : case DIOCWDINFO:
936 : case DIOCSDINFO:
937 0 : if ((flag & FWRITE) == 0) {
938 : error = EBADF;
939 0 : goto exit;
940 : }
941 :
942 0 : if ((error = disk_lock(&sc->sc_dk)) != 0)
943 : goto exit;
944 :
945 0 : error = setdisklabel(sc->sc_dk.dk_label,
946 0 : (struct disklabel *)addr, sc->sc_dk.dk_openmask);
947 0 : if (error == 0) {
948 0 : if (cmd == DIOCWDINFO)
949 0 : error = writedisklabel(DISKLABELDEV(dev),
950 0 : sdstrategy, sc->sc_dk.dk_label);
951 : }
952 :
953 0 : disk_unlock(&sc->sc_dk);
954 0 : goto exit;
955 :
956 : case DIOCLOCK:
957 0 : error = scsi_prevent(link,
958 0 : (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0);
959 0 : goto exit;
960 :
961 : case MTIOCTOP:
962 0 : if (((struct mtop *)addr)->mt_op != MTOFFL) {
963 : error = EIO;
964 0 : goto exit;
965 : }
966 : /* FALLTHROUGH */
967 : case DIOCEJECT:
968 0 : if ((link->flags & SDEV_REMOVABLE) == 0) {
969 : error = ENOTTY;
970 0 : goto exit;
971 : }
972 0 : link->flags |= SDEV_EJECTING;
973 0 : goto exit;
974 :
975 : case DIOCINQ:
976 0 : error = scsi_do_ioctl(link, cmd, addr, flag);
977 0 : if (error == ENOTTY)
978 0 : error = sd_ioctl_inquiry(sc,
979 0 : (struct dk_inquiry *)addr);
980 : goto exit;
981 :
982 : case DIOCSCACHE:
983 0 : if (!ISSET(flag, FWRITE)) {
984 : error = EBADF;
985 0 : goto exit;
986 : }
987 : /* FALLTHROUGH */
988 : case DIOCGCACHE:
989 0 : error = sd_ioctl_cache(sc, cmd, (struct dk_cache *)addr);
990 0 : goto exit;
991 :
992 : case DIOCCACHESYNC:
993 0 : if (!ISSET(flag, FWRITE)) {
994 : error = EBADF;
995 0 : goto exit;
996 : }
997 0 : if ((sc->flags & SDF_DIRTY) != 0 || *(int *)addr != 0)
998 0 : error = sd_flush(sc, 0);
999 : goto exit;
1000 :
1001 : default:
1002 0 : if (part != RAW_PART) {
1003 : error = ENOTTY;
1004 0 : goto exit;
1005 : }
1006 0 : error = scsi_do_ioctl(link, cmd, addr, flag);
1007 0 : }
1008 :
1009 : exit:
1010 0 : device_unref(&sc->sc_dev);
1011 0 : return (error);
1012 0 : }
1013 :
1014 : int
1015 0 : sd_ioctl_inquiry(struct sd_softc *sc, struct dk_inquiry *di)
1016 : {
1017 : struct scsi_link *link;
1018 : struct scsi_vpd_serial *vpd;
1019 :
1020 0 : vpd = dma_alloc(sizeof(*vpd), PR_WAITOK | PR_ZERO);
1021 :
1022 0 : if (sc->flags & SDF_DYING) {
1023 0 : dma_free(vpd, sizeof(*vpd));
1024 0 : return (ENXIO);
1025 : }
1026 0 : link = sc->sc_link;
1027 :
1028 0 : bzero(di, sizeof(struct dk_inquiry));
1029 0 : scsi_strvis(di->vendor, link->inqdata.vendor,
1030 : sizeof(link->inqdata.vendor));
1031 0 : scsi_strvis(di->product, link->inqdata.product,
1032 : sizeof(link->inqdata.product));
1033 0 : scsi_strvis(di->revision, link->inqdata.revision,
1034 : sizeof(link->inqdata.revision));
1035 :
1036 : /* the serial vpd page is optional */
1037 0 : if (scsi_inquire_vpd(link, vpd, sizeof(*vpd), SI_PG_SERIAL, 0) == 0)
1038 0 : scsi_strvis(di->serial, vpd->serial, sizeof(vpd->serial));
1039 : else
1040 0 : strlcpy(di->serial, "(unknown)", sizeof(vpd->serial));
1041 :
1042 0 : dma_free(vpd, sizeof(*vpd));
1043 0 : return (0);
1044 0 : }
1045 :
1046 : int
1047 0 : sd_ioctl_cache(struct sd_softc *sc, long cmd, struct dk_cache *dkc)
1048 : {
1049 : struct scsi_link *link;
1050 : union scsi_mode_sense_buf *buf;
1051 0 : struct page_caching_mode *mode = NULL;
1052 : u_int wrcache, rdcache;
1053 0 : int big;
1054 : int rv;
1055 :
1056 0 : if (sc->flags & SDF_DYING)
1057 0 : return (ENXIO);
1058 0 : link = sc->sc_link;
1059 :
1060 0 : if (ISSET(link->flags, SDEV_UMASS))
1061 0 : return (EOPNOTSUPP);
1062 :
1063 : /* see if the adapter has special handling */
1064 0 : rv = scsi_do_ioctl(link, cmd, (caddr_t)dkc, 0);
1065 0 : if (rv != ENOTTY)
1066 0 : return (rv);
1067 :
1068 0 : buf = dma_alloc(sizeof(*buf), PR_WAITOK);
1069 0 : if (buf == NULL)
1070 0 : return (ENOMEM);
1071 :
1072 0 : if (sc->flags & SDF_DYING) {
1073 : rv = ENXIO;
1074 0 : goto done;
1075 : }
1076 0 : rv = scsi_do_mode_sense(link, PAGE_CACHING_MODE,
1077 0 : buf, (void **)&mode, NULL, NULL, NULL,
1078 0 : sizeof(*mode) - 4, scsi_autoconf | SCSI_SILENT, &big);
1079 0 : if (rv != 0)
1080 : goto done;
1081 :
1082 0 : if ((mode == NULL) || (!DISK_PGCODE(mode, PAGE_CACHING_MODE))) {
1083 : rv = EIO;
1084 0 : goto done;
1085 : }
1086 :
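	/*
	 * WCE set means the write cache is enabled; RCD set means the read
	 * cache is *disabled*, hence the inverted test for rdcache.
	 */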
1087 0 : wrcache = (ISSET(mode->flags, PG_CACHE_FL_WCE) ? 1 : 0);
1088 0 : rdcache = (ISSET(mode->flags, PG_CACHE_FL_RCD) ? 0 : 1);
1089 :
1090 0 : switch (cmd) {
1091 : case DIOCGCACHE:
1092 0 : dkc->wrcache = wrcache;
1093 0 : dkc->rdcache = rdcache;
1094 0 : break;
1095 :
1096 : case DIOCSCACHE:
1097 0 : if (dkc->wrcache == wrcache && dkc->rdcache == rdcache)
1098 : break;
1099 :
1100 0 : if (dkc->wrcache)
1101 0 : SET(mode->flags, PG_CACHE_FL_WCE);
1102 : else
1103 0 : CLR(mode->flags, PG_CACHE_FL_WCE);
1104 :
1105 0 : if (dkc->rdcache)
1106 0 : CLR(mode->flags, PG_CACHE_FL_RCD);
1107 : else
1108 0 : SET(mode->flags, PG_CACHE_FL_RCD);
1109 :
1110 0 : if (sc->flags & SDF_DYING) {
1111 : rv = ENXIO;
1112 0 : goto done;
1113 : }
1114 0 : if (big) {
1115 0 : rv = scsi_mode_select_big(link, SMS_PF,
1116 0 : &buf->hdr_big, scsi_autoconf | SCSI_SILENT, 20000);
1117 0 : } else {
1118 0 : rv = scsi_mode_select(link, SMS_PF,
1119 0 : &buf->hdr, scsi_autoconf | SCSI_SILENT, 20000);
1120 : }
1121 : break;
1122 : }
1123 :
1124 : done:
1125 0 : dma_free(buf, sizeof(*buf));
1126 0 : return (rv);
1127 0 : }
1128 :
1129 : /*
1130 : * Load the label information on the named device
1131 : */
1132 : int
1133 0 : sdgetdisklabel(dev_t dev, struct sd_softc *sc, struct disklabel *lp,
1134 : int spoofonly)
1135 : {
1136 : struct scsi_link *link;
1137 : size_t len;
1138 0 : char packname[sizeof(lp->d_packname) + 1];
1139 0 : char product[17], vendor[9];
1140 :
1141 0 : if (sc->flags & SDF_DYING)
1142 0 : return (ENXIO);
1143 0 : link = sc->sc_link;
1144 :
1145 0 : bzero(lp, sizeof(struct disklabel));
1146 :
1147 0 : lp->d_secsize = sc->params.secsize;
1148 0 : lp->d_ntracks = sc->params.heads;
1149 0 : lp->d_nsectors = sc->params.sectors;
1150 0 : lp->d_ncylinders = sc->params.cyls;
1151 0 : lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1152 0 : if (lp->d_secpercyl == 0) {
1153 0 : lp->d_secpercyl = 100;
1154 : /* as long as it's not 0 - readdisklabel divides by it */
1155 0 : }
1156 :
1157 0 : lp->d_type = DTYPE_SCSI;
1158 0 : if ((link->inqdata.device & SID_TYPE) == T_OPTICAL)
1159 0 : strncpy(lp->d_typename, "SCSI optical",
1160 : sizeof(lp->d_typename));
1161 : else
1162 0 : strncpy(lp->d_typename, "SCSI disk",
1163 : sizeof(lp->d_typename));
1164 :
1165 : /*
1166 : * Try to fit '<vendor> <product>' into d_packname. If that doesn't fit
1167 : * then leave out '<vendor> ' and use only as much of '<product>' as
1168 : * does fit.
1169 : */
1170 0 : viscpy(vendor, link->inqdata.vendor, 8);
1171 0 : viscpy(product, link->inqdata.product, 16);
1172 0 : len = snprintf(packname, sizeof(packname), "%s %s", vendor, product);
1173 0 : if (len > sizeof(lp->d_packname)) {
1174 0 : strlcpy(packname, product, sizeof(packname));
1175 0 : len = strlen(packname);
1176 0 : }
1177 : /*
1178 : * It is safe to use len as the count of characters to copy because
1179 : * packname is sizeof(lp->d_packname)+1, the string in packname is
1180 : * always null terminated and len does not count the terminating null.
1181 : * d_packname is not a null terminated string.
1182 : */
1183 0 : memcpy(lp->d_packname, packname, len);
1184 :
1185 0 : DL_SETDSIZE(lp, sc->params.disksize);
1186 0 : lp->d_version = 1;
1187 0 : lp->d_flags = 0;
1188 :
1189 : /* XXX - these values for BBSIZE and SBSIZE assume ffs */
1190 0 : lp->d_bbsize = BBSIZE;
1191 0 : lp->d_sbsize = SBSIZE;
1192 :
1193 0 : lp->d_magic = DISKMAGIC;
1194 0 : lp->d_magic2 = DISKMAGIC;
1195 0 : lp->d_checksum = dkcksum(lp);
1196 :
1197 : /*
1198 : * Call the generic disklabel extraction routine
1199 : */
1200 0 : return readdisklabel(DISKLABELDEV(dev), sdstrategy, lp, spoofonly);
1201 0 : }
1202 :
1203 :
1204 : /*
1205 : * Check Errors
1206 : */
1207 : int
1208 0 : sd_interpret_sense(struct scsi_xfer *xs)
1209 : {
1210 0 : struct scsi_sense_data *sense = &xs->sense;
1211 0 : struct scsi_link *link = xs->sc_link;
1212 0 : u_int8_t serr = sense->error_code & SSD_ERRCODE;
1213 : int retval;
1214 :
1215 : /*
1216 : * Let the generic code handle everything except a few categories of
1217 : * LUN not ready errors on open devices.
1218 : */
1219 0 : if (((link->flags & SDEV_OPEN) == 0) ||
1220 0 : (serr != SSD_ERRCODE_CURRENT && serr != SSD_ERRCODE_DEFERRED) ||
1221 0 : ((sense->flags & SSD_KEY) != SKEY_NOT_READY) ||
1222 0 : (sense->extra_len < 6))
1223 0 : return (scsi_interpret_sense(xs));
1224 :
1225 0 : if ((xs->flags & SCSI_IGNORE_NOT_READY) != 0)
1226 0 : return (0);
1227 :
1228 0 : switch (ASC_ASCQ(sense)) {
1229 : case SENSE_NOT_READY_BECOMING_READY:
1230 : SC_DEBUG(link, SDEV_DB1, ("becoming ready.\n"));
1231 0 : retval = scsi_delay(xs, 5);
1232 0 : break;
1233 :
1234 : case SENSE_NOT_READY_INIT_REQUIRED:
1235 : SC_DEBUG(link, SDEV_DB1, ("spinning up\n"));
1236 0 : retval = scsi_start(link, SSS_START,
1237 : SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_NOSLEEP);
1238 0 : if (retval == 0)
1239 0 : retval = ERESTART;
1240 0 : else if (retval == ENOMEM)
1241 : /* Can't issue the command. Fall back on a delay. */
1242 0 : retval = scsi_delay(xs, 5);
1243 : else
1244 : SC_DEBUG(link, SDEV_DB1, ("spin up failed (%#x)\n",
1245 : retval));
1246 : break;
1247 :
1248 : default:
1249 0 : retval = scsi_interpret_sense(xs);
1250 0 : break;
1251 : }
1252 :
1253 0 : return (retval);
1254 0 : }
1255 :
1256 : daddr_t
1257 0 : sdsize(dev_t dev)
1258 : {
1259 : struct disklabel *lp;
1260 : struct sd_softc *sc;
1261 : int part, omask;
1262 : daddr_t size;
1263 :
1264 0 : sc = sdlookup(DISKUNIT(dev));
1265 0 : if (sc == NULL)
1266 0 : return -1;
1267 0 : if (sc->flags & SDF_DYING) {
1268 : size = -1;
1269 0 : goto exit;
1270 : }
1271 :
1272 0 : part = DISKPART(dev);
1273 0 : omask = sc->sc_dk.dk_openmask & (1 << part);
1274 :
1275 0 : if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0) {
1276 : size = -1;
1277 0 : goto exit;
1278 : }
1279 :
1280 0 : lp = sc->sc_dk.dk_label;
1281 0 : if (sc->flags & SDF_DYING) {
1282 : size = -1;
1283 0 : goto exit;
1284 : }
1285 0 : if ((sc->sc_link->flags & SDEV_MEDIA_LOADED) == 0)
1286 0 : size = -1;
1287 0 : else if (lp->d_partitions[part].p_fstype != FS_SWAP)
1288 0 : size = -1;
1289 : else
1290 0 : size = DL_SECTOBLK(lp, DL_GETPSIZE(&lp->d_partitions[part]));
1291 0 : if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
1292 0 : size = -1;
1293 :
1294 : exit:
1295 0 : device_unref(&sc->sc_dev);
1296 0 : return size;
1297 0 : }
1298 :
1299 : /* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
1300 : static int sddoingadump;
1301 :
1302 : /*
1303 : * dump all of physical memory into the partition specified, starting
1304 : * at offset 'dumplo' into the partition.
1305 : */
1306 : int
1307 0 : sddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
1308 : {
1309 : struct sd_softc *sc; /* disk unit to do the I/O */
1310 : struct disklabel *lp; /* disk's disklabel */
1311 : int unit, part;
1312 : u_int32_t sectorsize; /* size of a disk sector */
1313 : u_int64_t nsects; /* number of sectors in partition */
1314 : u_int64_t sectoff; /* sector offset of partition */
1315 : u_int64_t totwrt; /* total number of sectors left to write */
1316 : u_int32_t nwrt; /* current number of sectors to write */
1317 : struct scsi_xfer *xs; /* ... convenience */
1318 : int rv;
1319 :
1320 : /* Check if recursive dump; if so, punt. */
1321 0 : if (sddoingadump)
1322 0 : return EFAULT;
1323 0 : if (blkno < 0)
1324 0 : return EINVAL;
1325 :
1326 : /* Mark as active early. */
1327 0 : sddoingadump = 1;
1328 :
1329 0 : unit = DISKUNIT(dev); /* Decompose unit & partition. */
1330 0 : part = DISKPART(dev);
1331 :
1332 : /* Check for acceptable drive number. */
1333 0 : if (unit >= sd_cd.cd_ndevs || (sc = sd_cd.cd_devs[unit]) == NULL)
1334 0 : return ENXIO;
1335 :
1336 : /*
1337 : * XXX Can't do this check, since the media might have been
1338 : * XXX marked `invalid' by successful unmounting of all
1339 : * XXX filesystems.
1340 : */
1341 : #if 0
1342 : /* Make sure it was initialized. */
1343 : if ((sc->sc_link->flags & SDEV_MEDIA_LOADED) != SDEV_MEDIA_LOADED)
1344 : return ENXIO;
1345 : #endif
1346 :
1347 : /* Convert to disk sectors. Request must be a multiple of size. */
1348 0 : lp = sc->sc_dk.dk_label;
1349 0 : sectorsize = lp->d_secsize;
1350 0 : if ((size % sectorsize) != 0)
1351 0 : return EFAULT;
1352 0 : if ((blkno % DL_BLKSPERSEC(lp)) != 0)
1353 0 : return EFAULT;
1354 0 : totwrt = size / sectorsize;
1355 0 : blkno = DL_BLKTOSEC(lp, blkno);
1356 :
1357 0 : nsects = DL_GETPSIZE(&lp->d_partitions[part]);
1358 0 : sectoff = DL_GETPOFFSET(&lp->d_partitions[part]);
1359 :
1360 : /* Check transfer bounds against partition size. */
1361 0 : if ((blkno + totwrt) > nsects)
1362 0 : return EINVAL;
1363 :
1364 : /* Offset block number to start of partition. */
1365 0 : blkno += sectoff;
1366 :
1367 0 : while (totwrt > 0) {
1368 0 : if (totwrt > UINT32_MAX)
1369 0 : nwrt = UINT32_MAX;
1370 : else
1371 0 : nwrt = totwrt;
1372 :
1373 : #ifndef SD_DUMP_NOT_TRUSTED
1374 0 : xs = scsi_xs_get(sc->sc_link, SCSI_NOSLEEP);
1375 0 : if (xs == NULL)
1376 0 : return (ENOMEM);
1377 :
1378 0 : xs->timeout = 10000;
1379 0 : xs->flags |= SCSI_DATA_OUT;
1380 0 : xs->data = va;
1381 0 : xs->datalen = nwrt * sectorsize;
1382 :
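	/*
	 * Note that a 10-byte CDB carries only a 16-bit transfer length, so
	 * _lto2b() truncates chunks larger than 65535 sectors; presumably
	 * the reason the call below is marked XXX.
	 */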
1383 0 : sd_cmd_rw10(xs, 0, blkno, nwrt); /* XXX */
1384 :
1385 0 : rv = scsi_xs_sync(xs);
1386 0 : scsi_xs_put(xs);
1387 0 : if (rv != 0)
1388 0 : return (ENXIO);
1389 : #else /* SD_DUMP_NOT_TRUSTED */
1390 : /* Let's just talk about this first... */
1391 : printf("sd%d: dump addr 0x%x, blk %lld\n", unit, va,
1392 : (long long)blkno);
1393 : delay(500 * 1000); /* half a second */
1394 : #endif /* SD_DUMP_NOT_TRUSTED */
1395 :
1396 : /* update block count */
1397 0 : totwrt -= nwrt;
1398 0 : blkno += nwrt;
1399 0 : va += sectorsize * nwrt;
1400 : }
1401 :
1402 0 : sddoingadump = 0;
1403 :
1404 0 : return (0);
1405 0 : }
1406 :
1407 : /*
1408 : * Copy up to len chars from src to dst, ignoring non-printables.
1409 : * Must be room for len+1 chars in dst so we can write the NUL.
1410 : * Does not assume src is NUL-terminated.
1411 : */
1412 : void
1413 0 : viscpy(u_char *dst, u_char *src, int len)
1414 : {
1415 0 : while (len > 0 && *src != '\0') {
1416 0 : if (*src < 0x20 || *src >= 0x80) {
1417 0 : src++;
1418 0 : continue;
1419 : }
1420 0 : *dst++ = *src++;
1421 0 : len--;
1422 : }
1423 0 : *dst = '\0';
1424 0 : }
1425 :
1426 : int
1427 0 : sd_read_cap_10(struct sd_softc *sc, int flags)
1428 : {
1429 0 : struct scsi_read_capacity cdb;
1430 : struct scsi_read_cap_data *rdcap;
1431 : struct scsi_xfer *xs;
1432 : int rv = ENOMEM;
1433 :
1434 0 : CLR(flags, SCSI_IGNORE_ILLEGAL_REQUEST);
1435 :
1436 0 : rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
1437 0 : PR_NOWAIT : PR_WAITOK) | PR_ZERO);
1438 0 : if (rdcap == NULL)
1439 0 : return (ENOMEM);
1440 :
1441 0 : if (sc->flags & SDF_DYING) {
1442 : rv = ENXIO;
1443 0 : goto done;
1444 : }
1445 0 : xs = scsi_xs_get(sc->sc_link, flags | SCSI_DATA_IN | SCSI_SILENT);
1446 0 : if (xs == NULL)
1447 : goto done;
1448 :
1449 0 : bzero(&cdb, sizeof(cdb));
1450 : cdb.opcode = READ_CAPACITY;
1451 :
1452 0 : memcpy(xs->cmd, &cdb, sizeof(cdb));
1453 0 : xs->cmdlen = sizeof(cdb);
1454 0 : xs->data = (void *)rdcap;
1455 0 : xs->datalen = sizeof(*rdcap);
1456 0 : xs->timeout = 20000;
1457 :
1458 0 : rv = scsi_xs_sync(xs);
1459 0 : scsi_xs_put(xs);
1460 :
1461 0 : if (rv == 0) {
1462 0 : sc->params.disksize = _4btol(rdcap->addr) + 1ll;
1463 0 : sc->params.secsize = _4btol(rdcap->length);
1464 0 : CLR(sc->flags, SDF_THIN);
1465 0 : }
1466 :
1467 : done:
1468 0 : dma_free(rdcap, sizeof(*rdcap));
1469 0 : return (rv);
1470 0 : }
1471 :
1472 : int
1473 0 : sd_read_cap_16(struct sd_softc *sc, int flags)
1474 : {
1475 0 : struct scsi_read_capacity_16 cdb;
1476 : struct scsi_read_cap_data_16 *rdcap;
1477 : struct scsi_xfer *xs;
1478 : int rv = ENOMEM;
1479 :
1480 0 : CLR(flags, SCSI_IGNORE_ILLEGAL_REQUEST);
1481 :
1482 0 : rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
1483 0 : PR_NOWAIT : PR_WAITOK) | PR_ZERO);
1484 0 : if (rdcap == NULL)
1485 0 : return (ENOMEM);
1486 :
1487 0 : if (sc->flags & SDF_DYING) {
1488 : rv = ENXIO;
1489 0 : goto done;
1490 : }
1491 0 : xs = scsi_xs_get(sc->sc_link, flags | SCSI_DATA_IN | SCSI_SILENT);
1492 0 : if (xs == NULL)
1493 : goto done;
1494 :
1495 0 : bzero(&cdb, sizeof(cdb));
1496 0 : cdb.opcode = READ_CAPACITY_16;
1497 0 : cdb.byte2 = SRC16_SERVICE_ACTION;
1498 0 : _lto4b(sizeof(*rdcap), cdb.length);
1499 :
1500 0 : memcpy(xs->cmd, &cdb, sizeof(cdb));
1501 0 : xs->cmdlen = sizeof(cdb);
1502 0 : xs->data = (void *)rdcap;
1503 0 : xs->datalen = sizeof(*rdcap);
1504 0 : xs->timeout = 20000;
1505 :
1506 0 : rv = scsi_xs_sync(xs);
1507 0 : scsi_xs_put(xs);
1508 :
1509 0 : if (rv == 0) {
1510 0 : if (_8btol(rdcap->addr) == 0) {
1511 : rv = EIO;
1512 0 : goto done;
1513 : }
1514 :
1515 0 : sc->params.disksize = _8btol(rdcap->addr) + 1;
1516 0 : sc->params.secsize = _4btol(rdcap->length);
1517 0 : if (ISSET(_2btol(rdcap->lowest_aligned), READ_CAP_16_TPE))
1518 0 : SET(sc->flags, SDF_THIN);
1519 : else
1520 0 : CLR(sc->flags, SDF_THIN);
1521 : }
1522 :
1523 : done:
1524 0 : dma_free(rdcap, sizeof(*rdcap));
1525 0 : return (rv);
1526 0 : }
1527 :
1528 : int
1529 0 : sd_size(struct sd_softc *sc, int flags)
1530 : {
1531 : int rv;
1532 :
1533 0 : if (sc->flags & SDF_DYING)
1534 0 : return (ENXIO);
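	/*
	 * SPC-3 and later devices should implement READ CAPACITY(16), so try
	 * it first and fall back to READ CAPACITY(10). Older devices get
	 * READ CAPACITY(10) first and are retried with the 16-byte version
	 * when the reported size saturates at 2^32 sectors.
	 */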
1535 0 : if (SCSISPC(sc->sc_link->inqdata.version) >= 3) {
1536 0 : rv = sd_read_cap_16(sc, flags);
1537 0 : if (rv != 0)
1538 0 : rv = sd_read_cap_10(sc, flags);
1539 : } else {
1540 0 : rv = sd_read_cap_10(sc, flags);
1541 0 : if (rv == 0 && sc->params.disksize == 0x100000000ll)
1542 0 : rv = sd_read_cap_16(sc, flags);
1543 : }
1544 :
1545 0 : return (rv);
1546 0 : }
1547 :
1548 : int
1549 0 : sd_thin_pages(struct sd_softc *sc, int flags)
1550 : {
1551 : struct scsi_vpd_hdr *pg;
1552 : size_t len = 0;
1553 : u_int8_t *pages;
1554 : int i, score = 0;
1555 : int rv;
1556 :
1557 0 : pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
1558 0 : PR_NOWAIT : PR_WAITOK) | PR_ZERO);
1559 0 : if (pg == NULL)
1560 0 : return (ENOMEM);
1561 :
1562 0 : if (sc->flags & SDF_DYING) {
1563 : rv = ENXIO;
1564 0 : goto done;
1565 : }
1566 0 : rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
1567 : SI_PG_SUPPORTED, flags);
1568 0 : if (rv != 0)
1569 : goto done;
1570 :
1571 0 : len = _2btol(pg->page_length);
1572 :
1573 0 : dma_free(pg, sizeof(*pg));
1574 0 : pg = dma_alloc(sizeof(*pg) + len, (ISSET(flags, SCSI_NOSLEEP) ?
1575 : PR_NOWAIT : PR_WAITOK) | PR_ZERO);
1576 0 : if (pg == NULL)
1577 0 : return (ENOMEM);
1578 :
1579 0 : if (sc->flags & SDF_DYING) {
1580 : rv = ENXIO;
1581 0 : goto done;
1582 : }
1583 0 : rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg) + len,
1584 : SI_PG_SUPPORTED, flags);
1585 0 : if (rv != 0)
1586 : goto done;
1587 :
1588 0 : pages = (u_int8_t *)(pg + 1);
1589 0 : if (pages[0] != SI_PG_SUPPORTED) {
1590 : rv = EIO;
1591 0 : goto done;
1592 : }
1593 :
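	/*
	 * Thin provisioning needs both the SI_PG_DISK_LIMITS and
	 * SI_PG_DISK_THIN VPD pages, so count how many of the two the
	 * device lists in its Supported VPD Pages page.
	 */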
1594 0 : for (i = 1; i < len; i++) {
1595 0 : switch (pages[i]) {
1596 : case SI_PG_DISK_LIMITS:
1597 : case SI_PG_DISK_THIN:
1598 0 : score++;
1599 0 : break;
1600 : }
1601 : }
1602 :
1603 0 : if (score < 2)
1604 0 : rv = EOPNOTSUPP;
1605 :
1606 : done:
1607 0 : dma_free(pg, sizeof(*pg) + len);
1608 0 : return (rv);
1609 0 : }
1610 :
1611 : int
1612 0 : sd_vpd_block_limits(struct sd_softc *sc, int flags)
1613 : {
1614 : struct scsi_vpd_disk_limits *pg;
1615 : int rv;
1616 :
1617 0 : pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
1618 0 : PR_NOWAIT : PR_WAITOK) | PR_ZERO);
1619 0 : if (pg == NULL)
1620 0 : return (ENOMEM);
1621 :
1622 0 : if (sc->flags & SDF_DYING) {
1623 : rv = ENXIO;
1624 0 : goto done;
1625 : }
1626 0 : rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
1627 : SI_PG_DISK_LIMITS, flags);
1628 0 : if (rv != 0)
1629 : goto done;
1630 :
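	/*
	 * Only the longer, thin-provisioning form of the Block Limits page
	 * carries the unmap limits; a shorter page means the device cannot
	 * describe them.
	 */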
1631 0 : if (_2btol(pg->hdr.page_length) == SI_PG_DISK_LIMITS_LEN_THIN) {
1632 0 : sc->params.unmap_sectors = _4btol(pg->max_unmap_lba_count);
1633 0 : sc->params.unmap_descs = _4btol(pg->max_unmap_desc_count);
1634 0 : } else
1635 : rv = EOPNOTSUPP;
1636 :
1637 : done:
1638 0 : dma_free(pg, sizeof(*pg));
1639 0 : return (rv);
1640 0 : }
1641 :
1642 : int
1643 0 : sd_vpd_thin(struct sd_softc *sc, int flags)
1644 : {
1645 : struct scsi_vpd_disk_thin *pg;
1646 : int rv;
1647 :
1648 0 : pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
1649 0 : PR_NOWAIT : PR_WAITOK) | PR_ZERO);
1650 0 : if (pg == NULL)
1651 0 : return (ENOMEM);
1652 :
1653 0 : if (sc->flags & SDF_DYING) {
1654 : rv = ENXIO;
1655 0 : goto done;
1656 : }
1657 0 : rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
1658 : SI_PG_DISK_THIN, flags);
1659 : if (rv != 0)
1660 0 : goto done;
1661 :
1662 : #ifdef notyet
1663 : if (ISSET(pg->flags, VPD_DISK_THIN_TPU))
1664 : sc->sc_delete = sd_unmap;
1665 : else if (ISSET(pg->flags, VPD_DISK_THIN_TPWS)) {
1666 : sc->sc_delete = sd_write_same_16;
1667 : sc->params.unmap_descs = 1; /* WRITE SAME 16 only does one */
1668 : } else
1669 : rv = EOPNOTSUPP;
1670 : #endif
1671 :
1672 : done:
1673 0 : dma_free(pg, sizeof(*pg));
1674 0 : return (rv);
1675 0 : }
1676 :
1677 : int
1678 0 : sd_thin_params(struct sd_softc *sc, int flags)
1679 : {
1680 : int rv;
1681 :
1682 0 : rv = sd_thin_pages(sc, flags);
1683 0 : if (rv != 0)
1684 0 : return (rv);
1685 :
1686 0 : rv = sd_vpd_block_limits(sc, flags);
1687 0 : if (rv != 0)
1688 0 : return (rv);
1689 :
1690 0 : rv = sd_vpd_thin(sc, flags);
1691 0 : if (rv != 0)
1692 0 : return (rv);
1693 :
1694 0 : return (0);
1695 0 : }
1696 :
1697 : /*
1698 : * Fill out the disk parameter structure. Return SDGP_RESULT_OK if the
1699 : * structure is correctly filled in, SDGP_RESULT_OFFLINE otherwise. The caller
1700 : * is responsible for clearing the SDEV_MEDIA_LOADED flag if the structure
1701 : * cannot be completed.
1702 : */
1703 : int
1704 0 : sd_get_parms(struct sd_softc *sc, struct disk_parms *dp, int flags)
1705 : {
1706 : struct scsi_link *link;
1707 : union scsi_mode_sense_buf *buf = NULL;
1708 0 : struct page_rigid_geometry *rigid = NULL;
1709 0 : struct page_flex_geometry *flex = NULL;
1710 0 : struct page_reduced_geometry *reduced = NULL;
1711 0 : u_char *page0 = NULL;
1712 0 : u_int32_t heads = 0, sectors = 0, cyls = 0, secsize = 0;
1713 0 : int err = 0, big;
1714 :
1715 0 : if (sd_size(sc, flags) != 0)
1716 0 : return (SDGP_RESULT_OFFLINE);
1717 :
1718 0 : if (ISSET(sc->flags, SDF_THIN) && sd_thin_params(sc, flags) != 0) {
1719 : /* We don't know the unmap limits, so we can't use thin provisioning. */
1720 0 : CLR(sc->flags, SDF_THIN);
1721 0 : }
1722 :
1723 0 : buf = dma_alloc(sizeof(*buf), PR_NOWAIT);
1724 0 : if (buf == NULL)
1725 : goto validate;
1726 :
1727 0 : if (sc->flags & SDF_DYING)
1728 : goto die;
1729 0 : link = sc->sc_link;
1730 :
1731 : /*
1732 : * Ask for page 0 (vendor specific) mode sense data to find
1733 : * READONLY info. The only thing USB devices will ask for.
1734 : */
1735 0 : err = scsi_do_mode_sense(link, 0, buf, (void **)&page0,
1736 0 : NULL, NULL, NULL, 1, flags | SCSI_SILENT, &big);
1737 0 : if (sc->flags & SDF_DYING)
1738 : goto die;
1739 0 : if (err == 0) {
1740 0 : if (big && buf->hdr_big.dev_spec & SMH_DSP_WRITE_PROT)
1741 0 : SET(link->flags, SDEV_READONLY);
1742 0 : else if (!big && buf->hdr.dev_spec & SMH_DSP_WRITE_PROT)
1743 0 : SET(link->flags, SDEV_READONLY);
1744 : else
1745 0 : CLR(link->flags, SDEV_READONLY);
1746 : }
1747 :
1748 : /*
1749 : * Many UMASS devices choke when asked about their geometry. Most
1750 : * don't have a meaningful geometry anyway, so just fake it if
1751 : * sd_size() worked.
1752 : */
1753 0 : if ((link->flags & SDEV_UMASS) && (dp->disksize > 0))
1754 : goto validate;
1755 :
1756 0 : switch (link->inqdata.device & SID_TYPE) {
1757 : case T_OPTICAL:
1758 : /* No more information needed or available. */
1759 : break;
1760 :
1761 : case T_RDIRECT:
1762 : /* T_RDIRECT supports only PAGE_REDUCED_GEOMETRY (6). */
1763 0 : err = scsi_do_mode_sense(link, PAGE_REDUCED_GEOMETRY,
1764 0 : buf, (void **)&reduced, NULL, NULL, &secsize,
1765 : sizeof(*reduced), flags | SCSI_SILENT, NULL);
1766 0 : if (!err && reduced &&
1767 0 : DISK_PGCODE(reduced, PAGE_REDUCED_GEOMETRY)) {
1768 0 : if (dp->disksize == 0)
1769 0 : dp->disksize = _5btol(reduced->sectors);
1770 0 : if (secsize == 0)
1771 0 : secsize = _2btol(reduced->bytes_s);
1772 : }
1773 : break;
1774 :
1775 : default:
1776 : /*
1777 : * NOTE: Some devices leave off the last four bytes of
1778 : * PAGE_RIGID_GEOMETRY and PAGE_FLEX_GEOMETRY mode sense pages.
1779 : * The only information in those four bytes is RPM information
1780 : * so accept the page. The extra bytes will be zero and RPM will
1781 : * end up with the default value of 3600.
1782 : */
1783 0 : if (((link->flags & SDEV_ATAPI) == 0) ||
1784 0 : ((link->flags & SDEV_REMOVABLE) == 0))
1785 0 : err = scsi_do_mode_sense(link,
1786 0 : PAGE_RIGID_GEOMETRY, buf, (void **)&rigid, NULL,
1787 : NULL, &secsize, sizeof(*rigid) - 4,
1788 : flags | SCSI_SILENT, NULL);
1789 0 : if (!err && rigid && DISK_PGCODE(rigid, PAGE_RIGID_GEOMETRY)) {
1790 0 : heads = rigid->nheads;
1791 0 : cyls = _3btol(rigid->ncyl);
1792 0 : if (heads * cyls > 0)
1793 0 : sectors = dp->disksize / (heads * cyls);
1794 : } else {
1795 0 : if (sc->flags & SDF_DYING)
1796 : goto die;
1797 0 : err = scsi_do_mode_sense(link,
1798 0 : PAGE_FLEX_GEOMETRY, buf, (void **)&flex, NULL, NULL,
1799 : &secsize, sizeof(*flex) - 4,
1800 : flags | SCSI_SILENT, NULL);
1801 0 : if (!err && flex &&
1802 0 : DISK_PGCODE(flex, PAGE_FLEX_GEOMETRY)) {
1803 0 : sectors = flex->ph_sec_tr;
1804 0 : heads = flex->nheads;
1805 0 : cyls = _2btol(flex->ncyl);
1806 0 : if (secsize == 0)
1807 0 : secsize = _2btol(flex->bytes_s);
1808 0 : if (dp->disksize == 0)
1809 0 : dp->disksize = heads * cyls * sectors;
1810 : }
1811 : }
1812 : break;
1813 : }
1814 :
1815 : validate:
1816 0 : if (buf)
1817 0 : dma_free(buf, sizeof(*buf));
1818 :
1819 0 : if (dp->disksize == 0)
1820 0 : return (SDGP_RESULT_OFFLINE);
1821 :
1822 0 : if (dp->secsize == 0)
1823 0 : dp->secsize = (secsize == 0) ? 512 : secsize;
1824 :
1825 : /*
1826 : * Restrict secsize values to powers of two between 512 and 64k.
1827 : */
1828 0 : switch (dp->secsize) {
1829 : case 0x200: /* == 512, == DEV_BSIZE on all architectures. */
1830 : case 0x400:
1831 : case 0x800:
1832 : case 0x1000:
1833 : case 0x2000:
1834 : case 0x4000:
1835 : case 0x8000:
1836 : case 0x10000:
1837 : break;
1838 : default:
1839 : SC_DEBUG(sc->sc_link, SDEV_DB1,
1840 : ("sd_get_parms: bad secsize: %#lx\n", dp->secsize));
1841 0 : return (SDGP_RESULT_OFFLINE);
1842 : }
1843 :
1844 : /*
1845 : * XXX THINK ABOUT THIS!! Using values such that sectors * heads *
1846 : * cyls is <= disk_size can lead to wasted space. We need a more
1847 : * careful calculation/validation to make everything work out
1848 : * optimally.
1849 : */
1850 0 : if (dp->disksize > 0xffffffff && (dp->heads * dp->sectors) < 0xffff) {
1851 0 : dp->heads = 511;
1852 0 : dp->sectors = 255;
1853 : cyls = 0;
1854 0 : } else {
1855 : /*
1856 : * Use standard geometry values for anything we still don't
1857 : * know.
1858 : */
1859 0 : dp->heads = (heads == 0) ? 255 : heads;
1860 0 : dp->sectors = (sectors == 0) ? 63 : sectors;
1861 : }
1862 :
1863 0 : dp->cyls = (cyls == 0) ? dp->disksize / (dp->heads * dp->sectors) :
1864 0 : cyls;
1865 :
1866 0 : if (dp->cyls == 0) {
1867 0 : dp->heads = dp->cyls = 1;
1868 0 : dp->sectors = dp->disksize;
1869 0 : }
1870 :
1871 0 : return (SDGP_RESULT_OK);
1872 :
1873 : die:
1874 0 : dma_free(buf, sizeof(*buf));
1875 0 : return (SDGP_RESULT_OFFLINE);
1876 0 : }
1877 :
1878 : int
1879 0 : sd_flush(struct sd_softc *sc, int flags)
1880 : {
1881 : struct scsi_link *link;
1882 : struct scsi_xfer *xs;
1883 : struct scsi_synchronize_cache *cmd;
1884 : int error;
1885 :
1886 0 : if (sc->flags & SDF_DYING)
1887 0 : return (ENXIO);
1888 0 : link = sc->sc_link;
1889 :
1890 0 : if (link->quirks & SDEV_NOSYNCCACHE)
1891 0 : return (0);
1892 :
1893 : /*
1894 : * Issue a SYNCHRONIZE CACHE. Address 0, length 0 means "all remaining
1895 : * blocks starting at address 0". Ignore ILLEGAL REQUEST in the event
1896 : * that the command is not supported by the device.
1897 : */
1898 :
1899 0 : xs = scsi_xs_get(link, flags);
1900 0 : if (xs == NULL) {
1901 : SC_DEBUG(link, SDEV_DB1, ("cache sync failed to get xs\n"));
1902 0 : return (EIO);
1903 : }
1904 :
1905 0 : cmd = (struct scsi_synchronize_cache *)xs->cmd;
1906 0 : cmd->opcode = SYNCHRONIZE_CACHE;
1907 :
1908 0 : xs->cmdlen = sizeof(*cmd);
1909 0 : xs->timeout = 100000;
1910 0 : xs->flags |= SCSI_IGNORE_ILLEGAL_REQUEST;
1911 :
1912 0 : error = scsi_xs_sync(xs);
1913 :
1914 0 : scsi_xs_put(xs);
1915 :
1916 0 : if (error)
1917 : SC_DEBUG(link, SDEV_DB1, ("cache sync failed\n"));
1918 : else
1919 0 : sc->flags &= ~SDF_DIRTY;
1920 :
1921 0 : return (error);
1922 0 : }
|