LCOV - code coverage report
Current view: top level - dev/ic - nvme.c (source / functions)
Test: 6.4
Date: 2018-10-19 03:25:38
Lines: 0 hit / 810 total (0.0 %)
Functions: 0 hit / 48 total (0.0 %)
Legend: Lines: hit | not hit

          Line data    Source code
       1             : /*      $OpenBSD: nvme.c,v 1.61 2018/01/10 15:45:46 jcs Exp $ */
       2             : 
       3             : /*
       4             :  * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
       5             :  *
       6             :  * Permission to use, copy, modify, and distribute this software for any
       7             :  * purpose with or without fee is hereby granted, provided that the above
       8             :  * copyright notice and this permission notice appear in all copies.
       9             :  *
      10             :  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
      11             :  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
      12             :  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
      13             :  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
      14             :  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
      15             :  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
      16             :  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
      17             :  */
      18             : 
      19             : #include <sys/param.h>
      20             : #include <sys/systm.h>
      21             : #include <sys/buf.h>
      22             : #include <sys/kernel.h>
      23             : #include <sys/malloc.h>
      24             : #include <sys/device.h>
      25             : #include <sys/queue.h>
      26             : #include <sys/mutex.h>
      27             : #include <sys/pool.h>
      28             : 
      29             : #include <sys/atomic.h>
      30             : 
      31             : #include <machine/bus.h>
      32             : 
      33             : #include <scsi/scsi_all.h>
      34             : #include <scsi/scsi_disk.h>
      35             : #include <scsi/scsiconf.h>
      36             : 
      37             : #include <dev/ic/nvmereg.h>
      38             : #include <dev/ic/nvmevar.h>
      39             : 
      40             : struct cfdriver nvme_cd = {
      41             :         NULL,
      42             :         "nvme",
      43             :         DV_DULL
      44             : };
      45             : 
      46             : int     nvme_ready(struct nvme_softc *, u_int32_t);
      47             : int     nvme_enable(struct nvme_softc *, u_int);
      48             : int     nvme_disable(struct nvme_softc *);
      49             : int     nvme_shutdown(struct nvme_softc *);
      50             : int     nvme_resume(struct nvme_softc *);
      51             : 
      52             : void    nvme_dumpregs(struct nvme_softc *);
      53             : int     nvme_identify(struct nvme_softc *, u_int);
      54             : void    nvme_fill_identify(struct nvme_softc *, struct nvme_ccb *, void *);
      55             : 
      56             : int     nvme_ccbs_alloc(struct nvme_softc *, u_int);
      57             : void    nvme_ccbs_free(struct nvme_softc *);
      58             : 
      59             : void *  nvme_ccb_get(void *);
      60             : void    nvme_ccb_put(void *, void *);
      61             : 
      62             : int     nvme_poll(struct nvme_softc *, struct nvme_queue *, struct nvme_ccb *,
      63             :             void (*)(struct nvme_softc *, struct nvme_ccb *, void *));
      64             : void    nvme_poll_fill(struct nvme_softc *, struct nvme_ccb *, void *);
      65             : void    nvme_poll_done(struct nvme_softc *, struct nvme_ccb *,
      66             :             struct nvme_cqe *);
      67             : void    nvme_sqe_fill(struct nvme_softc *, struct nvme_ccb *, void *);
      68             : void    nvme_empty_done(struct nvme_softc *, struct nvme_ccb *,
      69             :             struct nvme_cqe *);
      70             : 
      71             : struct nvme_queue *
      72             :         nvme_q_alloc(struct nvme_softc *, u_int16_t, u_int, u_int);
      73             : int     nvme_q_create(struct nvme_softc *, struct nvme_queue *);
      74             : int     nvme_q_reset(struct nvme_softc *, struct nvme_queue *);
      75             : int     nvme_q_delete(struct nvme_softc *, struct nvme_queue *);
      76             : void    nvme_q_submit(struct nvme_softc *,
      77             :             struct nvme_queue *, struct nvme_ccb *,
      78             :             void (*)(struct nvme_softc *, struct nvme_ccb *, void *));
      79             : int     nvme_q_complete(struct nvme_softc *, struct nvme_queue *);
      80             : void    nvme_q_free(struct nvme_softc *, struct nvme_queue *);
      81             : 
      82             : struct nvme_dmamem *
      83             :         nvme_dmamem_alloc(struct nvme_softc *, size_t);
      84             : void    nvme_dmamem_free(struct nvme_softc *, struct nvme_dmamem *);
      85             : void    nvme_dmamem_sync(struct nvme_softc *, struct nvme_dmamem *, int);
      86             : 
      87             : void    nvme_scsi_cmd(struct scsi_xfer *);
      88             : int     nvme_scsi_probe(struct scsi_link *);
      89             : void    nvme_scsi_free(struct scsi_link *);
      90             : 
      91             : #ifdef HIBERNATE
      92             : #include <uvm/uvm_extern.h>
      93             : #include <sys/hibernate.h>
      94             : #include <sys/disk.h>
      95             : #include <sys/disklabel.h>
      96             : 
      97             : int     nvme_hibernate_io(dev_t, daddr_t, vaddr_t, size_t, int, void *);
      98             : #endif
      99             : 
     100             : struct scsi_adapter nvme_switch = {
     101             :         nvme_scsi_cmd,          /* cmd */
     102             :         scsi_minphys,           /* minphys */
     103             :         nvme_scsi_probe,        /* dev probe */
     104             :         nvme_scsi_free,         /* dev free */
     105             :         NULL,                   /* ioctl */
     106             : };
     107             : 
     108             : void    nvme_scsi_io(struct scsi_xfer *, int);
     109             : void    nvme_scsi_io_fill(struct nvme_softc *, struct nvme_ccb *, void *);
     110             : void    nvme_scsi_io_done(struct nvme_softc *, struct nvme_ccb *,
     111             :             struct nvme_cqe *);
     112             : 
     113             : void    nvme_scsi_sync(struct scsi_xfer *);
     114             : void    nvme_scsi_sync_fill(struct nvme_softc *, struct nvme_ccb *, void *);
     115             : void    nvme_scsi_sync_done(struct nvme_softc *, struct nvme_ccb *,
     116             :             struct nvme_cqe *);
     117             : 
     118             : void    nvme_scsi_inq(struct scsi_xfer *);
     119             : void    nvme_scsi_inquiry(struct scsi_xfer *);
     120             : void    nvme_scsi_capacity16(struct scsi_xfer *);
     121             : void    nvme_scsi_capacity(struct scsi_xfer *);
     122             : 
     123             : #define nvme_read4(_s, _r) \
     124             :         bus_space_read_4((_s)->sc_iot, (_s)->sc_ioh, (_r))
     125             : #define nvme_write4(_s, _r, _v) \
     126             :         bus_space_write_4((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
     127             : /*
     128             :  * Some controllers, at least Apple NVMe, always require split
     129             :  * transfers, so don't use bus_space_{read,write}_8() on LP64.
     130             :  */
     131             : static inline u_int64_t
     132           0 : nvme_read8(struct nvme_softc *sc, bus_size_t r)
     133             : {
     134             :         u_int64_t v;
     135             :         u_int32_t *a = (u_int32_t *)&v;
     136             : 
     137             : #if _BYTE_ORDER == _LITTLE_ENDIAN
     138           0 :         a[0] = nvme_read4(sc, r);
     139           0 :         a[1] = nvme_read4(sc, r + 4);
     140             : #else /* _BYTE_ORDER == _LITTLE_ENDIAN */
     141             :         a[1] = nvme_read4(sc, r);
     142             :         a[0] = nvme_read4(sc, r + 4);
     143             : #endif
     144             : 
     145           0 :         return (v);
     146           0 : }
     147             : 
     148             : static inline void
     149           0 : nvme_write8(struct nvme_softc *sc, bus_size_t r, u_int64_t v)
     150             : {
     151             :         u_int32_t *a = (u_int32_t *)&v;
     152             : 
     153             : #if _BYTE_ORDER == _LITTLE_ENDIAN
     154           0 :         nvme_write4(sc, r, a[0]);
     155           0 :         nvme_write4(sc, r + 4, a[1]);
     156             : #else /* _BYTE_ORDER == _LITTLE_ENDIAN */
     157             :         nvme_write4(sc, r, a[1]);
     158             :         nvme_write4(sc, r + 4, a[0]);
     159             : #endif
     160           0 : }
     161             : #define nvme_barrier(_s, _r, _l, _f) \
     162             :         bus_space_barrier((_s)->sc_iot, (_s)->sc_ioh, (_r), (_l), (_f))
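                     :
                     : /*
                     :  * A sketch of what the split access amounts to in the
                     :  * little-endian case: writing a 64-bit register such as ASQ
                     :  * turns into two 32-bit writes, low half first,
                     :  *
                     :  *      nvme_write4(sc, NVME_ASQ + 0, (u_int32_t)v);
                     :  *      nvme_write4(sc, NVME_ASQ + 4, (u_int32_t)(v >> 32));
                     :  *
                     :  * which is equivalent to bus_space_write_8() minus the single
                     :  * 64-bit bus cycle that those controllers reject.
                     :  */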
     163             : 
     164             : void
     165           0 : nvme_dumpregs(struct nvme_softc *sc)
     166             : {
     167             :         u_int64_t r8;
     168             :         u_int32_t r4;
     169             : 
     170           0 :         r8 = nvme_read8(sc, NVME_CAP);
      171           0 :         printf("%s: cap  0x%016llx\n", DEVNAME(sc), r8);
     172           0 :         printf("%s:  mpsmax %u (%u)\n", DEVNAME(sc),
     173           0 :             (u_int)NVME_CAP_MPSMAX(r8), (1 << NVME_CAP_MPSMAX(r8)));
     174           0 :         printf("%s:  mpsmin %u (%u)\n", DEVNAME(sc),
     175           0 :             (u_int)NVME_CAP_MPSMIN(r8), (1 << NVME_CAP_MPSMIN(r8)));
     176           0 :         printf("%s:  css %llu\n", DEVNAME(sc), NVME_CAP_CSS(r8));
     177           0 :         printf("%s:  nssrs %llu\n", DEVNAME(sc), NVME_CAP_NSSRS(r8));
     178           0 :         printf("%s:  dstrd %u\n", DEVNAME(sc), NVME_CAP_DSTRD(r8));
     179           0 :         printf("%s:  to %llu msec\n", DEVNAME(sc), NVME_CAP_TO(r8));
     180           0 :         printf("%s:  ams %llu\n", DEVNAME(sc), NVME_CAP_AMS(r8));
     181           0 :         printf("%s:  cqr %llu\n", DEVNAME(sc), NVME_CAP_CQR(r8));
     182           0 :         printf("%s:  mqes %llu\n", DEVNAME(sc), NVME_CAP_MQES(r8));
     183             : 
     184           0 :         printf("%s: vs   0x%04x\n", DEVNAME(sc), nvme_read4(sc, NVME_VS));
     185             : 
     186           0 :         r4 = nvme_read4(sc, NVME_CC);
     187           0 :         printf("%s: cc   0x%04x\n", DEVNAME(sc), r4);
     188           0 :         printf("%s:  iocqes %u\n", DEVNAME(sc), NVME_CC_IOCQES_R(r4));
     189           0 :         printf("%s:  iosqes %u\n", DEVNAME(sc), NVME_CC_IOSQES_R(r4));
     190           0 :         printf("%s:  shn %u\n", DEVNAME(sc), NVME_CC_SHN_R(r4));
     191           0 :         printf("%s:  ams %u\n", DEVNAME(sc), NVME_CC_AMS_R(r4));
     192           0 :         printf("%s:  mps %u\n", DEVNAME(sc), NVME_CC_MPS_R(r4));
     193           0 :         printf("%s:  css %u\n", DEVNAME(sc), NVME_CC_CSS_R(r4));
     194           0 :         printf("%s:  en %u\n", DEVNAME(sc), ISSET(r4, NVME_CC_EN));
     195             : 
     196           0 :         printf("%s: csts 0x%08x\n", DEVNAME(sc), nvme_read4(sc, NVME_CSTS));
     197           0 :         printf("%s: aqa  0x%08x\n", DEVNAME(sc), nvme_read4(sc, NVME_AQA));
     198           0 :         printf("%s: asq  0x%016llx\n", DEVNAME(sc), nvme_read8(sc, NVME_ASQ));
     199           0 :         printf("%s: acq  0x%016llx\n", DEVNAME(sc), nvme_read8(sc, NVME_ACQ));
     200           0 : }
     201             : 
     202             : int
     203           0 : nvme_ready(struct nvme_softc *sc, u_int32_t rdy)
     204             : {
     205             :         u_int i = 0;
     206             : 
     207           0 :         while ((nvme_read4(sc, NVME_CSTS) & NVME_CSTS_RDY) != rdy) {
     208           0 :                 if (i++ > sc->sc_rdy_to)
     209           0 :                         return (1);
     210             : 
     211           0 :                 delay(1000);
     212           0 :                 nvme_barrier(sc, NVME_CSTS, 4, BUS_SPACE_BARRIER_READ);
     213             :         }
     214             : 
     215           0 :         return (0);
     216           0 : }
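                     :
                     : /*
                     :  * nvme_ready() polls CSTS.RDY until it matches the requested
                     :  * state, sleeping 1ms per iteration, so the wait is bounded by
                     :  * roughly sc_rdy_to milliseconds.  sc_rdy_to comes from CAP.TO
                     :  * in nvme_attach(); the spec expresses TO in 500ms units, so
                     :  * assuming NVME_CAP_TO() converts that to milliseconds (as the
                     :  * delay(1000) here suggests), CAP.TO == 30 gives a worst case
                     :  * of about 15 seconds.
                     :  */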
     217             : 
     218             : int
     219           0 : nvme_enable(struct nvme_softc *sc, u_int mps)
     220             : {
     221             :         u_int32_t cc;
     222             : 
     223           0 :         cc = nvme_read4(sc, NVME_CC);
     224           0 :         if (ISSET(cc, NVME_CC_EN))
     225           0 :                 return (nvme_ready(sc, NVME_CSTS_RDY));
     226             : 
     227           0 :         nvme_write4(sc, NVME_AQA, NVME_AQA_ACQS(sc->sc_admin_q->q_entries) |
     228             :             NVME_AQA_ASQS(sc->sc_admin_q->q_entries));
     229           0 :         nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
     230             : 
     231           0 :         nvme_write8(sc, NVME_ASQ, NVME_DMA_DVA(sc->sc_admin_q->q_sq_dmamem));
     232           0 :         nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
     233           0 :         nvme_write8(sc, NVME_ACQ, NVME_DMA_DVA(sc->sc_admin_q->q_cq_dmamem));
     234           0 :         nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
     235             : 
     236           0 :         CLR(cc, NVME_CC_IOCQES_MASK | NVME_CC_IOSQES_MASK | NVME_CC_SHN_MASK |
     237             :             NVME_CC_AMS_MASK | NVME_CC_MPS_MASK | NVME_CC_CSS_MASK);
     238           0 :         SET(cc, NVME_CC_IOSQES(ffs(64) - 1) | NVME_CC_IOCQES(ffs(16) - 1));
     239             :         SET(cc, NVME_CC_SHN(NVME_CC_SHN_NONE));
     240             :         SET(cc, NVME_CC_CSS(NVME_CC_CSS_NVM));
     241             :         SET(cc, NVME_CC_AMS(NVME_CC_AMS_RR));
     242           0 :         SET(cc, NVME_CC_MPS(mps));
     243           0 :         SET(cc, NVME_CC_EN);
     244             : 
     245           0 :         nvme_write4(sc, NVME_CC, cc);
     246           0 :         nvme_barrier(sc, 0, sc->sc_ios,
     247             :             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
     248             : 
     249           0 :         return (nvme_ready(sc, NVME_CSTS_RDY));
     250           0 : }
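                     :
                     : /*
                     :  * The ffs() arithmetic above encodes the queue entry sizes as
                     :  * powers of two: an NVMe submission queue entry is 64 bytes and
                     :  * a completion queue entry 16 bytes, so
                     :  *
                     :  *      NVME_CC_IOSQES(ffs(64) - 1) == NVME_CC_IOSQES(6)  (1 << 6 == 64)
                     :  *      NVME_CC_IOCQES(ffs(16) - 1) == NVME_CC_IOCQES(4)  (1 << 4 == 16)
                     :  *
                     :  * Note that AQA/ASQ/ACQ are programmed before CC.EN is set;
                     :  * the controller samples them when EN is raised.
                     :  */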
     251             : 
     252             : int
     253           0 : nvme_disable(struct nvme_softc *sc)
     254             : {
     255             :         u_int32_t cc, csts;
     256             : 
     257           0 :         cc = nvme_read4(sc, NVME_CC);
     258           0 :         if (ISSET(cc, NVME_CC_EN)) {
     259           0 :                 csts = nvme_read4(sc, NVME_CSTS);
     260           0 :                 if (!ISSET(csts, NVME_CSTS_CFS) &&
     261           0 :                     nvme_ready(sc, NVME_CSTS_RDY) != 0)
     262           0 :                         return (1);
     263             :         }
     264             : 
     265           0 :         CLR(cc, NVME_CC_EN);
     266             : 
     267           0 :         nvme_write4(sc, NVME_CC, cc);
     268           0 :         nvme_barrier(sc, 0, sc->sc_ios,
     269             :             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
     270             : 
     271           0 :         return (nvme_ready(sc, 0));
     272           0 : }
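                     :
                     : /*
                     :  * Disable sequence: if the controller is enabled (and not
                     :  * reporting a fatal status), first wait for it to finish coming
                     :  * ready, then clear CC.EN and wait for CSTS.RDY to drop to 0.
                     :  * Clearing EN while the controller is still transitioning is
                     :  * what the initial wait avoids.
                     :  */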
     273             : 
     274             : int
     275           0 : nvme_attach(struct nvme_softc *sc)
     276             : {
     277           0 :         struct scsibus_attach_args saa;
     278             :         u_int64_t cap;
     279             :         u_int32_t reg;
     280             :         u_int mps = PAGE_SHIFT;
     281             : 
     282           0 :         mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
     283           0 :         SIMPLEQ_INIT(&sc->sc_ccb_list);
     284           0 :         scsi_iopool_init(&sc->sc_iopool, sc, nvme_ccb_get, nvme_ccb_put);
     285             : 
     286           0 :         reg = nvme_read4(sc, NVME_VS);
     287           0 :         if (reg == 0xffffffff) {
     288           0 :                 printf(", invalid mapping\n");
     289           0 :                 return (1);
     290             :         }
     291             : 
     292           0 :         printf(", NVMe %d.%d\n", NVME_VS_MJR(reg), NVME_VS_MNR(reg));
     293             : 
     294           0 :         cap = nvme_read8(sc, NVME_CAP);
     295           0 :         sc->sc_dstrd = NVME_CAP_DSTRD(cap);
     296           0 :         if (NVME_CAP_MPSMIN(cap) > PAGE_SHIFT) {
     297           0 :                 printf("%s: NVMe minimum page size %u "
     298           0 :                     "is greater than CPU page size %u\n", DEVNAME(sc),
     299           0 :                     1 << NVME_CAP_MPSMIN(cap), 1 << PAGE_SHIFT);
     300           0 :                 return (1);
     301             :         }
     302           0 :         if (NVME_CAP_MPSMAX(cap) < mps)
     303           0 :                 mps = NVME_CAP_MPSMAX(cap);
     304             : 
     305           0 :         sc->sc_rdy_to = NVME_CAP_TO(cap);
     306           0 :         sc->sc_mps = 1 << mps;
     307           0 :         sc->sc_mps_bits = mps;
     308           0 :         sc->sc_mdts = MAXPHYS;
     309           0 :         sc->sc_max_sgl = 2;
     310             : 
     311           0 :         if (nvme_disable(sc) != 0) {
     312           0 :                 printf("%s: unable to disable controller\n", DEVNAME(sc));
     313           0 :                 return (1);
     314             :         }
     315             : 
     316           0 :         sc->sc_admin_q = nvme_q_alloc(sc, NVME_ADMIN_Q, 128, sc->sc_dstrd);
     317           0 :         if (sc->sc_admin_q == NULL) {
     318           0 :                 printf("%s: unable to allocate admin queue\n", DEVNAME(sc));
     319           0 :                 return (1);
     320             :         }
     321             : 
     322           0 :         if (nvme_ccbs_alloc(sc, 16) != 0) {
     323           0 :                 printf("%s: unable to allocate initial ccbs\n", DEVNAME(sc));
     324           0 :                 goto free_admin_q;
     325             :         }
     326             : 
     327           0 :         if (nvme_enable(sc, mps) != 0) {
     328           0 :                 printf("%s: unable to enable controller\n", DEVNAME(sc));
     329           0 :                 goto free_ccbs;
     330             :         }
     331             : 
     332           0 :         if (nvme_identify(sc, NVME_CAP_MPSMIN(cap)) != 0) {
     333           0 :                 printf("%s: unable to identify controller\n", DEVNAME(sc));
     334           0 :                 goto disable;
     335             :         }
     336             : 
     337             :         /* we know how big things are now */
     338           0 :         sc->sc_max_sgl = sc->sc_mdts / sc->sc_mps;
     339             : 
     340           0 :         nvme_ccbs_free(sc);
     341           0 :         if (nvme_ccbs_alloc(sc, 64) != 0) {
     342           0 :                 printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
     343           0 :                 goto free_admin_q;
     344             :         }
     345             : 
     346           0 :         sc->sc_q = nvme_q_alloc(sc, NVME_IO_Q, 128, sc->sc_dstrd);
     347           0 :         if (sc->sc_q == NULL) {
     348           0 :                 printf("%s: unable to allocate io q\n", DEVNAME(sc));
     349           0 :                 goto disable;
     350             :         }
     351             : 
     352           0 :         if (nvme_q_create(sc, sc->sc_q) != 0) {
     353           0 :                 printf("%s: unable to create io q\n", DEVNAME(sc));
     354           0 :                 goto free_q;
     355             :         }
     356             : 
     357           0 :         sc->sc_hib_q = nvme_q_alloc(sc, NVME_HIB_Q, 4, sc->sc_dstrd);
     358           0 :         if (sc->sc_hib_q == NULL) {
     359           0 :                 printf("%s: unable to allocate hibernate io queue\n", DEVNAME(sc));
     360           0 :                 goto free_q;
     361             :         }
     362             : 
     363           0 :         nvme_write4(sc, NVME_INTMC, 1);
     364             : 
     365           0 :         sc->sc_namespaces = mallocarray(sc->sc_nn, sizeof(*sc->sc_namespaces),
     366             :             M_DEVBUF, M_WAITOK|M_ZERO);
     367             : 
     368           0 :         sc->sc_link.adapter = &nvme_switch;
     369           0 :         sc->sc_link.adapter_softc = sc;
     370           0 :         sc->sc_link.adapter_buswidth = sc->sc_nn;
     371           0 :         sc->sc_link.luns = 1;
     372           0 :         sc->sc_link.adapter_target = sc->sc_nn;
     373           0 :         sc->sc_link.openings = 64;
     374           0 :         sc->sc_link.pool = &sc->sc_iopool;
     375             : 
     376           0 :         memset(&saa, 0, sizeof(saa));
     377           0 :         saa.saa_sc_link = &sc->sc_link;
     378             : 
     379           0 :         sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
     380             :             &saa, scsiprint);
     381             : 
     382           0 :         return (0);
     383             : 
     384             : free_q:
     385           0 :         nvme_q_free(sc, sc->sc_q);
     386             : disable:
     387           0 :         nvme_disable(sc);
     388             : free_ccbs:
     389           0 :         nvme_ccbs_free(sc);
     390             : free_admin_q:
     391           0 :         nvme_q_free(sc, sc->sc_admin_q);
     392             : 
     393           0 :         return (1);
     394           0 : }
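                     :
                     : /*
                     :  * Attach order, roughly: reset (disable) the controller, set up
                     :  * the admin queue with a small bootstrap ccb pool, enable, then
                     :  * IDENTIFY CONTROLLER to learn sc_mdts and sc_nn.  Only then is
                     :  * sc_max_sgl known, so the ccb pool is rebuilt at full size, the
                     :  * I/O and hibernate queues are created, and the namespaces are
                     :  * attached as targets on a scsibus.
                     :  */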
     395             : 
     396             : int
     397           0 : nvme_resume(struct nvme_softc *sc)
     398             : {
     399           0 :         if (nvme_disable(sc) != 0) {
     400           0 :                 printf("%s: unable to disable controller\n", DEVNAME(sc));
     401           0 :                 return (1);
     402             :         }
     403             : 
     404           0 :         if (nvme_q_reset(sc, sc->sc_admin_q) != 0) {
     405           0 :                 printf("%s: unable to reset admin queue\n", DEVNAME(sc));
     406           0 :                 return (1);
     407             :         }
     408             : 
     409           0 :         if (nvme_enable(sc, sc->sc_mps_bits) != 0) {
     410           0 :                 printf("%s: unable to enable controller\n", DEVNAME(sc));
     411           0 :                 return (1);
     412             :         }
     413             : 
     414           0 :         sc->sc_q = nvme_q_alloc(sc, NVME_IO_Q, 128, sc->sc_dstrd);
     415           0 :         if (sc->sc_q == NULL) {
     416           0 :                 printf("%s: unable to allocate io q\n", DEVNAME(sc));
     417           0 :                 goto disable;
     418             :         }
     419             : 
     420           0 :         if (nvme_q_create(sc, sc->sc_q) != 0) {
     421           0 :                 printf("%s: unable to create io q\n", DEVNAME(sc));
     422             :                 goto free_q;
     423             :         }
     424             : 
     425           0 :         nvme_write4(sc, NVME_INTMC, 1);
     426             : 
     427           0 :         return (0);
     428             : 
     429             : free_q:
     430           0 :         nvme_q_free(sc, sc->sc_q);
     431             : disable:
     432           0 :         nvme_disable(sc);
     433             : 
     434           0 :         return (1);
     435           0 : }
     436             : 
     437             : int
     438           0 : nvme_scsi_probe(struct scsi_link *link)
     439             : {
     440           0 :         struct nvme_softc *sc = link->adapter_softc;
     441           0 :         struct nvme_sqe sqe;
     442             :         struct nvm_identify_namespace *identify;
     443             :         struct nvme_dmamem *mem;
     444             :         struct nvme_ccb *ccb;
     445             :         int rv;
     446             : 
     447           0 :         ccb = scsi_io_get(&sc->sc_iopool, 0);
     448           0 :         KASSERT(ccb != NULL);
     449             : 
     450           0 :         mem = nvme_dmamem_alloc(sc, sizeof(*identify));
      451           0 :         if (mem == NULL) {
                     :                 /* put the ccb taken above back before bailing */
                     :                 scsi_io_put(&sc->sc_iopool, ccb);
      452           0 :                 return (ENOMEM);
                     :         }
     453             : 
     454           0 :         memset(&sqe, 0, sizeof(sqe));
     455           0 :         sqe.opcode = NVM_ADMIN_IDENTIFY;
     456           0 :         htolem32(&sqe.nsid, link->target + 1);
     457           0 :         htolem64(&sqe.entry.prp[0], NVME_DMA_DVA(mem));
     458           0 :         htolem32(&sqe.cdw10, 0);
     459             : 
     460           0 :         ccb->ccb_done = nvme_empty_done;
     461           0 :         ccb->ccb_cookie = &sqe;
     462             : 
     463           0 :         nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
     464           0 :         rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
     465           0 :         nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
     466             : 
     467           0 :         scsi_io_put(&sc->sc_iopool, ccb);
     468             : 
     469           0 :         if (rv != 0) {
     470             :                 rv = EIO;
     471           0 :                 goto done;
     472             :         }
     473             : 
      474             :         /* commit: record the identify data for this namespace */
     475             : 
     476           0 :         identify = malloc(sizeof(*identify), M_DEVBUF, M_WAITOK|M_ZERO);
     477           0 :         memcpy(identify, NVME_DMA_KVA(mem), sizeof(*identify));
     478             : 
     479           0 :         sc->sc_namespaces[link->target].ident = identify;
     480             : 
     481             : done:
     482           0 :         nvme_dmamem_free(sc, mem);
     483             : 
     484           0 :         return (rv);
     485           0 : }
     486             : 
     487             : int
     488           0 : nvme_shutdown(struct nvme_softc *sc)
     489             : {
     490             :         u_int32_t cc, csts;
     491             :         int i;
     492             : 
     493           0 :         nvme_write4(sc, NVME_INTMC, 0);
     494             : 
     495           0 :         if (nvme_q_delete(sc, sc->sc_q) != 0) {
     496           0 :                 printf("%s: unable to delete q, disabling\n", DEVNAME(sc));
     497           0 :                 goto disable;
     498             :         }
     499             : 
     500           0 :         cc = nvme_read4(sc, NVME_CC);
     501           0 :         CLR(cc, NVME_CC_SHN_MASK);
     502           0 :         SET(cc, NVME_CC_SHN(NVME_CC_SHN_NORMAL));
     503           0 :         nvme_write4(sc, NVME_CC, cc);
     504             : 
     505           0 :         for (i = 0; i < 4000; i++) {
     506           0 :                 nvme_barrier(sc, 0, sc->sc_ios,
     507             :                     BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
     508           0 :                 csts = nvme_read4(sc, NVME_CSTS);
     509           0 :                 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_DONE)
     510           0 :                         return (0);
     511             : 
     512           0 :                 delay(1000);
     513             :         }
     514             : 
     515           0 :         printf("%s: unable to shutdown, disabling\n", DEVNAME(sc));
     516             : 
     517             : disable:
     518           0 :         nvme_disable(sc);
     519           0 :         return (0);
     520           0 : }
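                     :
                     : /*
                     :  * A normal shutdown notification (CC.SHN = NORMAL) asks the
                     :  * controller to flush volatile state before reporting
                     :  * CSTS.SHST == DONE; the loop above allows it up to ~4 seconds
                     :  * (4000 iterations of 1ms) before giving up and disabling.
                     :  */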
     521             : 
     522             : int
     523           0 : nvme_activate(struct nvme_softc *sc, int act)
     524             : {
     525             :         int rv;
     526             : 
     527           0 :         switch (act) {
     528             :         case DVACT_POWERDOWN:
     529           0 :                 rv = config_activate_children(&sc->sc_dev, act);
     530           0 :                 nvme_shutdown(sc);
     531           0 :                 break;
     532             :         case DVACT_RESUME:
     533           0 :                 rv = nvme_resume(sc);
     534           0 :                 if (rv == 0)
     535           0 :                         rv = config_activate_children(&sc->sc_dev, act);
     536             :                 break;
     537             :         default:
     538           0 :                 rv = config_activate_children(&sc->sc_dev, act);
     539           0 :                 break;
     540             :         }
     541             : 
     542           0 :         return (rv);
     543             : }
     544             : 
     545             : void
     546           0 : nvme_scsi_cmd(struct scsi_xfer *xs)
     547             : {
     548           0 :         switch (xs->cmd->opcode) {
     549             :         case READ_COMMAND:
     550             :         case READ_BIG:
     551             :         case READ_12:
     552             :         case READ_16:
     553           0 :                 nvme_scsi_io(xs, SCSI_DATA_IN);
     554           0 :                 return;
     555             :         case WRITE_COMMAND:
     556             :         case WRITE_BIG:
     557             :         case WRITE_12:
     558             :         case WRITE_16:
     559           0 :                 nvme_scsi_io(xs, SCSI_DATA_OUT);
     560           0 :                 return;
     561             : 
     562             :         case SYNCHRONIZE_CACHE:
     563           0 :                 nvme_scsi_sync(xs);
     564           0 :                 return;
     565             : 
     566             :         case INQUIRY:
     567           0 :                 nvme_scsi_inq(xs);
     568           0 :                 return;
     569             :         case READ_CAPACITY_16:
     570           0 :                 nvme_scsi_capacity16(xs);
     571           0 :                 return;
     572             :         case READ_CAPACITY:
     573           0 :                 nvme_scsi_capacity(xs);
     574           0 :                 return;
     575             : 
     576             :         case TEST_UNIT_READY:
     577             :         case PREVENT_ALLOW:
     578             :         case START_STOP:
     579           0 :                 xs->error = XS_NOERROR;
     580           0 :                 scsi_done(xs);
     581           0 :                 return;
     582             : 
     583             :         default:
     584             :                 break;
     585             :         }
     586             : 
     587           0 :         xs->error = XS_DRIVER_STUFFUP;
     588           0 :         scsi_done(xs);
     589           0 : }
     590             : 
     591             : void
     592           0 : nvme_scsi_io(struct scsi_xfer *xs, int dir)
     593             : {
     594           0 :         struct scsi_link *link = xs->sc_link;
     595           0 :         struct nvme_softc *sc = link->adapter_softc;
     596           0 :         struct nvme_ccb *ccb = xs->io;
     597           0 :         bus_dmamap_t dmap = ccb->ccb_dmamap;
     598             :         int i;
     599             : 
     600           0 :         if ((xs->flags & (SCSI_DATA_IN|SCSI_DATA_OUT)) != dir)
     601             :                 goto stuffup;
     602             : 
     603           0 :         ccb->ccb_done = nvme_scsi_io_done;
     604           0 :         ccb->ccb_cookie = xs;
     605             : 
     606           0 :         if (bus_dmamap_load(sc->sc_dmat, dmap,
     607             :             xs->data, xs->datalen, NULL, ISSET(xs->flags, SCSI_NOSLEEP) ?
     608           0 :             BUS_DMA_NOWAIT : BUS_DMA_WAITOK) != 0)
     609             :                 goto stuffup;
     610             : 
     611           0 :         bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
     612             :             ISSET(xs->flags, SCSI_DATA_IN) ?
     613             :             BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
     614             : 
     615           0 :         if (dmap->dm_nsegs > 2) {
     616           0 :                 for (i = 1; i < dmap->dm_nsegs; i++) {
     617           0 :                         htolem64(&ccb->ccb_prpl[i - 1],
     618             :                             dmap->dm_segs[i].ds_addr);
     619             :                 }
     620           0 :                 bus_dmamap_sync(sc->sc_dmat,
     621             :                     NVME_DMA_MAP(sc->sc_ccb_prpls),
     622             :                     ccb->ccb_prpl_off,
      623             :                     sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
     624             :                     BUS_DMASYNC_PREWRITE);
     625           0 :         }
     626             : 
     627           0 :         if (ISSET(xs->flags, SCSI_POLL)) {
     628           0 :                 nvme_poll(sc, sc->sc_q, ccb, nvme_scsi_io_fill);
     629           0 :                 return;
     630             :         }
     631             : 
     632           0 :         nvme_q_submit(sc, sc->sc_q, ccb, nvme_scsi_io_fill);
     633           0 :         return;
     634             : 
     635             : stuffup:
     636           0 :         xs->error = XS_DRIVER_STUFFUP;
     637           0 :         scsi_done(xs);
     638           0 : }
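                     :
                     : /*
                     :  * PRP sketch: segment 0 always goes in prp[0] (filled in by
                     :  * nvme_scsi_io_fill() below).  Two segments fit entirely in the
                     :  * SQE; three or more need the per-ccb PRP list, which is why
                     :  * only entries 1..nsegs-1 are copied above.  For a 3-segment
                     :  * transfer:
                     :  *
                     :  *      sqe prp[0] = seg[0]
                     :  *      sqe prp[1] = ccb_prpl_dva        (points at the list)
                     :  *      prpl[0]    = seg[1]
                     :  *      prpl[1]    = seg[2]
                     :  */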
     639             : 
     640             : void
     641           0 : nvme_scsi_io_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
     642             : {
     643           0 :         struct nvme_sqe_io *sqe = slot;
     644           0 :         struct scsi_xfer *xs = ccb->ccb_cookie;
     645           0 :         struct scsi_link *link = xs->sc_link;
     646           0 :         bus_dmamap_t dmap = ccb->ccb_dmamap;
     647           0 :         u_int64_t lba;
     648           0 :         u_int32_t blocks;
     649             : 
     650           0 :         scsi_cmd_rw_decode(xs->cmd, &lba, &blocks);
     651             : 
     652           0 :         sqe->opcode = ISSET(xs->flags, SCSI_DATA_IN) ?
     653             :             NVM_CMD_READ : NVM_CMD_WRITE;
     654           0 :         htolem32(&sqe->nsid, link->target + 1);
     655             : 
     656           0 :         htolem64(&sqe->entry.prp[0], dmap->dm_segs[0].ds_addr);
     657           0 :         switch (dmap->dm_nsegs) {
     658             :         case 1:
     659             :                 break;
     660             :         case 2:
     661           0 :                 htolem64(&sqe->entry.prp[1], dmap->dm_segs[1].ds_addr);
     662           0 :                 break;
     663             :         default:
     664             :                 /* the prp list is already set up and synced */
     665           0 :                 htolem64(&sqe->entry.prp[1], ccb->ccb_prpl_dva);
     666           0 :                 break;
     667             :         }
     668             : 
     669           0 :         htolem64(&sqe->slba, lba);
     670           0 :         htolem16(&sqe->nlb, blocks - 1);
     671           0 : }
     672             : 
     673             : void
     674           0 : nvme_scsi_io_done(struct nvme_softc *sc, struct nvme_ccb *ccb,
     675             :     struct nvme_cqe *cqe)
     676             : {
     677           0 :         struct scsi_xfer *xs = ccb->ccb_cookie;
     678           0 :         bus_dmamap_t dmap = ccb->ccb_dmamap;
     679             :         u_int16_t flags;
     680             : 
     681           0 :         if (dmap->dm_nsegs > 2) {
     682           0 :                 bus_dmamap_sync(sc->sc_dmat,
     683             :                     NVME_DMA_MAP(sc->sc_ccb_prpls),
     684             :                     ccb->ccb_prpl_off,
      685             :                     sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
     686             :                     BUS_DMASYNC_POSTWRITE);
     687           0 :         }
     688             : 
     689           0 :         bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
     690             :             ISSET(xs->flags, SCSI_DATA_IN) ?
     691             :             BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
     692             : 
     693           0 :         bus_dmamap_unload(sc->sc_dmat, dmap);
     694             : 
     695           0 :         flags = lemtoh16(&cqe->flags);
     696             : 
     697           0 :         xs->error = (NVME_CQE_SC(flags) == NVME_CQE_SC_SUCCESS) ?
     698             :             XS_NOERROR : XS_DRIVER_STUFFUP;
     699           0 :         xs->status = SCSI_OK;
     700           0 :         xs->resid = 0;
     701           0 :         scsi_done(xs);
     702           0 : }
     703             : 
     704             : void
     705           0 : nvme_scsi_sync(struct scsi_xfer *xs)
     706             : {
     707           0 :         struct scsi_link *link = xs->sc_link;
     708           0 :         struct nvme_softc *sc = link->adapter_softc;
     709           0 :         struct nvme_ccb *ccb = xs->io;
     710             : 
     711           0 :         ccb->ccb_done = nvme_scsi_sync_done;
     712           0 :         ccb->ccb_cookie = xs;
     713             : 
     714           0 :         if (ISSET(xs->flags, SCSI_POLL)) {
     715           0 :                 nvme_poll(sc, sc->sc_q, ccb, nvme_scsi_sync_fill);
     716           0 :                 return;
     717             :         }
     718             : 
     719           0 :         nvme_q_submit(sc, sc->sc_q, ccb, nvme_scsi_sync_fill);
     720           0 : }
     721             : 
     722             : void
     723           0 : nvme_scsi_sync_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
     724             : {
     725           0 :         struct nvme_sqe *sqe = slot;
     726           0 :         struct scsi_xfer *xs = ccb->ccb_cookie;
     727           0 :         struct scsi_link *link = xs->sc_link;
     728             : 
     729           0 :         sqe->opcode = NVM_CMD_FLUSH;
     730           0 :         htolem32(&sqe->nsid, link->target + 1);
     731           0 : }
     732             : 
     733             : void
     734           0 : nvme_scsi_sync_done(struct nvme_softc *sc, struct nvme_ccb *ccb,
     735             :     struct nvme_cqe *cqe)
     736             : {
     737           0 :         struct scsi_xfer *xs = ccb->ccb_cookie;
     738             :         u_int16_t flags;
     739             : 
     740           0 :         flags = lemtoh16(&cqe->flags);
     741             : 
     742           0 :         xs->error = (NVME_CQE_SC(flags) == NVME_CQE_SC_SUCCESS) ?
     743             :             XS_NOERROR : XS_DRIVER_STUFFUP;
     744           0 :         xs->status = SCSI_OK;
     745           0 :         xs->resid = 0;
     746           0 :         scsi_done(xs);
     747           0 : }
     748             : 
     749             : void
     750           0 : nvme_scsi_inq(struct scsi_xfer *xs)
     751             : {
     752           0 :         struct scsi_inquiry *inq = (struct scsi_inquiry *)xs->cmd;
     753             : 
     754           0 :         if (!ISSET(inq->flags, SI_EVPD)) {
     755           0 :                 nvme_scsi_inquiry(xs);
     756           0 :                 return;
     757             :         }
     758             : 
     759             :         switch (inq->pagecode) {
     760             :         default:
     761             :                 /* printf("%s: %d\n", __func__, inq->pagecode); */
     762             :                 break;
     763             :         }
     764             : 
     765           0 :         xs->error = XS_DRIVER_STUFFUP;
     766           0 :         scsi_done(xs);
     767           0 : }
     768             : 
     769             : void
     770           0 : nvme_scsi_inquiry(struct scsi_xfer *xs)
     771             : {
     772           0 :         struct scsi_inquiry_data inq;
     773           0 :         struct scsi_link *link = xs->sc_link;
     774           0 :         struct nvme_softc *sc = link->adapter_softc;
     775             :         struct nvm_identify_namespace *ns;
     776             : 
     777           0 :         ns = sc->sc_namespaces[link->target].ident;
     778             : 
     779           0 :         memset(&inq, 0, sizeof(inq));
     780             : 
     781           0 :         inq.device = T_DIRECT;
     782           0 :         inq.version = 0x06; /* SPC-4 */
     783           0 :         inq.response_format = 2;
     784           0 :         inq.additional_length = 32;
     785           0 :         inq.flags |= SID_CmdQue;
     786           0 :         memcpy(inq.vendor, "NVMe    ", sizeof(inq.vendor));
     787           0 :         memcpy(inq.product, sc->sc_identify.mn, sizeof(inq.product));
     788           0 :         memcpy(inq.revision, sc->sc_identify.fr, sizeof(inq.revision));
     789             : 
     790           0 :         memcpy(xs->data, &inq, MIN(sizeof(inq), xs->datalen));
     791             : 
     792           0 :         xs->error = XS_NOERROR;
     793           0 :         scsi_done(xs);
     794           0 : }
     795             : 
     796             : void
     797           0 : nvme_scsi_capacity16(struct scsi_xfer *xs)
     798             : {
     799           0 :         struct scsi_read_cap_data_16 rcd;
     800           0 :         struct scsi_link *link = xs->sc_link;
     801           0 :         struct nvme_softc *sc = link->adapter_softc;
     802             :         struct nvm_identify_namespace *ns;
     803             :         struct nvm_namespace_format *f;
     804             :         u_int64_t nsze;
     805             :         u_int16_t tpe = READ_CAP_16_TPE;
     806             : 
     807           0 :         ns = sc->sc_namespaces[link->target].ident;
     808             : 
     809           0 :         if (xs->cmdlen != sizeof(struct scsi_read_capacity_16)) {
     810           0 :                 xs->error = XS_DRIVER_STUFFUP;
     811           0 :                 scsi_done(xs);
     812           0 :                 return;
     813             :         }
     814             : 
     815             :         /* sd_read_cap_16() will add one */
     816           0 :         nsze = lemtoh64(&ns->nsze) - 1;
     817           0 :         f = &ns->lbaf[NVME_ID_NS_FLBAS(ns->flbas)];
     818             : 
     819           0 :         memset(&rcd, 0, sizeof(rcd));
     820           0 :         _lto8b(nsze, rcd.addr);
     821           0 :         _lto4b(1 << f->lbads, rcd.length);
     822           0 :         _lto2b(tpe, rcd.lowest_aligned);
     823             : 
     824           0 :         memcpy(xs->data, &rcd, MIN(sizeof(rcd), xs->datalen));
     825             : 
     826           0 :         xs->error = XS_NOERROR;
     827           0 :         scsi_done(xs);
     828           0 : }
     829             : 
     830             : void
     831           0 : nvme_scsi_capacity(struct scsi_xfer *xs)
     832             : {
     833           0 :         struct scsi_read_cap_data rcd;
     834           0 :         struct scsi_link *link = xs->sc_link;
     835           0 :         struct nvme_softc *sc = link->adapter_softc;
     836             :         struct nvm_identify_namespace *ns;
     837             :         struct nvm_namespace_format *f;
     838             :         u_int64_t nsze;
     839             : 
     840           0 :         ns = sc->sc_namespaces[link->target].ident;
     841             : 
     842           0 :         if (xs->cmdlen != sizeof(struct scsi_read_capacity)) {
     843           0 :                 xs->error = XS_DRIVER_STUFFUP;
     844           0 :                 scsi_done(xs);
     845           0 :                 return;
     846             :         }
     847             : 
     848             :         /* sd_read_cap_10() will add one */
     849           0 :         nsze = lemtoh64(&ns->nsze) - 1;
     850           0 :         if (nsze > 0xffffffff)
     851             :                 nsze = 0xffffffff;
     852             : 
     853           0 :         f = &ns->lbaf[NVME_ID_NS_FLBAS(ns->flbas)];
     854             : 
     855           0 :         memset(&rcd, 0, sizeof(rcd));
     856           0 :         _lto4b(nsze, rcd.addr);
     857           0 :         _lto4b(1 << f->lbads, rcd.length);
     858             : 
     859           0 :         memcpy(xs->data, &rcd, MIN(sizeof(rcd), xs->datalen));
     860             : 
     861           0 :         xs->error = XS_NOERROR;
     862           0 :         scsi_done(xs);
     863           0 : }
     864             : 
     865             : void
     866           0 : nvme_scsi_free(struct scsi_link *link)
     867             : {
     868           0 :         struct nvme_softc *sc = link->adapter_softc;
     869             :         struct nvm_identify_namespace *identify;
     870             : 
     871           0 :         identify = sc->sc_namespaces[link->target].ident;
     872           0 :         sc->sc_namespaces[link->target].ident = NULL;
     873             : 
     874           0 :         free(identify, M_DEVBUF, sizeof(*identify));
     875           0 : }
     876             : 
     877             : void
     878           0 : nvme_q_submit(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
     879             :     void (*fill)(struct nvme_softc *, struct nvme_ccb *, void *))
     880             : {
     881           0 :         struct nvme_sqe *sqe = NVME_DMA_KVA(q->q_sq_dmamem);
     882             :         u_int32_t tail;
     883             : 
     884           0 :         mtx_enter(&q->q_sq_mtx);
     885           0 :         tail = q->q_sq_tail;
     886           0 :         if (++q->q_sq_tail >= q->q_entries)
     887           0 :                 q->q_sq_tail = 0;
     888             : 
     889           0 :         sqe += tail;
     890             : 
     891           0 :         bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
     892             :             sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_POSTWRITE);
     893           0 :         memset(sqe, 0, sizeof(*sqe));
     894           0 :         (*fill)(sc, ccb, sqe);
     895           0 :         sqe->cid = ccb->ccb_id;
     896           0 :         bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
     897             :             sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_PREWRITE);
     898             : 
     899           0 :         nvme_write4(sc, q->q_sqtdbl, q->q_sq_tail);
     900           0 :         mtx_leave(&q->q_sq_mtx);
     901           0 : }
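                     :
                     : /*
                     :  * Ring mechanics: the SQE is built in the slot at the old tail,
                     :  * the tail advances modulo q_entries, and writing the new tail
                     :  * to the submission queue tail doorbell (q_sqtdbl) hands the
                     :  * slot to the controller.  With q_entries == 128, a tail of 127
                     :  * wraps back to 0.
                     :  */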
     902             : 
     903             : struct nvme_poll_state {
     904             :         struct nvme_sqe s;
     905             :         struct nvme_cqe c;
     906             : };
     907             : 
     908             : int
     909           0 : nvme_poll(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
     910             :     void (*fill)(struct nvme_softc *, struct nvme_ccb *, void *))
     911             : {
     912           0 :         struct nvme_poll_state state;
     913             :         void (*done)(struct nvme_softc *, struct nvme_ccb *, struct nvme_cqe *);
     914             :         void *cookie;
     915             :         u_int16_t flags;
     916             : 
     917           0 :         memset(&state, 0, sizeof(state));
     918           0 :         (*fill)(sc, ccb, &state.s);
     919             : 
     920           0 :         done = ccb->ccb_done;
     921           0 :         cookie = ccb->ccb_cookie;
     922             : 
     923           0 :         ccb->ccb_done = nvme_poll_done;
     924           0 :         ccb->ccb_cookie = &state;
     925             : 
     926           0 :         nvme_q_submit(sc, q, ccb, nvme_poll_fill);
     927           0 :         while (!ISSET(state.c.flags, htole16(NVME_CQE_PHASE))) {
     928           0 :                 if (nvme_q_complete(sc, q) == 0)
     929           0 :                         delay(10);
     930             : 
     931             :                 /* XXX no timeout? */
     932             :         }
     933             : 
     934           0 :         ccb->ccb_cookie = cookie;
     935           0 :         done(sc, ccb, &state.c);
     936             : 
     937           0 :         flags = lemtoh16(&state.c.flags);
     938             : 
     939           0 :         return (flags & ~NVME_CQE_PHASE);
     940           0 : }
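                     :
                     : /*
                     :  * nvme_poll() borrows the ccb: the caller's done/cookie pair is
                     :  * saved, redirected at a stack-local nvme_poll_state, and the
                     :  * CPU spins on nvme_q_complete() until nvme_poll_done() copies
                     :  * the CQE into it.  The PHASE bit, forced on by
                     :  * nvme_poll_done(), doubles as the "completed" flag.  The saved
                     :  * handler then runs with the copied CQE, and the returned
                     :  * status (flags with the phase bit masked off) is 0 only for
                     :  * NVME_CQE_SC_SUCCESS.
                     :  */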
     941             : 
     942             : void
     943           0 : nvme_poll_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
     944             : {
     945           0 :         struct nvme_sqe *sqe = slot;
     946           0 :         struct nvme_poll_state *state = ccb->ccb_cookie;
     947             : 
     948           0 :         *sqe = state->s;
     949           0 : }
     950             : 
     951             : void
     952           0 : nvme_poll_done(struct nvme_softc *sc, struct nvme_ccb *ccb,
     953             :     struct nvme_cqe *cqe)
     954             : {
     955           0 :         struct nvme_poll_state *state = ccb->ccb_cookie;
     956             : 
     957           0 :         SET(cqe->flags, htole16(NVME_CQE_PHASE));
     958           0 :         state->c = *cqe;
     959           0 : }
     960             : 
     961             : void
     962           0 : nvme_sqe_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
     963             : {
     964           0 :         struct nvme_sqe *src = ccb->ccb_cookie;
     965           0 :         struct nvme_sqe *dst = slot;
     966             : 
     967           0 :         *dst = *src;
     968           0 : }
     969             : 
     970             : void
     971           0 : nvme_empty_done(struct nvme_softc *sc, struct nvme_ccb *ccb,
     972             :     struct nvme_cqe *cqe)
     973             : {
     974           0 : }
     975             : 
     976             : int
     977           0 : nvme_q_complete(struct nvme_softc *sc, struct nvme_queue *q)
     978             : {
     979             :         struct nvme_ccb *ccb;
     980           0 :         struct nvme_cqe *ring = NVME_DMA_KVA(q->q_cq_dmamem), *cqe;
     981             :         u_int32_t head;
     982             :         u_int16_t flags;
     983             :         int rv = 0;
     984             : 
     985           0 :         if (!mtx_enter_try(&q->q_cq_mtx))
     986           0 :                 return (-1);
     987             : 
     988           0 :         head = q->q_cq_head;
     989             : 
     990           0 :         nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
     991           0 :         for (;;) {
     992           0 :                 cqe = &ring[head];
     993           0 :                 flags = lemtoh16(&cqe->flags);
     994           0 :                 if ((flags & NVME_CQE_PHASE) != q->q_cq_phase)
     995             :                         break;
     996             : 
     997           0 :                 ccb = &sc->sc_ccbs[cqe->cid];
     998           0 :                 ccb->ccb_done(sc, ccb, cqe);
     999             : 
    1000           0 :                 if (++head >= q->q_entries) {
    1001             :                         head = 0;
    1002           0 :                         q->q_cq_phase ^= NVME_CQE_PHASE;
    1003           0 :                 }
    1004             : 
    1005             :                 rv = 1;
    1006             :         }
    1007           0 :         nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
    1008             : 
    1009           0 :         if (rv)
    1010           0 :                 nvme_write4(sc, q->q_cqhdbl, q->q_cq_head = head);
    1011           0 :         mtx_leave(&q->q_cq_mtx);
    1012             : 
    1013           0 :         return (rv);
    1014           0 : }
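                     :
                     : /*
                     :  * The phase tag distinguishes fresh completions from stale ring
                     :  * contents without a producer index: the controller writes each
                     :  * CQE with PHASE equal to its current pass over the ring, and
                     :  * the host flips q_cq_phase on every wrap (presumably seeded to
                     :  * NVME_CQE_PHASE at queue allocation).  For q_entries == 4:
                     :  *
                     :  *      pass 1: CQEs arrive with PHASE == 1, q_cq_phase == 1
                     :  *      pass 2: CQEs arrive with PHASE == 0, q_cq_phase == 0
                     :  *
                     :  * An entry whose PHASE differs from q_cq_phase has not been
                     :  * written yet, so the scan stops there.
                     :  */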
    1015             : 
    1016             : int
    1017           0 : nvme_identify(struct nvme_softc *sc, u_int mps)
    1018             : {
    1019           0 :         char sn[41], mn[81], fr[17];
    1020             :         struct nvm_identify_controller *identify;
    1021             :         struct nvme_dmamem *mem;
    1022             :         struct nvme_ccb *ccb;
    1023             :         u_int mdts;
    1024             :         int rv = 1;
    1025             : 
    1026           0 :         ccb = nvme_ccb_get(sc);
    1027           0 :         if (ccb == NULL)
    1028           0 :                 panic("nvme_identify: nvme_ccb_get returned NULL");
    1029             : 
    1030           0 :         mem = nvme_dmamem_alloc(sc, sizeof(*identify));
     1031           0 :         if (mem == NULL) {
                     :                 /* put the ccb taken above back before bailing */
                     :                 nvme_ccb_put(sc, ccb);
     1032           0 :                 return (1);
                     :         }
    1033             : 
    1034           0 :         ccb->ccb_done = nvme_empty_done;
    1035           0 :         ccb->ccb_cookie = mem;
    1036             : 
    1037           0 :         nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
    1038           0 :         rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify);
    1039           0 :         nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
    1040             : 
    1041           0 :         nvme_ccb_put(sc, ccb);
    1042             : 
    1043           0 :         if (rv != 0)
    1044             :                 goto done;
    1045             : 
    1046           0 :         identify = NVME_DMA_KVA(mem);
    1047             : 
    1048           0 :         scsi_strvis(sn, identify->sn, sizeof(identify->sn));
    1049           0 :         scsi_strvis(mn, identify->mn, sizeof(identify->mn));
    1050           0 :         scsi_strvis(fr, identify->fr, sizeof(identify->fr));
    1051             : 
    1052           0 :         printf("%s: %s, firmware %s, serial %s\n", DEVNAME(sc), mn, fr, sn);
    1053             : 
    1054           0 :         if (identify->mdts > 0) {
    1055           0 :                 mdts = (1 << identify->mdts) * (1 << mps);
    1056           0 :                 if (mdts < sc->sc_mdts)
    1057           0 :                         sc->sc_mdts = mdts;
    1058             :         }
    1059             : 
    1060           0 :         sc->sc_nn = lemtoh32(&identify->nn);
    1061             : 
    1062             :         /*
    1063             :          * At least one Apple NVMe device presents a second, bogus disk that is
    1064             :          * inaccessible, so cap targets at 1.
    1065             :          *
    1066             :          * sd1 at scsibus1 targ 1 lun 0: <NVMe, APPLE SSD AP0512, 16.1> [..]
    1067             :          * sd1: 0MB, 4096 bytes/sector, 2 sectors
    1068             :          */
    1069           0 :         if (sc->sc_nn > 1 &&
    1070           0 :             mn[0] == 'A' && mn[1] == 'P' && mn[2] == 'P' && mn[3] == 'L' &&
    1071           0 :             mn[4] == 'E')
    1072           0 :                 sc->sc_nn = 1;
    1073             : 
    1074           0 :         memcpy(&sc->sc_identify, identify, sizeof(sc->sc_identify));
    1075             : 
    1076             : done:
    1077           0 :         nvme_dmamem_free(sc, mem);
    1078             : 
    1079           0 :         return (rv);
    1080           0 : }
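
For scale: identify->mdts is a power-of-two exponent in units of the
controller's minimum page size, so with an illustrative page shift of 12
(4096-byte pages) and an MDTS field of 5, the cap computed above is
(1 << 5) * (1 << 12) = 131072 bytes, and sc_mdts is clamped to 128 KB. A
controller reporting mdts == 0 advertises no limit, which is why the field is
only consulted when nonzero.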
    1081             : 
    1082             : int
    1083           0 : nvme_q_create(struct nvme_softc *sc, struct nvme_queue *q)
    1084             : {
    1085           0 :         struct nvme_sqe_q sqe;
    1086             :         struct nvme_ccb *ccb;
    1087             :         int rv;
    1088             : 
    1089           0 :         ccb = scsi_io_get(&sc->sc_iopool, 0);
    1090           0 :         KASSERT(ccb != NULL);
    1091             : 
    1092           0 :         ccb->ccb_done = nvme_empty_done;
    1093           0 :         ccb->ccb_cookie = &sqe;
    1094             : 
    1095           0 :         memset(&sqe, 0, sizeof(sqe));
    1096           0 :         sqe.opcode = NVM_ADMIN_ADD_IOCQ;
    1097           0 :         htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_cq_dmamem));
    1098           0 :         htolem16(&sqe.qsize, q->q_entries - 1);
    1099           0 :         htolem16(&sqe.qid, q->q_id);
    1100           0 :         sqe.qflags = NVM_SQE_CQ_IEN | NVM_SQE_Q_PC;
    1101             : 
    1102           0 :         rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
    1103           0 :         if (rv != 0)
    1104             :                 goto fail;
    1105             : 
    1106           0 :         ccb->ccb_done = nvme_empty_done;
    1107           0 :         ccb->ccb_cookie = &sqe;
    1108             : 
    1109           0 :         memset(&sqe, 0, sizeof(sqe));
    1110           0 :         sqe.opcode = NVM_ADMIN_ADD_IOSQ;
    1111           0 :         htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_sq_dmamem));
    1112           0 :         htolem16(&sqe.qsize, q->q_entries - 1);
    1113           0 :         htolem16(&sqe.qid, q->q_id);
    1114           0 :         htolem16(&sqe.cqid, q->q_id);
    1115           0 :         sqe.qflags = NVM_SQE_Q_PC;
    1116             : 
    1117           0 :         rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
    1118             :         if (rv != 0)
    1119           0 :                 goto fail;
    1120             : 
    1121             : fail:
    1122           0 :         scsi_io_put(&sc->sc_iopool, ccb);
    1123           0 :         return (rv);
    1124           0 : }
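
nvme_q_create() issues the two admin commands in the order the specification
requires: the completion queue must exist before a submission queue names it
in cqid. A hedged sketch of how a caller might bring up I/O queue 1 with these
helpers (entry count illustrative, error handling abbreviated):

	struct nvme_queue *q;

	q = nvme_q_alloc(sc, 1, 128, sc->sc_dstrd);	/* qid 1, 128 entries */
	if (q == NULL)
		return (1);
	if (nvme_q_create(sc, q) != 0) {
		nvme_q_free(sc, q);
		return (1);
	}
	sc->sc_q = q;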
    1125             : 
    1126             : int
    1127           0 : nvme_q_delete(struct nvme_softc *sc, struct nvme_queue *q)
    1128             : {
    1129           0 :         struct nvme_sqe_q sqe;
    1130             :         struct nvme_ccb *ccb;
    1131             :         int rv;
    1132             : 
    1133           0 :         ccb = scsi_io_get(&sc->sc_iopool, 0);
    1134           0 :         KASSERT(ccb != NULL);
    1135             : 
    1136           0 :         ccb->ccb_done = nvme_empty_done;
    1137           0 :         ccb->ccb_cookie = &sqe;
    1138             : 
    1139           0 :         memset(&sqe, 0, sizeof(sqe));
    1140           0 :         sqe.opcode = NVM_ADMIN_DEL_IOSQ;
    1141           0 :         htolem16(&sqe.qid, q->q_id);
    1142             : 
    1143           0 :         rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
    1144           0 :         if (rv != 0)
    1145             :                 goto fail;
    1146             : 
    1147           0 :         ccb->ccb_done = nvme_empty_done;
    1148           0 :         ccb->ccb_cookie = &sqe;
    1149             : 
    1150           0 :         memset(&sqe, 0, sizeof(sqe));
    1151           0 :         sqe.opcode = NVM_ADMIN_DEL_IOCQ;
    1152           0 :         htolem16(&sqe.qid, q->q_id);
    1153             : 
    1154           0 :         rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
    1155           0 :         if (rv != 0)
    1156             :                 goto fail;
    1157             : 
    1158           0 :         nvme_q_free(sc, q);
    1159             : 
    1160             : fail:
    1161           0 :         scsi_io_put(&sc->sc_iopool, ccb);
     1162           0 :         return (rv);
     1164           0 : }
    1165             : 
    1166             : void
    1167           0 : nvme_fill_identify(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
    1168             : {
    1169           0 :         struct nvme_sqe *sqe = slot;
    1170           0 :         struct nvme_dmamem *mem = ccb->ccb_cookie;
    1171             : 
    1172           0 :         sqe->opcode = NVM_ADMIN_IDENTIFY;
    1173           0 :         htolem64(&sqe->entry.prp[0], NVME_DMA_DVA(mem));
    1174           0 :         htolem32(&sqe->cdw10, 1);
    1175           0 : }
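
The cdw10 value of 1 is the Identify CNS code for "Identify Controller",
which is why nvme_identify() sizes the DMA buffer for a struct
nvm_identify_controller; CNS 0 together with a namespace ID in nsid would
return per-namespace data instead.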
    1176             : 
    1177             : int
    1178           0 : nvme_ccbs_alloc(struct nvme_softc *sc, u_int nccbs)
    1179             : {
    1180             :         struct nvme_ccb *ccb;
    1181             :         bus_addr_t off;
    1182             :         u_int64_t *prpl;
    1183             :         u_int i;
    1184             : 
    1185           0 :         sc->sc_ccbs = mallocarray(nccbs, sizeof(*ccb), M_DEVBUF,
    1186             :             M_WAITOK | M_CANFAIL);
    1187           0 :         if (sc->sc_ccbs == NULL)
    1188           0 :                 return (1);
    1189             : 
     1190           0 :         sc->sc_ccb_prpls = nvme_dmamem_alloc(sc,
     1191           0 :             sizeof(*prpl) * sc->sc_max_sgl * nccbs);
                      :         if (sc->sc_ccb_prpls == NULL) {
                      :                 /* unwind the ccb array allocated above */
                      :                 free(sc->sc_ccbs, M_DEVBUF, nccbs * sizeof(*ccb));
                      :                 sc->sc_ccbs = NULL;
                      :                 return (1);
                      :         }
    1192             : 
    1193           0 :         prpl = NVME_DMA_KVA(sc->sc_ccb_prpls);
    1194             :         off = 0;
    1195             : 
    1196           0 :         for (i = 0; i < nccbs; i++) {
    1197           0 :                 ccb = &sc->sc_ccbs[i];
    1198             : 
    1199           0 :                 if (bus_dmamap_create(sc->sc_dmat, sc->sc_mdts,
    1200             :                     sc->sc_max_sgl + 1 /* we get a free prp in the sqe */,
    1201             :                     sc->sc_mps, sc->sc_mps, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
    1202           0 :                     &ccb->ccb_dmamap) != 0)
    1203             :                         goto free_maps;
    1204             : 
    1205           0 :                 ccb->ccb_id = i;
    1206           0 :                 ccb->ccb_prpl = prpl;
    1207           0 :                 ccb->ccb_prpl_off = off;
    1208           0 :                 ccb->ccb_prpl_dva = NVME_DMA_DVA(sc->sc_ccb_prpls) + off;
    1209             : 
    1210           0 :                 SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_list, ccb, ccb_entry);
    1211             : 
    1212           0 :                 prpl += sc->sc_max_sgl;
    1213           0 :                 off += sizeof(*prpl) * sc->sc_max_sgl;
    1214             :         }
    1215             : 
    1216           0 :         return (0);
    1217             : 
    1218             : free_maps:
    1219           0 :         nvme_ccbs_free(sc);
    1220           0 :         return (1);
    1221           0 : }
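
All per-ccb PRP lists are carved out of a single DMA-safe slab: ccb i's list
starts at byte offset i * sc_max_sgl * sizeof(u_int64_t), tracked in parallel
as a kernel pointer (ccb_prpl), a map offset (ccb_prpl_off) and a device
address (ccb_prpl_dva), so the CPU can fill the list and the controller can be
handed its bus address without further translation. For illustration, with a
hypothetical sc_max_sgl of 32, each ccb owns 32 * 8 = 256 bytes of the slab,
and 64 ccbs make the slab 16 KB.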
    1222             : 
    1223             : void *
    1224           0 : nvme_ccb_get(void *cookie)
    1225             : {
    1226           0 :         struct nvme_softc *sc = cookie;
    1227             :         struct nvme_ccb *ccb;
    1228             : 
    1229           0 :         mtx_enter(&sc->sc_ccb_mtx);
    1230           0 :         ccb = SIMPLEQ_FIRST(&sc->sc_ccb_list);
    1231           0 :         if (ccb != NULL)
    1232           0 :                 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_list, ccb_entry);
    1233           0 :         mtx_leave(&sc->sc_ccb_mtx);
    1234             : 
    1235           0 :         return (ccb);
    1236             : }
    1237             : 
    1238             : void
    1239           0 : nvme_ccb_put(void *cookie, void *io)
    1240             : {
    1241           0 :         struct nvme_softc *sc = cookie;
    1242           0 :         struct nvme_ccb *ccb = io;
    1243             : 
    1244           0 :         mtx_enter(&sc->sc_ccb_mtx);
    1245           0 :         SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_list, ccb, ccb_entry);
    1246           0 :         mtx_leave(&sc->sc_ccb_mtx);
    1247           0 : }
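
nvme_ccb_get() and nvme_ccb_put() are shaped as the io_get/io_put callbacks of
a scsi_iopool, hence the untyped cookie/io parameters; scsi_io_get() and
scsi_io_put(), as used by nvme_q_create() and nvme_q_delete() above, reach
them through sc->sc_iopool, presumably registered with scsi_iopool_init()
during attach.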
    1248             : 
    1249             : void
    1250           0 : nvme_ccbs_free(struct nvme_softc *sc)
    1251             : {
    1252             :         struct nvme_ccb *ccb;
    1253             : 
    1254           0 :         while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_list)) != NULL) {
    1255           0 :                 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_list, ccb_entry);
    1256           0 :                 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
    1257             :         }
    1258             : 
    1259           0 :         nvme_dmamem_free(sc, sc->sc_ccb_prpls);
    1260           0 :         free(sc->sc_ccbs, M_DEVBUF, 0);
    1261           0 : }
    1262             : 
    1263             : struct nvme_queue *
    1264           0 : nvme_q_alloc(struct nvme_softc *sc, u_int16_t id, u_int entries, u_int dstrd)
    1265             : {
    1266             :         struct nvme_queue *q;
    1267             : 
    1268           0 :         q = malloc(sizeof(*q), M_DEVBUF, M_WAITOK | M_CANFAIL);
    1269           0 :         if (q == NULL)
    1270           0 :                 return (NULL);
    1271             : 
    1272           0 :         q->q_sq_dmamem = nvme_dmamem_alloc(sc,
    1273           0 :             sizeof(struct nvme_sqe) * entries);
    1274           0 :         if (q->q_sq_dmamem == NULL)
    1275             :                 goto free;
    1276             : 
    1277           0 :         q->q_cq_dmamem = nvme_dmamem_alloc(sc,
    1278           0 :             sizeof(struct nvme_cqe) * entries);
    1279           0 :         if (q->q_cq_dmamem == NULL)
    1280             :                 goto free_sq;
    1281             : 
    1282           0 :         memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
    1283           0 :         memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));
    1284             : 
    1285           0 :         mtx_init(&q->q_sq_mtx, IPL_BIO);
    1286           0 :         mtx_init(&q->q_cq_mtx, IPL_BIO);
    1287           0 :         q->q_sqtdbl = NVME_SQTDBL(id, dstrd);
    1288           0 :         q->q_cqhdbl = NVME_CQHDBL(id, dstrd);
    1289             : 
    1290           0 :         q->q_id = id;
    1291           0 :         q->q_entries = entries;
    1292           0 :         q->q_sq_tail = 0;
    1293           0 :         q->q_cq_head = 0;
    1294           0 :         q->q_cq_phase = NVME_CQE_PHASE;
    1295             : 
    1296           0 :         nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
    1297           0 :         nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
    1298             : 
    1299           0 :         return (q);
    1300             : 
    1301             : free_sq:
    1302           0 :         nvme_dmamem_free(sc, q->q_sq_dmamem);
    1303             : free:
    1304           0 :         free(q, M_DEVBUF, sizeof *q);
    1305             : 
    1306           0 :         return (NULL);
    1307           0 : }
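
The doorbell offsets cached here follow the register layout in the NVMe
specification: submission-queue tail doorbell y sits at
0x1000 + (2 * y) * (4 << CAP.DSTRD), and the matching completion-queue head
doorbell one stride later. A sketch of the layout the NVME_SQTDBL()/
NVME_CQHDBL() macros are assumed to encode (hypothetical macro bodies):

	#define SQTDBL(y, dstrd)	(0x1000 + (2 * (y)) * (4 << (dstrd)))
	#define CQHDBL(y, dstrd)	(0x1000 + (2 * (y) + 1) * (4 << (dstrd)))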
    1308             : 
    1309             : int
    1310           0 : nvme_q_reset(struct nvme_softc *sc, struct nvme_queue *q)
    1311             : {
    1312           0 :         memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
    1313           0 :         memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));
    1314             : 
    1315           0 :         q->q_sqtdbl = NVME_SQTDBL(q->q_id, sc->sc_dstrd);
    1316           0 :         q->q_cqhdbl = NVME_CQHDBL(q->q_id, sc->sc_dstrd);
    1317             : 
    1318           0 :         q->q_sq_tail = 0;
    1319           0 :         q->q_cq_head = 0;
    1320           0 :         q->q_cq_phase = NVME_CQE_PHASE;
    1321             : 
    1322           0 :         nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
    1323           0 :         nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
    1324             : 
    1325           0 :         return (0);
    1326             : }
    1327             : 
    1328             : void
    1329           0 : nvme_q_free(struct nvme_softc *sc, struct nvme_queue *q)
    1330             : {
    1331           0 :         nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
    1332           0 :         nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_POSTWRITE);
    1333           0 :         nvme_dmamem_free(sc, q->q_cq_dmamem);
    1334           0 :         nvme_dmamem_free(sc, q->q_sq_dmamem);
    1335           0 :         free(q, M_DEVBUF, sizeof *q);
    1336           0 : }
    1337             : 
    1338             : int
    1339           0 : nvme_intr(void *xsc)
    1340             : {
    1341           0 :         struct nvme_softc *sc = xsc;
    1342             :         int rv = 0;
    1343             : 
    1344           0 :         if (nvme_q_complete(sc, sc->sc_q))
    1345           0 :                 rv = 1;
    1346           0 :         if (nvme_q_complete(sc, sc->sc_admin_q))
    1347           0 :                 rv = 1;
    1348             : 
    1349           0 :         return (rv);
    1350             : }
    1351             : 
    1352             : int
    1353           0 : nvme_intr_intx(void *xsc)
    1354             : {
    1355           0 :         struct nvme_softc *sc = xsc;
    1356             :         int rv;
    1357             : 
    1358           0 :         nvme_write4(sc, NVME_INTMS, 1);
    1359           0 :         rv = nvme_intr(sc);
    1360           0 :         nvme_write4(sc, NVME_INTMC, 1);
    1361             : 
    1362           0 :         return (rv);
    1363             : }
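
For legacy INTx the handler masks the controller's interrupt through INTMS
before draining the queues and unmasks it through INTMC afterwards, since a
level-triggered line would otherwise keep asserting while completions are
still being consumed; the MSI path wires up nvme_intr() directly and needs no
masking.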
    1364             : 
    1365             : struct nvme_dmamem *
    1366           0 : nvme_dmamem_alloc(struct nvme_softc *sc, size_t size)
    1367             : {
    1368             :         struct nvme_dmamem *ndm;
    1369           0 :         int nsegs;
    1370             : 
    1371           0 :         ndm = malloc(sizeof(*ndm), M_DEVBUF, M_WAITOK | M_ZERO);
    1372           0 :         if (ndm == NULL)
    1373           0 :                 return (NULL);
    1374             : 
    1375           0 :         ndm->ndm_size = size;
    1376             : 
    1377           0 :         if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
    1378           0 :             BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ndm->ndm_map) != 0)
    1379             :                 goto ndmfree;
    1380             : 
    1381           0 :         if (bus_dmamem_alloc(sc->sc_dmat, size, sc->sc_mps, 0, &ndm->ndm_seg,
    1382           0 :             1, &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
    1383             :                 goto destroy;
    1384             : 
    1385           0 :         if (bus_dmamem_map(sc->sc_dmat, &ndm->ndm_seg, nsegs, size,
    1386           0 :             &ndm->ndm_kva, BUS_DMA_WAITOK) != 0)
    1387             :                 goto free;
    1388             : 
    1389           0 :         if (bus_dmamap_load(sc->sc_dmat, ndm->ndm_map, ndm->ndm_kva, size,
    1390           0 :             NULL, BUS_DMA_WAITOK) != 0)
    1391             :                 goto unmap;
    1392             : 
    1393           0 :         return (ndm);
    1394             : 
    1395             : unmap:
    1396           0 :         bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, size);
    1397             : free:
    1398           0 :         bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
    1399             : destroy:
    1400           0 :         bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
    1401             : ndmfree:
    1402           0 :         free(ndm, M_DEVBUF, sizeof *ndm);
    1403             : 
    1404           0 :         return (NULL);
    1405           0 : }
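
This is the canonical four-step bus_dma(9) setup (dmamap_create, dmamem_alloc,
dmamem_map, dmamap_load), with the error labels unwinding in exactly the
reverse order; nvme_dmamem_free() below performs the same teardown for the
success path. Asking bus_dmamem_alloc() for at most one segment guarantees the
memory is physically contiguous, which queue and PRP-list memory handed to the
controller requires.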
    1406             : 
    1407             : void
    1408           0 : nvme_dmamem_sync(struct nvme_softc *sc, struct nvme_dmamem *mem, int ops)
    1409             : {
    1410           0 :         bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(mem),
    1411             :             0, NVME_DMA_LEN(mem), ops);
    1412           0 : }
    1413             : 
    1414             : void
    1415           0 : nvme_dmamem_free(struct nvme_softc *sc, struct nvme_dmamem *ndm)
    1416             : {
    1417           0 :         bus_dmamap_unload(sc->sc_dmat, ndm->ndm_map);
    1418           0 :         bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, ndm->ndm_size);
    1419           0 :         bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
    1420           0 :         bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
    1421           0 :         free(ndm, M_DEVBUF, sizeof *ndm);
    1422           0 : }
    1423             : 
    1424             : #ifdef HIBERNATE
    1425             : 
    1426             : int
    1427           0 : nvme_hibernate_admin_cmd(struct nvme_softc *sc, struct nvme_sqe *sqe,
    1428             :     struct nvme_cqe *cqe, int cid)
    1429             : {
    1430           0 :         struct nvme_sqe *asqe = NVME_DMA_KVA(sc->sc_admin_q->q_sq_dmamem);
    1431           0 :         struct nvme_cqe *acqe = NVME_DMA_KVA(sc->sc_admin_q->q_cq_dmamem);
    1432             :         struct nvme_queue *q = sc->sc_admin_q;
    1433             :         int tail;
    1434             :         u_int16_t flags;
    1435             : 
    1436             :         /* submit command */
    1437           0 :         tail = q->q_sq_tail;
    1438           0 :         if (++q->q_sq_tail >= q->q_entries)
    1439           0 :                 q->q_sq_tail = 0;
    1440             : 
    1441           0 :         asqe += tail;
    1442           0 :         bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
    1443             :             sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_POSTWRITE);
    1444           0 :         *asqe = *sqe;
    1445           0 :         asqe->cid = cid;
    1446           0 :         bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
    1447             :             sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_PREWRITE);
    1448             : 
    1449           0 :         nvme_write4(sc, q->q_sqtdbl, q->q_sq_tail);
    1450             : 
    1451             :         /* wait for completion */
    1452           0 :         acqe += q->q_cq_head;
    1453           0 :         for (;;) {
    1454           0 :                 nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
    1455           0 :                 flags = lemtoh16(&acqe->flags);
    1456           0 :                 if ((flags & NVME_CQE_PHASE) == q->q_cq_phase)
    1457             :                         break;
     1458             :
    1459           0 :                 delay(10);
    1460             :         }
    1461             : 
    1462           0 :         if (++q->q_cq_head >= q->q_entries) {
    1463           0 :                 q->q_cq_head = 0;
    1464           0 :                 q->q_cq_phase ^= NVME_CQE_PHASE;
    1465           0 :         }
    1466           0 :         nvme_write4(sc, q->q_cqhdbl, q->q_cq_head);
    1467           0 :         if ((NVME_CQE_SC(flags) != NVME_CQE_SC_SUCCESS) || (acqe->cid != cid))
    1468           0 :                 return (EIO);
    1469             : 
    1470           0 :         return (0);
    1471           0 : }
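
Nothing else runs during hibernate, so this helper drives the admin queue
fully polled: it stamps the caller-chosen cid into the submitted entry,
busy-waits on the phase bit of the next completion, and reports EIO on either
a non-success status code or a cid mismatch. Note that the cqe argument is
accepted but never filled in by this version of the helper; callers pass a
struct nvme_cqe purely for its address.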
    1472             : 
    1473             : int
    1474           0 : nvme_hibernate_io(dev_t dev, daddr_t blkno, vaddr_t addr, size_t size,
    1475             :     int op, void *page)
    1476             : {
    1477             :         struct nvme_hibernate_page {
    1478             :                 u_int64_t               prpl[MAXPHYS / PAGE_SIZE];
    1479             : 
    1480             :                 struct nvme_softc       *sc;
    1481             :                 int                     nsid;
    1482             :                 int                     sq_tail;
    1483             :                 int                     cq_head;
    1484             :                 int                     cqe_phase;
    1485             : 
    1486             :                 daddr_t                 poffset;
    1487             :                 size_t                  psize;
    1488           0 :         } *my = page;
    1489             :         struct nvme_sqe_io *isqe;
    1490             :         struct nvme_cqe *icqe;
    1491           0 :         paddr_t data_phys, page_phys;
    1492             :         u_int64_t data_bus_phys, page_bus_phys;
    1493             :         u_int16_t flags;
    1494             :         int i;
    1495             : 
    1496           0 :         if (op == HIB_INIT) {
    1497             :                 struct device *disk;
    1498             :                 struct device *scsibus;
    1499             :                 extern struct cfdriver sd_cd;
    1500             :                 struct scsi_link *link;
    1501             :                 struct scsibus_softc *bus_sc;
    1502           0 :                 struct nvme_sqe_q qsqe;
    1503           0 :                 struct nvme_cqe qcqe;
    1504             : 
    1505             :                 /* find nvme softc */
    1506           0 :                 disk = disk_lookup(&sd_cd, DISKUNIT(dev));
     1507           0 :                 scsibus = disk->dv_parent;
     1508           0 :                 my->sc = (struct nvme_softc *)scsibus->dv_parent;
    1509             : 
    1510             :                 /* find scsi_link, which tells us the target */
    1511           0 :                 my->nsid = 0;
    1512           0 :                 bus_sc = (struct scsibus_softc *)scsibus;
    1513           0 :                 SLIST_FOREACH(link, &bus_sc->sc_link_list, bus_list) {
    1514           0 :                         if (link->device_softc == disk) {
    1515           0 :                                 my->nsid = link->target + 1;
    1516           0 :                                 break;
    1517             :                         }
    1518             :                 }
    1519           0 :                 if (my->nsid == 0)
    1520           0 :                         return (EIO);
     1521             :
    1522           0 :                 my->poffset = blkno;
    1523           0 :                 my->psize = size;
    1524             : 
    1525           0 :                 memset(NVME_DMA_KVA(my->sc->sc_hib_q->q_cq_dmamem), 0,
    1526             :                     my->sc->sc_hib_q->q_entries * sizeof(struct nvme_cqe));
    1527           0 :                 memset(NVME_DMA_KVA(my->sc->sc_hib_q->q_sq_dmamem), 0,
    1528             :                     my->sc->sc_hib_q->q_entries * sizeof(struct nvme_sqe));
     1529             :
    1530           0 :                 my->sq_tail = 0;
    1531           0 :                 my->cq_head = 0;
    1532           0 :                 my->cqe_phase = NVME_CQE_PHASE;
    1533             : 
    1534           0 :                 pmap_extract(pmap_kernel(), (vaddr_t)page, &page_phys);
    1535             : 
    1536           0 :                 memset(&qsqe, 0, sizeof(qsqe));
    1537           0 :                 qsqe.opcode = NVM_ADMIN_ADD_IOCQ;
    1538           0 :                 htolem64(&qsqe.prp1,
    1539             :                     NVME_DMA_DVA(my->sc->sc_hib_q->q_cq_dmamem));
    1540           0 :                 htolem16(&qsqe.qsize, my->sc->sc_hib_q->q_entries - 1);
    1541           0 :                 htolem16(&qsqe.qid, my->sc->sc_hib_q->q_id);
    1542           0 :                 qsqe.qflags = NVM_SQE_CQ_IEN | NVM_SQE_Q_PC;
    1543           0 :                 if (nvme_hibernate_admin_cmd(my->sc, (struct nvme_sqe *)&qsqe,
    1544           0 :                     &qcqe, 1) != 0)
    1545           0 :                         return (EIO);
    1546             : 
    1547           0 :                 memset(&qsqe, 0, sizeof(qsqe));
    1548           0 :                 qsqe.opcode = NVM_ADMIN_ADD_IOSQ;
    1549           0 :                 htolem64(&qsqe.prp1,
    1550             :                     NVME_DMA_DVA(my->sc->sc_hib_q->q_sq_dmamem));
    1551           0 :                 htolem16(&qsqe.qsize, my->sc->sc_hib_q->q_entries - 1);
    1552           0 :                 htolem16(&qsqe.qid, my->sc->sc_hib_q->q_id);
    1553           0 :                 htolem16(&qsqe.cqid, my->sc->sc_hib_q->q_id);
    1554           0 :                 qsqe.qflags = NVM_SQE_Q_PC;
    1555           0 :                 if (nvme_hibernate_admin_cmd(my->sc, (struct nvme_sqe *)&qsqe,
    1556           0 :                     &qcqe, 2) != 0)
    1557           0 :                         return (EIO);
    1558             : 
    1559           0 :                 return (0);
    1560           0 :         }
    1561             : 
    1562           0 :         if (op != HIB_W)
    1563           0 :                 return (0);
    1564             : 
    1565           0 :         isqe = NVME_DMA_KVA(my->sc->sc_hib_q->q_sq_dmamem);
    1566           0 :         isqe += my->sq_tail;
    1567           0 :         if (++my->sq_tail == my->sc->sc_hib_q->q_entries)
    1568           0 :                 my->sq_tail = 0;
    1569             : 
    1570           0 :         memset(isqe, 0, sizeof(*isqe));
    1571           0 :         isqe->opcode = NVM_CMD_WRITE;
    1572           0 :         htolem32(&isqe->nsid, my->nsid);
    1573             : 
    1574           0 :         pmap_extract(pmap_kernel(), addr, &data_phys);
    1575           0 :         data_bus_phys = data_phys;
    1576           0 :         htolem64(&isqe->entry.prp[0], data_bus_phys);
    1577           0 :         if ((size > my->sc->sc_mps) && (size <= my->sc->sc_mps * 2)) {
    1578           0 :                 htolem64(&isqe->entry.prp[1], data_bus_phys + my->sc->sc_mps);
    1579           0 :         } else if (size > my->sc->sc_mps * 2) {
    1580           0 :                 pmap_extract(pmap_kernel(), (vaddr_t)page, &page_phys);
    1581           0 :                 page_bus_phys = page_phys;
    1582           0 :                 htolem64(&isqe->entry.prp[1], page_bus_phys + 
    1583             :                     offsetof(struct nvme_hibernate_page, prpl));
    1584           0 :                 for (i = 1; i < (size / my->sc->sc_mps); i++) {
    1585           0 :                         htolem64(&my->prpl[i - 1], data_bus_phys +
    1586             :                             (i * my->sc->sc_mps));
    1587             :                 }
    1588             :         }
    1589             : 
     1590           0 :         htolem64(&isqe->slba, blkno + my->poffset);
     1591           0 :         htolem16(&isqe->nlb, (size / DEV_BSIZE) - 1);
    1592           0 :         isqe->cid = blkno % 0xffff;
    1593             : 
    1594           0 :         nvme_write4(my->sc, NVME_SQTDBL(NVME_HIB_Q, my->sc->sc_dstrd),
    1595             :             my->sq_tail);
    1596             : 
    1597           0 :         icqe = NVME_DMA_KVA(my->sc->sc_hib_q->q_cq_dmamem);
    1598           0 :         icqe += my->cq_head;
    1599           0 :         for (;;) {
    1600           0 :                 flags = lemtoh16(&icqe->flags);
    1601           0 :                 if ((flags & NVME_CQE_PHASE) == my->cqe_phase)
    1602             :                         break;
     1603             :
    1604           0 :                 delay(10);
    1605             :         }
    1606             : 
    1607           0 :         if (++my->cq_head == my->sc->sc_hib_q->q_entries) {
    1608           0 :                 my->cq_head = 0;
    1609           0 :                 my->cqe_phase ^= NVME_CQE_PHASE;
    1610           0 :         }
    1611           0 :         nvme_write4(my->sc, NVME_CQHDBL(NVME_HIB_Q, my->sc->sc_dstrd),
    1612             :             my->cq_head);
    1613           0 :         if ((NVME_CQE_SC(flags) != NVME_CQE_SC_SUCCESS) ||
    1614           0 :             (icqe->cid != blkno % 0xffff))
    1615           0 :                 return (EIO);
    1616             : 
    1617           0 :         return (0);
    1618           0 : }
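
The PRP handling in the HIB_W path above follows the specification's three
cases: a transfer within one memory page needs only prp[0]; up to two pages
puts the second page's bus address in prp[1]; anything larger points prp[1] at
a PRP list, here the prpl[] array embedded at the start of the hibernate page
itself. As a worked example with a hypothetical 4096-byte sc_mps and a
16384-byte write, prp[0] holds the first page, prp[1] holds the bus address of
prpl[], and the loop above fills the remaining three page addresses into
prpl[0] through prpl[2] (loop indices i = 1..3).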
    1619             : 
    1620             : #endif

Generated by: LCOV version 1.13