Line data Source code
1 : /* $OpenBSD: viomb.c,v 1.1 2017/01/21 11:22:43 reyk Exp $ */
2 : /* $NetBSD: viomb.c,v 1.1 2011/10/30 12:12:21 hannken Exp $ */
3 :
4 : /*
5 : * Copyright (c) 2012 Talypov Dinar <dinar@i-nk.ru>
6 : * Copyright (c) 2010 Minoura Makoto.
7 : * All rights reserved.
8 : *
9 : * Redistribution and use in source and binary forms, with or without
10 : * modification, are permitted provided that the following conditions
11 : * are met:
12 : * 1. Redistributions of source code must retain the above copyright
13 : * notice, this list of conditions and the following disclaimer.
14 : * 2. Redistributions in binary form must reproduce the above copyright
15 : * notice, this list of conditions and the following disclaimer in the
16 : * documentation and/or other materials provided with the distribution.
17 : *
18 : * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 : * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 : * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 : * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 : * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 : * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 : * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 : * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 : * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 : * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 : */
29 :
30 : #include <sys/param.h>
31 : #include <sys/systm.h>
32 : #include <sys/malloc.h>
33 : #include <sys/device.h>
34 : #include <sys/task.h>
35 : #include <sys/pool.h>
36 : #include <sys/sensors.h>
37 :
38 : #include <uvm/uvm_extern.h>
39 :
40 : #include <dev/pv/virtioreg.h>
41 : #include <dev/pv/virtiovar.h>
42 :
/* The balloon protocol speaks in 4K pages; refuse to build otherwise. */
#if VIRTIO_PAGE_SIZE!=PAGE_SIZE
#error non-4K page sizes are not supported yet
#endif

#define DEVNAME(sc) sc->sc_dev.dv_xname
#if VIRTIO_DEBUG
#define VIOMBDEBUG(sc, format, args...) \
	do { printf("%s: " format, sc->sc_dev.dv_xname, ##args);} \
		while (0)
#else
#define VIOMBDEBUG(...)
#endif

/* flags used to specify kind of operation,
 * actually should be moved to virtiovar.h
 */
#define VRING_READ		0
#define VRING_WRITE		1

/* notify or don't notify */
#define VRING_NO_NOTIFY		0
#define VRING_NOTIFY		1

/* Configuration registers */
#define VIRTIO_BALLOON_CONFIG_NUM_PAGES	0	/* 32bit */
#define VIRTIO_BALLOON_CONFIG_ACTUAL	4	/* 32bit */

/* Feature bits */
#define VIRTIO_BALLOON_F_MUST_TELL_HOST	(1<<0)
#define VIRTIO_BALLOON_F_STATS_VQ	(1<<1)

/* Human-readable names for the feature bits, used at negotiation time. */
static const struct virtio_feature_name viomb_feature_names[] = {
	{VIRTIO_BALLOON_F_MUST_TELL_HOST, "TellHost"},
	{VIRTIO_BALLOON_F_STATS_VQ, "StatVQ"},
	{0, NULL}
};
#define PGS_PER_REQ		256	/* 1MB, 4KB/page */
#define VQ_INFLATE	0
#define VQ_DEFLATE	1

/*
 * One in-flight balloon request: the DMA-able array of page frame
 * numbers handed to the host, plus the pglist holding the pages it
 * refers to.  There is a single request (sc_req) reused for both
 * inflate and deflate, so at most one operation is outstanding.
 */
struct balloon_req {
	bus_dmamap_t	 bl_dmamap;	/* maps bl_pages for the device */
	struct pglist	 bl_pglist;	/* pages belonging to this request */
	int		 bl_nentries;	/* valid entries in bl_pages */
	u_int32_t	*bl_pages;	/* PFN array (VIRTIO_PAGE_SIZE units) */
};

struct viomb_softc {
	struct device		sc_dev;
	struct virtio_softc	*sc_virtio;
	struct virtqueue	sc_vq[2];	/* inflate and deflate queues */
	u_int32_t		sc_npages;	/* desired pages */
	u_int32_t		sc_actual;	/* current pages */
	struct balloon_req	sc_req;		/* single reusable request */
	struct taskq		*sc_taskq;	/* worker context (IPL_BIO) */
	struct task		sc_task;	/* runs viomb_worker */
	struct pglist		sc_balloon_pages; /* pages currently ballooned */
	struct ksensor		sc_sens[2];	/* desired/current in bytes */
	struct ksensordev	sc_sensdev;
};

int	viomb_match(struct device *, void *, void *);
void	viomb_attach(struct device *, struct device *, void *);
void	viomb_worker(void *);
void	viomb_inflate(struct viomb_softc *);
void	viomb_deflate(struct viomb_softc *);
int	viomb_config_change(struct virtio_softc *);
void	viomb_read_config(struct viomb_softc *);
int	viomb_vq_dequeue(struct virtqueue *);
int	viomb_inflate_intr(struct virtqueue *);
int	viomb_deflate_intr(struct virtqueue *);

struct cfattach viomb_ca = {
	sizeof(struct viomb_softc), viomb_match, viomb_attach
};

struct cfdriver viomb_cd = {
	NULL, "viomb", DV_DULL
};
122 :
123 : int
124 0 : viomb_match(struct device *parent, void *match, void *aux)
125 : {
126 0 : struct virtio_softc *va = aux;
127 0 : if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BALLOON)
128 0 : return (1);
129 0 : return (0);
130 0 : }
131 :
/*
 * Autoconf attach: wire the balloon up to its parent virtio transport.
 * Negotiates features, allocates the inflate/deflate virtqueues and the
 * single reusable DMA request, creates the worker taskq, and exports
 * two sensors ("desired" and "current" balloon size in bytes).
 * On any failure, everything allocated so far is torn down and
 * vsc->sc_child is set to VIRTIO_CHILD_ERROR.
 */
void
viomb_attach(struct device *parent, struct device *self, void *aux)
{
	struct viomb_softc *sc = (struct viomb_softc *)self;
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	u_int32_t features;
	int i;

	/* the virtio transport supports only one child device */
	if (vsc->sc_child != NULL) {
		printf("child already attached for %s; something wrong...\n",
		    parent->dv_xname);
		return;
	}

	/* fail on non-4K page size archs */
	/* NOTE(review): dead at runtime — the #if at the top of the file
	 * already makes this a compile-time error. */
	if (VIRTIO_PAGE_SIZE != PAGE_SIZE){
		printf("non-4K page size arch found, needs %d, got %d\n",
		    VIRTIO_PAGE_SIZE, PAGE_SIZE);
		return;
	}

	/* register ourselves with the transport before any allocation */
	sc->sc_virtio = vsc;
	vsc->sc_vqs = &sc->sc_vq[VQ_INFLATE];
	vsc->sc_nvqs = 0;
	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	vsc->sc_config_change = viomb_config_change;

	/* negotiate features */
	features = VIRTIO_F_RING_INDIRECT_DESC;
	features = virtio_negotiate_features(vsc, features,
	    viomb_feature_names);

	/*
	 * Each queue carries one segment of up to PGS_PER_REQ page
	 * frame numbers.  sc_nvqs tracks how many queues were
	 * successfully allocated so the error path frees only those.
	 */
	if ((virtio_alloc_vq(vsc, &sc->sc_vq[VQ_INFLATE], VQ_INFLATE,
	     sizeof(u_int32_t) * PGS_PER_REQ, 1, "inflate") != 0))
		goto err;
	vsc->sc_nvqs++;
	if ((virtio_alloc_vq(vsc, &sc->sc_vq[VQ_DEFLATE], VQ_DEFLATE,
	     sizeof(u_int32_t) * PGS_PER_REQ, 1, "deflate") != 0))
		goto err;
	vsc->sc_nvqs++;

	sc->sc_vq[VQ_INFLATE].vq_done = viomb_inflate_intr;
	sc->sc_vq[VQ_DEFLATE].vq_done = viomb_deflate_intr;
	virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_INFLATE]);
	virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_DEFLATE]);

	viomb_read_config(sc);
	TAILQ_INIT(&sc->sc_balloon_pages);

	/* DMA-able PFN array shared by inflate and deflate requests */
	if ((sc->sc_req.bl_pages = dma_alloc(sizeof(u_int32_t) * PGS_PER_REQ,
	    PR_NOWAIT|PR_ZERO)) == NULL) {
		printf("%s: Can't alloc DMA memory.\n", DEVNAME(sc));
		goto err;
	}
	if (bus_dmamap_create(vsc->sc_dmat, sizeof(u_int32_t) * PGS_PER_REQ,
			      1, sizeof(u_int32_t) * PGS_PER_REQ, 0,
			      BUS_DMA_NOWAIT, &sc->sc_req.bl_dmamap)) {
		printf("%s: dmamap creation failed.\n", DEVNAME(sc));
		goto err;
	}
	if (bus_dmamap_load(vsc->sc_dmat, sc->sc_req.bl_dmamap,
			    &sc->sc_req.bl_pages[0],
			    sizeof(uint32_t) * PGS_PER_REQ,
			    NULL, BUS_DMA_NOWAIT)) {
		printf("%s: dmamap load failed.\n", DEVNAME(sc));
		goto err_dmamap;
	}

	/* single-threaded worker: inflate/deflate must not run concurrently */
	sc->sc_taskq = taskq_create("viomb", 1, IPL_BIO, 0);
	if (sc->sc_taskq == NULL)
		goto err_dmamap;
	task_set(&sc->sc_task, viomb_worker, sc);

	/* export desired/current balloon size (bytes) as sensors */
	strlcpy(sc->sc_sensdev.xname, DEVNAME(sc),
	    sizeof(sc->sc_sensdev.xname));
	strlcpy(sc->sc_sens[0].desc, "desired",
	    sizeof(sc->sc_sens[0].desc));
	sc->sc_sens[0].type = SENSOR_INTEGER;
	sensor_attach(&sc->sc_sensdev, &sc->sc_sens[0]);
	sc->sc_sens[0].value = sc->sc_npages << PAGE_SHIFT;

	strlcpy(sc->sc_sens[1].desc, "current",
	    sizeof(sc->sc_sens[1].desc));
	sc->sc_sens[1].type = SENSOR_INTEGER;
	sensor_attach(&sc->sc_sensdev, &sc->sc_sens[1]);
	sc->sc_sens[1].value = sc->sc_actual << PAGE_SHIFT;

	sensordev_install(&sc->sc_sensdev);

	printf("\n");
	return;
err_dmamap:
	bus_dmamap_destroy(vsc->sc_dmat, sc->sc_req.bl_dmamap);
err:
	if (sc->sc_req.bl_pages)
		dma_free(sc->sc_req.bl_pages, sizeof(u_int32_t) * PGS_PER_REQ);
	for (i = 0; i < vsc->sc_nvqs; i++)
		virtio_free_vq(vsc, &sc->sc_vq[i]);
	vsc->sc_nvqs = 0;
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}
235 :
236 : /*
237 : * Config change
238 : */
239 : int
240 0 : viomb_config_change(struct virtio_softc *vsc)
241 : {
242 0 : struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;
243 :
244 0 : task_add(sc->sc_taskq, &sc->sc_task);
245 :
246 0 : return (1);
247 : }
248 :
249 : void
250 0 : viomb_worker(void *arg1)
251 : {
252 0 : struct viomb_softc *sc = (struct viomb_softc *)arg1;
253 : int s;
254 :
255 0 : s = splbio();
256 0 : viomb_read_config(sc);
257 0 : if (sc->sc_npages > sc->sc_actual){
258 : VIOMBDEBUG(sc, "inflating balloon from %u to %u.\n",
259 : sc->sc_actual, sc->sc_npages);
260 0 : viomb_inflate(sc);
261 0 : }
262 0 : else if (sc->sc_npages < sc->sc_actual){
263 : VIOMBDEBUG(sc, "deflating balloon from %u to %u.\n",
264 : sc->sc_actual, sc->sc_npages);
265 0 : viomb_deflate(sc);
266 0 : }
267 :
268 0 : sc->sc_sens[0].value = sc->sc_npages << PAGE_SHIFT;
269 0 : sc->sc_sens[1].value = sc->sc_actual << PAGE_SHIFT;
270 :
271 0 : splx(s);
272 0 : }
273 :
274 : void
275 0 : viomb_inflate(struct viomb_softc *sc)
276 : {
277 0 : struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio;
278 : struct balloon_req *b;
279 : struct vm_page *p;
280 0 : struct virtqueue *vq = &sc->sc_vq[VQ_INFLATE];
281 : u_int32_t nvpages;
282 0 : int slot, error, i = 0;
283 :
284 0 : nvpages = sc->sc_npages - sc->sc_actual;
285 0 : if (nvpages > PGS_PER_REQ)
286 : nvpages = PGS_PER_REQ;
287 0 : b = &sc->sc_req;
288 :
289 0 : if ((error = uvm_pglistalloc(nvpages * PAGE_SIZE, 0,
290 0 : dma_constraint.ucr_high,
291 0 : 0, 0, &b->bl_pglist, nvpages,
292 : UVM_PLA_NOWAIT))) {
293 0 : printf("%s unable to allocate %u physmem pages,"
294 0 : "error %d\n", DEVNAME(sc), nvpages, error);
295 0 : return;
296 : }
297 :
298 0 : b->bl_nentries = nvpages;
299 0 : TAILQ_FOREACH(p, &b->bl_pglist, pageq)
300 0 : b->bl_pages[i++] = p->phys_addr / VIRTIO_PAGE_SIZE;
301 :
302 0 : KASSERT(i == nvpages);
303 :
304 0 : if ((virtio_enqueue_prep(vq, &slot)) > 0) {
305 0 : printf("%s:virtio_enqueue_prep() vq_num %d\n",
306 0 : DEVNAME(sc), vq->vq_num);
307 0 : goto err;
308 : }
309 0 : if (virtio_enqueue_reserve(vq, slot, 1)) {
310 0 : printf("%s:virtio_enqueue_reserve vq_num %d\n",
311 0 : DEVNAME(sc), vq->vq_num);
312 0 : goto err;
313 : }
314 0 : bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
315 : sizeof(u_int32_t) * nvpages, BUS_DMASYNC_PREWRITE);
316 0 : virtio_enqueue_p(vq, slot, b->bl_dmamap, 0,
317 : sizeof(u_int32_t) * nvpages, VRING_READ);
318 0 : virtio_enqueue_commit(vsc, vq, slot, VRING_NOTIFY);
319 0 : return;
320 : err:
321 0 : uvm_pglistfree(&b->bl_pglist);
322 0 : return;
323 0 : }
324 :
325 : void
326 0 : viomb_deflate(struct viomb_softc *sc)
327 : {
328 0 : struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio;
329 : struct balloon_req *b;
330 : struct vm_page *p;
331 0 : struct virtqueue *vq = &sc->sc_vq[VQ_DEFLATE];
332 : u_int64_t nvpages;
333 0 : int i, slot;
334 :
335 0 : nvpages = sc->sc_actual - sc->sc_npages;
336 0 : if (nvpages > PGS_PER_REQ)
337 : nvpages = PGS_PER_REQ;
338 0 : b = &sc->sc_req;
339 0 : b->bl_nentries = nvpages;
340 :
341 0 : TAILQ_INIT(&b->bl_pglist);
342 0 : for (i = 0; i < nvpages; i++) {
343 0 : p = TAILQ_FIRST(&sc->sc_balloon_pages);
344 0 : if (p == NULL){
345 0 : b->bl_nentries = i - 1;
346 0 : break;
347 : }
348 0 : TAILQ_REMOVE(&sc->sc_balloon_pages, p, pageq);
349 0 : TAILQ_INSERT_TAIL(&b->bl_pglist, p, pageq);
350 0 : b->bl_pages[i] = p->phys_addr / VIRTIO_PAGE_SIZE;
351 : }
352 :
353 0 : if (virtio_enqueue_prep(vq, &slot)) {
354 0 : printf("%s:virtio_get_slot(def) vq_num %d\n",
355 0 : DEVNAME(sc), vq->vq_num);
356 0 : goto err;
357 : }
358 0 : if (virtio_enqueue_reserve(vq, slot, 1)) {
359 0 : printf("%s:virtio_enqueue_reserve() vq_num %d\n",
360 0 : DEVNAME(sc), vq->vq_num);
361 0 : goto err;
362 : }
363 0 : bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
364 : sizeof(u_int32_t) * nvpages,
365 : BUS_DMASYNC_PREWRITE);
366 0 : virtio_enqueue_p(vq, slot, b->bl_dmamap, 0,
367 : sizeof(u_int32_t) * nvpages, VRING_READ);
368 :
369 0 : if (!(vsc->sc_features & VIRTIO_BALLOON_F_MUST_TELL_HOST))
370 0 : uvm_pglistfree(&b->bl_pglist);
371 0 : virtio_enqueue_commit(vsc, vq, slot, VRING_NOTIFY);
372 0 : return;
373 : err:
374 0 : while ((p = TAILQ_LAST(&b->bl_pglist, pglist))) {
375 0 : TAILQ_REMOVE(&b->bl_pglist, p, pageq);
376 0 : TAILQ_INSERT_HEAD(&sc->sc_balloon_pages, p, pageq);
377 : }
378 0 : return;
379 0 : }
380 :
381 : void
382 0 : viomb_read_config(struct viomb_softc *sc)
383 : {
384 0 : struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio;
385 : u_int32_t reg;
386 :
387 : /* these values are explicitly specified as little-endian */
388 0 : reg = virtio_read_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_NUM_PAGES);
389 0 : sc->sc_npages = letoh32(reg);
390 0 : reg = virtio_read_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_ACTUAL);
391 0 : sc->sc_actual = letoh32(reg);
392 : VIOMBDEBUG(sc, "sc->sc_npages %u, sc->sc_actual %u\n",
393 : sc->sc_npages, sc->sc_actual);
394 0 : }
395 :
396 : int
397 0 : viomb_vq_dequeue(struct virtqueue *vq)
398 : {
399 0 : struct virtio_softc *vsc = vq->vq_owner;
400 0 : struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;
401 0 : int r, slot;
402 :
403 0 : r = virtio_dequeue(vsc, vq, &slot, NULL);
404 0 : if (r != 0) {
405 0 : printf("%s: dequeue failed, errno %d\n", DEVNAME(sc), r);
406 0 : return(r);
407 : }
408 0 : virtio_dequeue_commit(vq, slot);
409 0 : return(0);
410 0 : }
411 :
412 : /*
413 : * interrupt handling for vq's
414 : */
415 : int
416 0 : viomb_inflate_intr(struct virtqueue *vq)
417 : {
418 0 : struct virtio_softc *vsc = vq->vq_owner;
419 0 : struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;
420 : struct balloon_req *b;
421 : struct vm_page *p;
422 : u_int64_t nvpages;
423 :
424 0 : if (viomb_vq_dequeue(vq))
425 0 : return(1);
426 :
427 0 : b = &sc->sc_req;
428 0 : nvpages = b->bl_nentries;
429 0 : bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
430 : sizeof(u_int32_t) * nvpages,
431 : BUS_DMASYNC_POSTWRITE);
432 0 : while (!TAILQ_EMPTY(&b->bl_pglist)) {
433 : p = TAILQ_FIRST(&b->bl_pglist);
434 0 : TAILQ_REMOVE(&b->bl_pglist, p, pageq);
435 0 : TAILQ_INSERT_TAIL(&sc->sc_balloon_pages, p, pageq);
436 : }
437 : VIOMBDEBUG(sc, "updating sc->sc_actual from %u to %llu\n",
438 : sc->sc_actual, sc->sc_actual + nvpages);
439 0 : virtio_write_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_ACTUAL,
440 : sc->sc_actual + nvpages);
441 0 : viomb_read_config(sc);
442 :
443 : /* if we have more work to do, add it to the task list */
444 0 : if (sc->sc_npages > sc->sc_actual)
445 0 : task_add(sc->sc_taskq, &sc->sc_task);
446 :
447 0 : return (1);
448 0 : }
449 :
450 : int
451 0 : viomb_deflate_intr(struct virtqueue *vq)
452 : {
453 0 : struct virtio_softc *vsc = vq->vq_owner;
454 0 : struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;
455 : struct balloon_req *b;
456 : u_int64_t nvpages;
457 :
458 0 : if (viomb_vq_dequeue(vq))
459 0 : return(1);
460 :
461 0 : b = &sc->sc_req;
462 0 : nvpages = b->bl_nentries;
463 0 : bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
464 : sizeof(u_int32_t) * nvpages,
465 : BUS_DMASYNC_POSTWRITE);
466 :
467 0 : if (vsc->sc_features & VIRTIO_BALLOON_F_MUST_TELL_HOST)
468 0 : uvm_pglistfree(&b->bl_pglist);
469 :
470 : VIOMBDEBUG(sc, "updating sc->sc_actual from %u to %llu\n",
471 : sc->sc_actual, sc->sc_actual - nvpages);
472 0 : virtio_write_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_ACTUAL,
473 : sc->sc_actual - nvpages);
474 0 : viomb_read_config(sc);
475 :
476 : /* if we have more work to do, add it to tasks list */
477 0 : if (sc->sc_npages < sc->sc_actual)
478 0 : task_add(sc->sc_taskq, &sc->sc_task);
479 :
480 0 : return(1);
481 0 : }
|