GCC Code Coverage Report
Directory: ./ Exec Total Coverage
File: usr.sbin/vmd/vmm.c Lines: 0 311 0.0 %
Date: 2017-11-13 Branches: 0 193 0.0 %

Line Branch Exec Source
1
/*	$OpenBSD: vmm.c,v 1.78 2017/10/24 07:58:52 mlarkin Exp $	*/
2
3
/*
4
 * Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org>
5
 *
6
 * Permission to use, copy, modify, and distribute this software for any
7
 * purpose with or without fee is hereby granted, provided that the above
8
 * copyright notice and this permission notice appear in all copies.
9
 *
10
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17
 */
18
19
#include <sys/param.h>	/* nitems */
20
#include <sys/ioctl.h>
21
#include <sys/queue.h>
22
#include <sys/wait.h>
23
#include <sys/uio.h>
24
#include <sys/socket.h>
25
#include <sys/time.h>
26
#include <sys/mman.h>
27
28
#include <dev/ic/i8253reg.h>
29
#include <dev/isa/isareg.h>
30
#include <dev/pci/pcireg.h>
31
32
#include <machine/param.h>
33
#include <machine/psl.h>
34
#include <machine/specialreg.h>
35
#include <machine/vmmvar.h>
36
37
#include <net/if.h>
38
39
#include <errno.h>
40
#include <event.h>
41
#include <fcntl.h>
42
#include <imsg.h>
43
#include <limits.h>
44
#include <poll.h>
45
#include <pthread.h>
46
#include <stddef.h>
47
#include <stdio.h>
48
#include <stdlib.h>
49
#include <string.h>
50
#include <unistd.h>
51
#include <util.h>
52
53
#include "vmd.h"
54
#include "vmm.h"
55
56
void vmm_sighdlr(int, short, void *);
57
int vmm_start_vm(struct imsg *, uint32_t *);
58
int vmm_receive_vm(struct vmd_vm * , int);
59
int vmm_dispatch_parent(int, struct privsep_proc *, struct imsg *);
60
void vmm_run(struct privsep *, struct privsep_proc *, void *);
61
void vmm_dispatch_vm(int, short, void *);
62
int terminate_vm(struct vm_terminate_params *);
63
int get_info_vm(struct privsep *, struct imsg *, int);
64
int opentap(char *);
65
66
extern struct vmd *env;
67
68
/* Messages arriving from the parent process are handled by vmm_dispatch_parent. */
static struct privsep_proc procs[] = {
	{ "parent",	PROC_PARENT,	vmm_dispatch_parent  },
};
71
72
/*
 * vmm
 *
 * Entry point of the vmm process: hands control to the privsep main
 * loop; vmm_run() completes process-specific setup once running.
 */
void
vmm(struct privsep *ps, struct privsep_proc *p)
{
	proc_run(ps, p, procs, nitems(procs), vmm_run, NULL);
}
77
78
/*
 * vmm_run
 *
 * Run-time setup for the vmm process: installs our own SIGCHLD handler
 * (to reap VM children), pledges, and kills any VMs left over in the
 * kernel from a previous run.
 */
void
vmm_run(struct privsep *ps, struct privsep_proc *p, void *arg)
{
	if (config_init(ps->ps_env) == -1)
		fatal("failed to initialize configuration");

	/* Replace the generic privsep SIGCHLD handler with our own. */
	signal_del(&ps->ps_evsigchld);
	signal_set(&ps->ps_evsigchld, SIGCHLD, vmm_sighdlr, ps);
	signal_add(&ps->ps_evsigchld, NULL);

	/*
	 * pledge in the vmm process:
	 * stdio - for malloc and basic I/O including events.
	 * vmm - for the vmm ioctls and operations.
	 * proc - for forking and maintaining vms.
	 * sendfd - for sending send/recv fds to vm proc.
	 * recvfd - for disks, interfaces and other fds.
	 * flock, rpath, cpath, wpath - for disk/lock file handling.
	 */
	if (pledge("stdio vmm sendfd recvfd proc flock rpath cpath wpath", NULL) == -1)
		fatal("pledge");

	/* Get and terminate all running VMs */
	get_info_vm(ps, NULL, 1);
}
102
103
int
104
vmm_dispatch_parent(int fd, struct privsep_proc *p, struct imsg *imsg)
105
{
106
	struct privsep		*ps = p->p_ps;
107
	int			 res = 0, cmd = 0, verbose, ret;
108
	struct vmd_vm		*vm = NULL;
109
	struct vm_terminate_params vtp;
110
	struct vmop_id		 vid;
111
	struct vmop_result	 vmr;
112
	struct vmop_create_params vmc;
113
	uint32_t		 id = 0;
114
	unsigned int		 mode;
115
116
	switch (imsg->hdr.type) {
117
	case IMSG_VMDOP_START_VM_REQUEST:
118
		res = config_getvm(ps, imsg);
119
		if (res == -1) {
120
			res = errno;
121
			cmd = IMSG_VMDOP_START_VM_RESPONSE;
122
		}
123
		break;
124
	case IMSG_VMDOP_START_VM_DISK:
125
		res = config_getdisk(ps, imsg);
126
		if (res == -1) {
127
			res = errno;
128
			cmd = IMSG_VMDOP_START_VM_RESPONSE;
129
		}
130
		break;
131
	case IMSG_VMDOP_START_VM_IF:
132
		res = config_getif(ps, imsg);
133
		if (res == -1) {
134
			res = errno;
135
			cmd = IMSG_VMDOP_START_VM_RESPONSE;
136
		}
137
		break;
138
	case IMSG_VMDOP_START_VM_END:
139
		res = vmm_start_vm(imsg, &id);
140
		/* Check if the ID can be mapped correctly */
141
		if ((id = vm_id2vmid(id, NULL)) == 0)
142
			res = ENOENT;
143
		cmd = IMSG_VMDOP_START_VM_RESPONSE;
144
		break;
145
	case IMSG_VMDOP_TERMINATE_VM_REQUEST:
146
		IMSG_SIZE_CHECK(imsg, &vtp);
147
		memcpy(&vtp, imsg->data, sizeof(vtp));
148
		id = vtp.vtp_vm_id;
149
		log_debug("%s: recv'ed TERMINATE_VM for %d", __func__, id);
150
151
		if (id == 0) {
152
			res = ENOENT;
153
		} else if ((vm = vm_getbyvmid(id)) != NULL) {
154
			if (vm->vm_shutdown == 0) {
155
				log_debug("%s: sending shutdown req to vm %d",
156
				    __func__, id);
157
158
				/*
159
				 * Request reboot but mark the VM as shutting
160
				 * down. This way we can terminate the VM after
161
				 * the triple fault instead of reboot and
162
				 * avoid being stuck in the ACPI-less powerdown
163
				 * ("press any key to reboot") of the VM.
164
				 */
165
				vm->vm_shutdown = 1;
166
				if (imsg_compose_event(&vm->vm_iev,
167
				    IMSG_VMDOP_VM_REBOOT,
168
				    0, 0, -1, NULL, 0) == -1)
169
					res = errno;
170
				else
171
					res = 0;
172
			} else {
173
				/*
174
				 * VM is currently being shutdown.
175
				 * Check to see if the VM process is still
176
				 * active.  If not, return VMD_VM_STOP_INVALID.
177
				 */
178
				vtp.vtp_vm_id = vm_vmid2id(vm->vm_vmid, vm);
179
				if (vtp.vtp_vm_id == 0) {
180
					log_debug("%s: no vm running anymore",
181
					    __func__);
182
					res = VMD_VM_STOP_INVALID;
183
				}
184
			}
185
		} else {
186
			/* vm doesn't exist, cannot stop vm */
187
			log_debug("%s: cannot stop vm that is not running",
188
			    __func__);
189
			res = VMD_VM_STOP_INVALID;
190
		}
191
		cmd = IMSG_VMDOP_TERMINATE_VM_RESPONSE;
192
		break;
193
	case IMSG_VMDOP_GET_INFO_VM_REQUEST:
194
		res = get_info_vm(ps, imsg, 0);
195
		cmd = IMSG_VMDOP_GET_INFO_VM_END_DATA;
196
		break;
197
	case IMSG_VMDOP_CONFIG:
198
		config_getconfig(env, imsg);
199
		break;
200
	case IMSG_CTL_RESET:
201
		IMSG_SIZE_CHECK(imsg, &mode);
202
		memcpy(&mode, imsg->data, sizeof(mode));
203
204
		if (mode & CONFIG_VMS) {
205
			/* Terminate and remove all VMs */
206
			vmm_shutdown();
207
			mode &= ~CONFIG_VMS;
208
		}
209
210
		config_getreset(env, imsg);
211
		break;
212
	case IMSG_CTL_VERBOSE:
213
		IMSG_SIZE_CHECK(imsg, &verbose);
214
		memcpy(&verbose, imsg->data, sizeof(verbose));
215
		log_setverbose(verbose);
216
217
		/* Forward message to each VM process */
218
		TAILQ_FOREACH(vm, env->vmd_vms, vm_entry) {
219
			imsg_compose_event(&vm->vm_iev,
220
			    imsg->hdr.type, imsg->hdr.peerid, imsg->hdr.pid,
221
			    -1, &verbose, sizeof(verbose));
222
		}
223
		break;
224
	case IMSG_VMDOP_PAUSE_VM:
225
		IMSG_SIZE_CHECK(imsg, &vid);
226
		memcpy(&vid, imsg->data, sizeof(vid));
227
		id = vid.vid_id;
228
		vm = vm_getbyvmid(id);
229
		if ((vm = vm_getbyvmid(id)) == NULL) {
230
			res = ENOENT;
231
			cmd = IMSG_VMDOP_PAUSE_VM_RESPONSE;
232
			break;
233
		}
234
		imsg_compose_event(&vm->vm_iev,
235
		    imsg->hdr.type, imsg->hdr.peerid, imsg->hdr.pid,
236
		    imsg->fd, &vid, sizeof(vid));
237
		break;
238
	case IMSG_VMDOP_UNPAUSE_VM:
239
		IMSG_SIZE_CHECK(imsg, &vid);
240
		memcpy(&vid, imsg->data, sizeof(vid));
241
		id = vid.vid_id;
242
		if ((vm = vm_getbyvmid(id)) == NULL) {
243
			res = ENOENT;
244
			cmd = IMSG_VMDOP_UNPAUSE_VM_RESPONSE;
245
			break;
246
		}
247
		imsg_compose_event(&vm->vm_iev,
248
		    imsg->hdr.type, imsg->hdr.peerid, imsg->hdr.pid,
249
		    imsg->fd, &vid, sizeof(vid));
250
		break;
251
	case IMSG_VMDOP_SEND_VM_REQUEST:
252
		IMSG_SIZE_CHECK(imsg, &vid);
253
		memcpy(&vid, imsg->data, sizeof(vid));
254
		id = vid.vid_id;
255
		if ((vm = vm_getbyvmid(id)) == NULL) {
256
			res = ENOENT;
257
			close(imsg->fd);
258
			cmd = IMSG_VMDOP_START_VM_RESPONSE;
259
			break;
260
		}
261
		imsg_compose_event(&vm->vm_iev,
262
		    imsg->hdr.type, imsg->hdr.peerid, imsg->hdr.pid,
263
		    imsg->fd, &vid, sizeof(vid));
264
		break;
265
	case IMSG_VMDOP_RECEIVE_VM_REQUEST:
266
		IMSG_SIZE_CHECK(imsg, &vmc);
267
		memcpy(&vmc, imsg->data, sizeof(vmc));
268
		ret = vm_register(ps, &vmc, &vm, imsg->hdr.peerid, vmc.vmc_uid);
269
		vm->vm_tty = imsg->fd;
270
		vm->vm_received = 1;
271
		break;
272
	case IMSG_VMDOP_RECEIVE_VM_END:
273
		if ((vm = vm_getbyvmid(imsg->hdr.peerid)) == NULL) {
274
			res = ENOENT;
275
			close(imsg->fd);
276
			cmd = IMSG_VMDOP_START_VM_RESPONSE;
277
			break;
278
		}
279
		vm->vm_receive_fd = imsg->fd;
280
		res = vmm_start_vm(imsg, &id);
281
		/* Check if the ID can be mapped correctly */
282
		if ((id = vm_id2vmid(id, NULL)) == 0)
283
			res = ENOENT;
284
		cmd = IMSG_VMDOP_START_VM_RESPONSE;
285
		break;
286
	default:
287
		return (-1);
288
	}
289
290
	switch (cmd) {
291
	case 0:
292
		break;
293
	case IMSG_VMDOP_START_VM_RESPONSE:
294
		if (res != 0) {
295
			/* Remove local reference if it exists */
296
			if ((vm = vm_getbyvmid(imsg->hdr.peerid)) != NULL) {
297
				log_debug("%s: removing vm, START_VM_RESPONSE",
298
				    __func__);
299
				vm_remove(vm);
300
			}
301
		}
302
		if (id == 0)
303
			id = imsg->hdr.peerid;
304
	case IMSG_VMDOP_PAUSE_VM_RESPONSE:
305
	case IMSG_VMDOP_UNPAUSE_VM_RESPONSE:
306
	case IMSG_VMDOP_TERMINATE_VM_RESPONSE:
307
		memset(&vmr, 0, sizeof(vmr));
308
		vmr.vmr_result = res;
309
		vmr.vmr_id = id;
310
		if (proc_compose_imsg(ps, PROC_PARENT, -1, cmd,
311
		    imsg->hdr.peerid, -1, &vmr, sizeof(vmr)) == -1)
312
			return (-1);
313
		break;
314
	default:
315
		if (proc_compose_imsg(ps, PROC_PARENT, -1, cmd,
316
		    imsg->hdr.peerid, -1, &res, sizeof(res)) == -1)
317
			return (-1);
318
		break;
319
	}
320
321
	return (0);
322
}
323
324
/*
 * vmm_sighdlr
 *
 * Signal handler for the vmm process; only SIGCHLD is expected.  Reaps
 * exited VM child processes, asks vmm(4) to tear down the matching
 * in-kernel VM state and reports the termination to the parent.
 */
void
vmm_sighdlr(int sig, short event, void *arg)
{
	struct privsep *ps = arg;
	int status, ret = 0;
	uint32_t vmid;
	pid_t pid;
	struct vmop_result vmr;
	struct vmd_vm *vm;
	struct vm_terminate_params vtp;

	log_debug("%s: handling signal %d", __func__, sig);
	switch (sig) {
	case SIGCHLD:
		/* Reap every exited child without blocking. */
		do {
			pid = waitpid(-1, &status, WNOHANG);
			if (pid <= 0)
				continue;

			if (WIFEXITED(status) || WIFSIGNALED(status)) {
				vm = vm_getbypid(pid);
				if (vm == NULL) {
					/*
					 * If the VM is gone already, it
					 * got terminated via a
					 * IMSG_VMDOP_TERMINATE_VM_REQUEST.
					 */
					continue;
				}

				if (WIFEXITED(status))
					ret = WEXITSTATUS(status);

				/* don't reboot on pending shutdown */
				if (ret == EAGAIN && vm->vm_shutdown)
					ret = 0;

				/* vmm(4) identifies the VM by kernel id. */
				vmid = vm->vm_params.vmc_params.vcp_id;
				vtp.vtp_vm_id = vmid;
				log_debug("%s: attempting to terminate vm %d",
				    __func__, vm->vm_vmid);
				if (terminate_vm(&vtp) == 0) {
					/* Report exit status to the parent. */
					memset(&vmr, 0, sizeof(vmr));
					vmr.vmr_result = ret;
					vmr.vmr_id = vm_id2vmid(vmid, vm);
					if (proc_compose_imsg(ps, PROC_PARENT,
					    -1, IMSG_VMDOP_TERMINATE_VM_EVENT,
					    0, -1, &vmr, sizeof(vmr)) == -1)
						log_warnx("could not signal "
						    "termination of VM %u to "
						    "parent", vm->vm_vmid);
				} else
					log_warnx("could not terminate VM %u",
					    vm->vm_vmid);

				log_debug("%s: calling vm_remove", __func__);
				vm_remove(vm);
			} else
				fatalx("unexpected cause of SIGCHLD");
		/* keep reaping; retry on EINTR */
		} while (pid > 0 || (pid == -1 && errno == EINTR));
		break;
	default:
		fatalx("unexpected signal");
	}
}
389
390
/*
391
 * vmm_shutdown
392
 *
393
 * Terminate VMs on shutdown to avoid "zombie VM" processes.
394
 */
395
void
396
vmm_shutdown(void)
397
{
398
	struct vm_terminate_params vtp;
399
	struct vmd_vm *vm, *vm_next;
400
401
	TAILQ_FOREACH_SAFE(vm, env->vmd_vms, vm_entry, vm_next) {
402
		vtp.vtp_vm_id = vm_vmid2id(vm->vm_vmid, vm);
403
404
		/* XXX suspend or request graceful shutdown */
405
		(void)terminate_vm(&vtp);
406
		log_debug("%s: calling vm_remove", __func__);
407
		vm_remove(vm);
408
	}
409
}
410
411
/*
412
 * vmm_pipe
413
 *
414
 * Create a new imsg control channel between vmm parent and a VM
415
 * (can be called on both sides).
416
 */
417
int
418
vmm_pipe(struct vmd_vm *vm, int fd, void (*cb)(int, short, void *))
419
{
420
	struct imsgev	*iev = &vm->vm_iev;
421
422
	if (fcntl(fd, F_SETFL, O_NONBLOCK) == -1) {
423
		log_warn("failed to set nonblocking mode on vm pipe");
424
		return (-1);
425
	}
426
427
	imsg_init(&iev->ibuf, fd);
428
	iev->handler = cb;
429
	iev->data = vm;
430
	imsg_event_add(iev);
431
432
	return (0);
433
}
434
435
/*
436
 * vmm_dispatch_vm
437
 *
438
 * imsg callback for messages that are received from a VM child process.
439
 */
440
void
441
vmm_dispatch_vm(int fd, short event, void *arg)
442
{
443
	struct vmd_vm		*vm = arg;
444
	struct vmop_result	 vmr;
445
	struct imsgev		*iev = &vm->vm_iev;
446
	struct imsgbuf		*ibuf = &iev->ibuf;
447
	struct imsg		 imsg;
448
	ssize_t			 n;
449
	unsigned int		 i;
450
451
	if (event & EV_READ) {
452
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
453
			fatal("%s: imsg_read", __func__);
454
		if (n == 0) {
455
			/* this pipe is dead, so remove the event handler */
456
			event_del(&iev->ev);
457
			return;
458
		}
459
	}
460
461
	if (event & EV_WRITE) {
462
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
463
			fatal("%s: msgbuf_write fd %d", __func__, ibuf->fd);
464
		if (n == 0) {
465
			/* this pipe is dead, so remove the event handler */
466
			event_del(&iev->ev);
467
			return;
468
		}
469
	}
470
471
	for (;;) {
472
		if ((n = imsg_get(ibuf, &imsg)) == -1)
473
			fatal("%s: imsg_get", __func__);
474
		if (n == 0)
475
			break;
476
477
		dprintf("%s: got imsg %d from %s",
478
		    __func__, imsg.hdr.type,
479
		    vm->vm_params.vmc_params.vcp_name);
480
481
		switch (imsg.hdr.type) {
482
		case IMSG_VMDOP_VM_SHUTDOWN:
483
			vm->vm_shutdown = 1;
484
			break;
485
		case IMSG_VMDOP_VM_REBOOT:
486
			vm->vm_shutdown = 0;
487
			break;
488
		case IMSG_VMDOP_SEND_VM_RESPONSE:
489
			IMSG_SIZE_CHECK(&imsg, &vmr);
490
			memcpy(&vmr, imsg.data, sizeof(vmr));
491
			if (!vmr.vmr_result) {
492
				log_debug("%s: calling vm_remove", __func__);
493
				vm_remove(vm);
494
			}
495
		case IMSG_VMDOP_PAUSE_VM_RESPONSE:
496
		case IMSG_VMDOP_UNPAUSE_VM_RESPONSE:
497
			for (i = 0; i < sizeof(procs); i++) {
498
				if (procs[i].p_id == PROC_PARENT) {
499
					proc_forward_imsg(procs[i].p_ps,
500
					    &imsg, PROC_PARENT, -1);
501
					break;
502
				}
503
			}
504
			break;
505
506
		default:
507
			fatalx("%s: got invalid imsg %d from %s",
508
			    __func__, imsg.hdr.type,
509
			    vm->vm_params.vmc_params.vcp_name);
510
		}
511
		imsg_free(&imsg);
512
	}
513
	imsg_event_add(iev);
514
}
515
516
/*
517
 * terminate_vm
518
 *
519
 * Requests vmm(4) to terminate the VM whose ID is provided in the
520
 * supplied vm_terminate_params structure (vtp->vtp_vm_id)
521
 *
522
 * Parameters
523
 *  vtp: vm_terminate_params struct containing the ID of the VM to terminate
524
 *
525
 * Return values:
526
 *  0: success
527
 *  !0 : ioctl to vmm(4) failed (eg, ENOENT if the supplied VM is not
528
 *      valid)
529
 */
530
int
531
terminate_vm(struct vm_terminate_params *vtp)
532
{
533
	log_debug("%s: terminating vmid %d", __func__, vtp->vtp_vm_id);
534
	if (ioctl(env->vmd_fd, VMM_IOC_TERM, vtp) < 0)
535
		return (errno);
536
537
	return (0);
538
}
539
540
/*
541
 * opentap
542
 *
543
 * Opens the next available tap device, up to MAX_TAP.
544
 *
545
 * Parameters
546
 *  ifname: an optional buffer of at least IF_NAMESIZE bytes.
547
 *
548
 * Returns a file descriptor to the tap node opened, or -1 if no tap
549
 * devices were available.
550
 */
551
int
552
opentap(char *ifname)
553
{
554
	int i, fd;
555
	char path[PATH_MAX];
556
557
	strlcpy(ifname, "tap", IF_NAMESIZE);
558
	for (i = 0; i < MAX_TAP; i++) {
559
		snprintf(path, PATH_MAX, "/dev/tap%d", i);
560
		fd = open(path, O_RDWR | O_NONBLOCK);
561
		if (fd != -1) {
562
			if (ifname != NULL)
563
				snprintf(ifname, IF_NAMESIZE, "tap%d", i);
564
			return (fd);
565
		}
566
	}
567
568
	return (-1);
569
}
570
571
/*
572
 * vmm_start_vm
573
 *
574
 * Prepares and forks a new VM process.
575
 *
576
 * Parameters:
577
 *  imsg: The VM data structure that is including the VM create parameters.
578
 *  id: Returns the VM id as reported by the kernel and obtained from the VM.
579
 *
580
 * Return values:
581
 *  0: success
582
 *  !0 : failure - typically an errno indicating the source of the failure
583
 */
584
int
vmm_start_vm(struct imsg *imsg, uint32_t *id)
{
	struct vm_create_params	*vcp;
	struct vmd_vm		*vm;
	int			 ret = EINVAL;
	int			 fds[2];
	size_t			 i;

	/* The imsg peer id carries the vmd-internal VM id. */
	if ((vm = vm_getbyvmid(imsg->hdr.peerid)) == NULL) {
		log_warnx("%s: can't find vm", __func__);
		ret = ENOENT;
		goto err;
	}
	vcp = &vm->vm_params.vmc_params;

	/* Received (migrated) VMs had their tty set on RECEIVE_VM_REQUEST. */
	if (!vm->vm_received) {
		if ((vm->vm_tty = imsg->fd) == -1) {
			log_warnx("%s: can't get tty", __func__);
			goto err;
		}
	}

	/* Control channel between this process and the new VM child. */
	if (socketpair(AF_UNIX, SOCK_STREAM, PF_UNSPEC, fds) == -1)
		fatal("socketpair");

	/* Start child vmd for this VM (fork, chroot, drop privs) */
	ret = fork();

	/* Start child failed? - cleanup and leave */
	if (ret == -1) {
		log_warnx("%s: start child failed", __func__);
		ret = EIO;
		goto err;
	}

	if (ret > 0) {
		/* Parent */
		vm->vm_pid = ret;
		close(fds[1]);

		/* The child owns the disk/NIC/kernel/tty fds now. */
		for (i = 0 ; i < vcp->vcp_ndisks; i++) {
			close(vm->vm_disks[i]);
			vm->vm_disks[i] = -1;
		}

		for (i = 0 ; i < vcp->vcp_nnics; i++) {
			close(vm->vm_ifs[i].vif_fd);
			vm->vm_ifs[i].vif_fd = -1;
		}

		close(vm->vm_kernel);
		vm->vm_kernel = -1;

		close(vm->vm_tty);
		vm->vm_tty = -1;

		/* read back the kernel-generated vm id from the child */
		if (read(fds[0], &vcp->vcp_id, sizeof(vcp->vcp_id)) !=
		    sizeof(vcp->vcp_id))
			fatal("read vcp id");

		/* id 0 means the child failed to create the kernel VM */
		if (vcp->vcp_id == 0)
			goto err;

		*id = vcp->vcp_id;

		/* Keep fds[0] as the imsg channel to the child. */
		if (vmm_pipe(vm, fds[0], vmm_dispatch_vm) == -1)
			fatal("setup vm pipe");

		return (0);
	} else {
		/* Child */
		close(fds[0]);

		/* start_vm()'s return value becomes the child exit status. */
		ret = start_vm(vm, fds[1]);

		_exit(ret);
	}

	/* NOTREACHED: both fork branches return or _exit above. */
	return (0);

 err:
	log_debug("%s: calling vm_remove", __func__);
	vm_remove(vm);

	return (ret);
}
672
673
/*
674
 * get_info_vm
675
 *
676
 * Returns a list of VMs known to vmm(4).
677
 *
678
 * Parameters:
679
 *  ps: the privsep context.
680
 *  imsg: the received imsg including the peer id.
681
 *  terminate: terminate the listed vm.
682
 *
683
 * Return values:
684
 *  0: success
685
 *  !0 : failure (eg, ENOMEM, EIO or another error code from vmm(4) ioctl)
686
 */
687
int
688
get_info_vm(struct privsep *ps, struct imsg *imsg, int terminate)
689
{
690
	int ret;
691
	size_t ct, i;
692
	struct vm_info_params vip;
693
	struct vm_info_result *info;
694
	struct vm_terminate_params vtp;
695
	struct vmop_info_result vir;
696
697
	/*
698
	 * We issue the VMM_IOC_INFO ioctl twice, once with an input
699
	 * buffer size of 0, which results in vmm(4) returning the
700
	 * number of bytes required back to us in vip.vip_size,
701
	 * and then we call it again after malloc'ing the required
702
	 * number of bytes.
703
	 *
704
	 * It is possible that we could fail a second time (eg, if
705
	 * another VM was created in the instant between the two
706
	 * ioctls, but in that case the caller can just try again
707
	 * as vmm(4) will return a zero-sized list in that case.
708
	 */
709
	vip.vip_size = 0;
710
	info = NULL;
711
	ret = 0;
712
	memset(&vir, 0, sizeof(vir));
713
714
	/* First ioctl to see how many bytes needed (vip.vip_size) */
715
	if (ioctl(env->vmd_fd, VMM_IOC_INFO, &vip) < 0)
716
		return (errno);
717
718
	if (vip.vip_info_ct != 0)
719
		return (EIO);
720
721
	info = malloc(vip.vip_size);
722
	if (info == NULL)
723
		return (ENOMEM);
724
725
	/* Second ioctl to get the actual list */
726
	vip.vip_info = info;
727
	if (ioctl(env->vmd_fd, VMM_IOC_INFO, &vip) < 0) {
728
		ret = errno;
729
		free(info);
730
		return (ret);
731
	}
732
733
	/* Return info */
734
	ct = vip.vip_size / sizeof(struct vm_info_result);
735
	for (i = 0; i < ct; i++) {
736
		if (terminate) {
737
			vtp.vtp_vm_id = info[i].vir_id;
738
			if ((ret = terminate_vm(&vtp)) != 0)
739
				return (ret);
740
			log_debug("%s: terminated VM %s (id %d)", __func__,
741
			    info[i].vir_name, info[i].vir_id);
742
			continue;
743
		}
744
		memcpy(&vir.vir_info, &info[i], sizeof(vir.vir_info));
745
		vir.vir_info.vir_id = vm_id2vmid(info[i].vir_id, NULL);
746
		if (proc_compose_imsg(ps, PROC_PARENT, -1,
747
		    IMSG_VMDOP_GET_INFO_VM_DATA, imsg->hdr.peerid, -1,
748
		    &vir, sizeof(vir)) == -1)
749
			return (EIO);
750
	}
751
	free(info);
752
	return (0);
753
}