GCC Code Coverage Report
File: usr.sbin/vmd/vm.c    Directory: ./    Date: 2017-11-07

              Exec    Total    Coverage
Lines:           0      676      0.0 %
Branches:        0      387      0.0 %
/*	$OpenBSD: vm.c,v 1.28 2017/09/19 06:22:30 mlarkin Exp $	*/

/*
 * Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/mman.h>

#include <dev/ic/i8253reg.h>
#include <dev/isa/isareg.h>
#include <dev/pci/pcireg.h>

#include <machine/param.h>
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/vmmvar.h>

#include <net/if.h>

#include <errno.h>
#include <event.h>
#include <fcntl.h>
#include <imsg.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <util.h>

#include "vmd.h"
#include "vmm.h"
#include "loadfile.h"
#include "pci.h"
#include "virtio.h"
#include "proc.h"
#include "i8253.h"
#include "i8259.h"
#include "ns8250.h"
#include "mc146818.h"
#include "atomicio.h"

io_fn_t ioports_map[MAX_PORTS];

int run_vm(int *, int *, struct vmop_create_params *, struct vcpu_reg_state *);
void vm_dispatch_vmm(int, short, void *);
void *event_thread(void *);
void *vcpu_run_loop(void *);
int vcpu_exit(struct vm_run_params *);
int vcpu_reset(uint32_t, uint32_t, struct vcpu_reg_state *);
void create_memory_map(struct vm_create_params *);
int alloc_guest_mem(struct vm_create_params *);
int vmm_create_vm(struct vm_create_params *);
void init_emulated_hw(struct vmop_create_params *, int *, int *);
void restore_emulated_hw(struct vm_create_params *, int, int *, int *);
void vcpu_exit_inout(struct vm_run_params *);
uint8_t vcpu_exit_pci(struct vm_run_params *);
int vcpu_pic_intr(uint32_t, uint32_t, uint8_t);
int loadfile_bios(FILE *, struct vcpu_reg_state *);
int send_vm(int, struct vm_create_params *);
int dump_send_header(int);
int dump_vmr(int, struct vm_mem_range *);
int dump_mem(int, struct vm_create_params *);
void restore_vmr(int, struct vm_mem_range *);
void restore_mem(int, struct vm_create_params *);
void pause_vm(struct vm_create_params *);
void unpause_vm(struct vm_create_params *);

static struct vm_mem_range *find_gpa_range(struct vm_create_params *, paddr_t,
    size_t);

int con_fd;
struct vmd_vm *current_vm;

extern struct vmd *env;

extern char *__progname;

pthread_mutex_t threadmutex;
pthread_cond_t threadcond;

pthread_cond_t vcpu_run_cond[VMM_MAX_VCPUS_PER_VM];
pthread_mutex_t vcpu_run_mtx[VMM_MAX_VCPUS_PER_VM];
uint8_t vcpu_hlt[VMM_MAX_VCPUS_PER_VM];
uint8_t vcpu_done[VMM_MAX_VCPUS_PER_VM];

/*
 * Represents a standard register set for an OS to be booted
 * as a flat 32 bit address space, before paging is enabled.
 *
 * NOT set here are:
 *  RIP
 *  RSP
 *  GDTR BASE
 *
 * Specific bootloaders should clone this structure and override
 * those fields as needed.
 *
 * Note - CR3 and various bits in CR0 may be overridden by vmm(4) based on
 *        features of the CPU in use.
 */
static const struct vcpu_reg_state vcpu_init_flat32 = {
#ifdef __i386__
	.vrs_gprs[VCPU_REGS_EFLAGS] = 0x2,
	.vrs_gprs[VCPU_REGS_EIP] = 0x0,
	.vrs_gprs[VCPU_REGS_ESP] = 0x0,
#else
	.vrs_gprs[VCPU_REGS_RFLAGS] = 0x2,
	.vrs_gprs[VCPU_REGS_RIP] = 0x0,
	.vrs_gprs[VCPU_REGS_RSP] = 0x0,
#endif
	.vrs_crs[VCPU_REGS_CR0] = CR0_CD | CR0_NW | CR0_ET | CR0_PE | CR0_PG,
	.vrs_crs[VCPU_REGS_CR3] = PML4_PAGE,
	.vrs_sregs[VCPU_REGS_CS] = { 0x8, 0xFFFFFFFF, 0xC09F, 0x0},
	.vrs_sregs[VCPU_REGS_DS] = { 0x10, 0xFFFFFFFF, 0xC093, 0x0},
	.vrs_sregs[VCPU_REGS_ES] = { 0x10, 0xFFFFFFFF, 0xC093, 0x0},
	.vrs_sregs[VCPU_REGS_FS] = { 0x10, 0xFFFFFFFF, 0xC093, 0x0},
	.vrs_sregs[VCPU_REGS_GS] = { 0x10, 0xFFFFFFFF, 0xC093, 0x0},
	.vrs_sregs[VCPU_REGS_SS] = { 0x10, 0xFFFFFFFF, 0xC093, 0x0},
	.vrs_gdtr = { 0x0, 0xFFFF, 0x0, 0x0},
	.vrs_idtr = { 0x0, 0xFFFF, 0x0, 0x0},
	.vrs_sregs[VCPU_REGS_LDTR] = { 0x0, 0xFFFF, 0x0082, 0x0},
	.vrs_sregs[VCPU_REGS_TR] = { 0x0, 0xFFFF, 0x008B, 0x0},
	.vrs_msrs[VCPU_REGS_EFER] = 0ULL,
#ifndef __i386__
	.vrs_msrs[VCPU_REGS_STAR] = 0ULL,
	.vrs_msrs[VCPU_REGS_LSTAR] = 0ULL,
	.vrs_msrs[VCPU_REGS_CSTAR] = 0ULL,
	.vrs_msrs[VCPU_REGS_SFMASK] = 0ULL,
	.vrs_msrs[VCPU_REGS_KGSBASE] = 0ULL,
	.vrs_msrs[VCPU_REGS_MISC_ENABLE] = 0ULL,
	.vrs_crs[VCPU_REGS_XCR0] = XCR0_X87
#endif
};

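/*
 * Editor's illustrative sketch (guarded out; not part of the original
 * file): how a loader might clone the flat-32 template above and fill
 * in the fields that are deliberately left unset. The entry point and
 * stack values are hypothetical placeholders.
 */
#if 0
static void
example_flat32_regs(struct vcpu_reg_state *vrs, uint64_t entry,
    uint64_t stack)
{
	memcpy(vrs, &vcpu_init_flat32, sizeof(*vrs));
#ifndef __i386__
	vrs->vrs_gprs[VCPU_REGS_RIP] = entry;	/* kernel entry point */
	vrs->vrs_gprs[VCPU_REGS_RSP] = stack;	/* initial stack pointer */
#endif
}
#endif
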
/*
 * Represents a standard register set for a BIOS to be booted
 * as a flat 16 bit address space.
 */
static const struct vcpu_reg_state vcpu_init_flat16 = {
#ifdef __i386__
	.vrs_gprs[VCPU_REGS_EFLAGS] = 0x2,
	.vrs_gprs[VCPU_REGS_EIP] = 0xFFF0,
	.vrs_gprs[VCPU_REGS_ESP] = 0x0,
#else
	.vrs_gprs[VCPU_REGS_RFLAGS] = 0x2,
	.vrs_gprs[VCPU_REGS_RIP] = 0xFFF0,
	.vrs_gprs[VCPU_REGS_RSP] = 0x0,
#endif
	.vrs_crs[VCPU_REGS_CR0] = 0x60000010,
	.vrs_crs[VCPU_REGS_CR3] = 0,
	.vrs_sregs[VCPU_REGS_CS] = { 0xF000, 0xFFFF, 0x809F, 0xF0000},
	.vrs_sregs[VCPU_REGS_DS] = { 0x0, 0xFFFF, 0x8093, 0x0},
	.vrs_sregs[VCPU_REGS_ES] = { 0x0, 0xFFFF, 0x8093, 0x0},
	.vrs_sregs[VCPU_REGS_FS] = { 0x0, 0xFFFF, 0x8093, 0x0},
	.vrs_sregs[VCPU_REGS_GS] = { 0x0, 0xFFFF, 0x8093, 0x0},
	.vrs_sregs[VCPU_REGS_SS] = { 0x0, 0xFFFF, 0x8093, 0x0},
	.vrs_gdtr = { 0x0, 0xFFFF, 0x0, 0x0},
	.vrs_idtr = { 0x0, 0xFFFF, 0x0, 0x0},
	.vrs_sregs[VCPU_REGS_LDTR] = { 0x0, 0xFFFF, 0x0082, 0x0},
	.vrs_sregs[VCPU_REGS_TR] = { 0x0, 0xFFFF, 0x008B, 0x0},
	.vrs_msrs[VCPU_REGS_EFER] = 0ULL,
#ifndef __i386__
	.vrs_msrs[VCPU_REGS_STAR] = 0ULL,
	.vrs_msrs[VCPU_REGS_LSTAR] = 0ULL,
	.vrs_msrs[VCPU_REGS_CSTAR] = 0ULL,
	.vrs_msrs[VCPU_REGS_SFMASK] = 0ULL,
	.vrs_msrs[VCPU_REGS_KGSBASE] = 0ULL,
	.vrs_crs[VCPU_REGS_XCR0] = XCR0_X87
#endif
};

/*
 * loadfile_bios
 *
 * Alternatively to loadfile_elf, this function loads a non-ELF BIOS image
 * directly into memory.
 *
 * Parameters:
 *  fp: FILE handle of the BIOS image to load
 *  (out) vrs: register state to set on init for this kernel
 *
 * Return values:
 *  0 if successful
 *  -1 if the BIOS image could not be read or does not fit below 1MB
 */
int
loadfile_bios(FILE *fp, struct vcpu_reg_state *vrs)
{
	off_t	 size, off;

	/* Set up a "flat 16 bit" register state for BIOS */
	memcpy(vrs, &vcpu_init_flat16, sizeof(*vrs));

	/* Get the size of the BIOS image and seek to the beginning */
	if (fseeko(fp, 0, SEEK_END) == -1 || (size = ftello(fp)) == -1 ||
	    fseeko(fp, 0, SEEK_SET) == -1)
		return (-1);

	/* The BIOS image must end at 1MB */
	if ((off = 1048576 - size) < 0)
		return (-1);

	/* Read BIOS image into memory */
	if (mread(fp, off, size) != (size_t)size) {
		errno = EIO;
		return (-1);
	}

	log_debug("%s: loaded BIOS image", __func__);

	return (0);
}

/*
 * start_vm
 *
 * After forking a new VM process, starts the new VM with the creation
 * parameters supplied (in the incoming vm->vm_params field). This
 * function performs a basic sanity check on the incoming parameters
 * and then performs the following steps to complete the creation of the VM:
 *
 * 1. validates and creates the new VM
 * 2. opens the imsg control channel to the parent and drops more privilege
 * 3. drops additional privileges by calling pledge(2)
 * 4. loads the kernel from the disk image or file descriptor
 * 5. runs the VM's VCPU loops.
 *
 * Parameters:
 *  vm: The VM data structure that includes the VM create parameters.
 *  fd: The imsg socket that is connected to the parent process.
 *
 * Return values:
 *  0: success
 *  !0 : failure - typically an errno indicating the source of the failure
 */
int
start_vm(struct vmd_vm *vm, int fd)
{
	struct vm_create_params	*vcp = &vm->vm_params.vmc_params;
	struct vcpu_reg_state	 vrs;
	int			 nicfds[VMM_MAX_NICS_PER_VM];
	int			 ret;
	FILE			*fp;
	struct vmboot_params	 vmboot;
	size_t			 i;
	struct vm_rwregs_params  vrp;

	/* Child */
	setproctitle("%s", vcp->vcp_name);
	log_procinit(vcp->vcp_name);

	if (!vm->vm_received)
		create_memory_map(vcp);

	ret = alloc_guest_mem(vcp);

	if (ret) {
		errno = ret;
		fatal("could not allocate guest memory - exiting");
	}

	ret = vmm_create_vm(vcp);
	current_vm = vm;

	/* send back the kernel-generated vm id (0 on error) */
	if (write(fd, &vcp->vcp_id, sizeof(vcp->vcp_id)) !=
	    sizeof(vcp->vcp_id))
		fatal("write vcp id");

	if (ret) {
		errno = ret;
		fatal("create vmm ioctl failed - exiting");
	}

	/*
	 * pledge in the vm processes:
	 * stdio - for malloc and basic I/O including events.
	 * recvfd - for send/recv.
	 * vmm - for the vmm ioctls and operations.
	 */
	if (pledge("stdio vmm recvfd flock rpath cpath wpath", NULL) == -1)
		fatal("pledge");

	if (vm->vm_received) {
		ret = read(vm->vm_receive_fd, &vrp, sizeof(vrp));
		if (ret != sizeof(vrp)) {
			fatal("received incomplete vrp - exiting");
		}
		vrs = vrp.vrwp_regs;
	} else {
		/*
		 * Set up default "flat 32 bit" register state - RIP,
		 * RSP, and GDT info will be set in bootloader
		 */
		memcpy(&vrs, &vcpu_init_flat32, sizeof(vrs));

		/* Find and open kernel image */
		if ((fp = vmboot_open(vm->vm_kernel,
		    vm->vm_disks[0], &vmboot)) == NULL)
			fatalx("failed to open kernel - exiting");

		/* Load kernel image */
		ret = loadfile_elf(fp, vcp, &vrs,
		    vmboot.vbp_bootdev, vmboot.vbp_howto);

		/*
		 * Try BIOS as a fallback (only if it was provided as an image
		 * with vm->vm_kernel and not loaded from the disk)
		 */
		if (ret && errno == ENOEXEC && vm->vm_kernel != -1)
			ret = loadfile_bios(fp, &vrs);

		if (ret)
			fatal("failed to load kernel or BIOS - exiting");

		vmboot_close(fp, &vmboot);
	}

	if (vm->vm_kernel != -1)
		close(vm->vm_kernel);

	con_fd = vm->vm_tty;
	if (fcntl(con_fd, F_SETFL, O_NONBLOCK) == -1)
		fatal("failed to set nonblocking mode on console");

	for (i = 0; i < VMM_MAX_NICS_PER_VM; i++)
		nicfds[i] = vm->vm_ifs[i].vif_fd;

	event_init();

	if (vm->vm_received) {
		restore_emulated_hw(vcp, vm->vm_receive_fd, nicfds,
		    vm->vm_disks);
		mc146818_start();
		restore_mem(vm->vm_receive_fd, vcp);
	}

	if (vmm_pipe(vm, fd, vm_dispatch_vmm) == -1)
		fatal("setup vm pipe");

	/* Execute the vcpu run loop(s) for this VM */
	ret = run_vm(vm->vm_disks, nicfds, &vm->vm_params, &vrs);

	return (ret);
}

/*
 * vm_dispatch_vmm
 *
 * imsg callback for messages that are received from the vmm parent process.
 */
void
vm_dispatch_vmm(int fd, short event, void *arg)
{
	struct vmd_vm		*vm = arg;
	struct vmop_result	 vmr;
	struct imsgev		*iev = &vm->vm_iev;
	struct imsgbuf		*ibuf = &iev->ibuf;
	struct imsg		 imsg;
	ssize_t			 n;
	int			 verbose;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("%s: imsg_read", __func__);
		if (n == 0)
			_exit(0);
	}

	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("%s: msgbuf_write fd %d", __func__, ibuf->fd);
		if (n == 0)
			_exit(0);
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("%s: imsg_get", __func__);
		if (n == 0)
			break;

#if DEBUG > 1
		log_debug("%s: got imsg %d from %s",
		    __func__, imsg.hdr.type,
		    vm->vm_params.vmc_params.vcp_name);
#endif

		switch (imsg.hdr.type) {
		case IMSG_CTL_VERBOSE:
			IMSG_SIZE_CHECK(&imsg, &verbose);
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_setverbose(verbose);
			break;
		case IMSG_VMDOP_VM_SHUTDOWN:
			if (vmmci_ctl(VMMCI_SHUTDOWN) == -1)
				_exit(0);
			break;
		case IMSG_VMDOP_VM_REBOOT:
			if (vmmci_ctl(VMMCI_REBOOT) == -1)
				_exit(0);
			break;
		case IMSG_VMDOP_PAUSE_VM:
			vmr.vmr_result = 0;
			vmr.vmr_id = vm->vm_vmid;
			pause_vm(&vm->vm_params.vmc_params);
			imsg_compose_event(&vm->vm_iev,
			    IMSG_VMDOP_PAUSE_VM_RESPONSE,
			    imsg.hdr.peerid, imsg.hdr.pid, -1, &vmr,
			    sizeof(vmr));
			break;
		case IMSG_VMDOP_UNPAUSE_VM:
			vmr.vmr_result = 0;
			vmr.vmr_id = vm->vm_vmid;
			unpause_vm(&vm->vm_params.vmc_params);
			imsg_compose_event(&vm->vm_iev,
			    IMSG_VMDOP_UNPAUSE_VM_RESPONSE,
			    imsg.hdr.peerid, imsg.hdr.pid, -1, &vmr,
			    sizeof(vmr));
			break;
		case IMSG_VMDOP_SEND_VM_REQUEST:
			vmr.vmr_id = vm->vm_vmid;
			vmr.vmr_result = send_vm(imsg.fd,
			    &vm->vm_params.vmc_params);
			imsg_compose_event(&vm->vm_iev,
			    IMSG_VMDOP_SEND_VM_RESPONSE,
			    imsg.hdr.peerid, imsg.hdr.pid, -1, &vmr,
			    sizeof(vmr));
			break;
		default:
			fatalx("%s: got invalid imsg %d from %s",
			    __func__, imsg.hdr.type,
			    vm->vm_params.vmc_params.vcp_name);
		}
		imsg_free(&imsg);
	}
	imsg_event_add(iev);
}

/*
 * vm_shutdown
 *
 * Tell the vmm parent process to shutdown or reboot the VM and exit.
 */
__dead void
vm_shutdown(unsigned int cmd)
{
	switch (cmd) {
	case VMMCI_NONE:
	case VMMCI_SHUTDOWN:
		(void)imsg_compose_event(&current_vm->vm_iev,
		    IMSG_VMDOP_VM_SHUTDOWN, 0, 0, -1, NULL, 0);
		break;
	case VMMCI_REBOOT:
		(void)imsg_compose_event(&current_vm->vm_iev,
		    IMSG_VMDOP_VM_REBOOT, 0, 0, -1, NULL, 0);
		break;
	default:
		fatalx("invalid vm ctl command: %d", cmd);
	}
	imsg_flush(&current_vm->vm_iev.ibuf);

	_exit(0);
}

int
send_vm(int fd, struct vm_create_params *vcp)
{
	struct vm_rwregs_params	   vrp;
	struct vmop_create_params *vmc;
	struct vm_terminate_params vtp;
	unsigned int		   flags = 0;
	unsigned int		   i;
	int			   ret = 0;
	size_t			   sz;

	if (dump_send_header(fd)) {
		log_info("%s: failed to send vm dump header", __func__);
		goto err;
	}

	pause_vm(vcp);

	vmc = calloc(1, sizeof(struct vmop_create_params));
	if (vmc == NULL) {
		log_warn("%s: calloc error getting vmc", __func__);
		ret = -1;
		goto err;
	}

	flags |= VMOP_CREATE_MEMORY;
	memcpy(&vmc->vmc_params, &current_vm->vm_params, sizeof(struct
	    vmop_create_params));
	vmc->vmc_flags = flags;
	vrp.vrwp_vm_id = vcp->vcp_id;
	vrp.vrwp_mask = VM_RWREGS_ALL;

	sz = atomicio(vwrite, fd, vmc, sizeof(struct vmop_create_params));
	if (sz != sizeof(struct vmop_create_params)) {
		ret = -1;
		goto err;
	}

	for (i = 0; i < vcp->vcp_ncpus; i++) {
		vrp.vrwp_vcpu_id = i;
		if ((ret = ioctl(env->vmd_fd, VMM_IOC_READREGS, &vrp))) {
			log_warn("%s: readregs failed", __func__);
			goto err;
		}

		sz = atomicio(vwrite, fd, &vrp,
		    sizeof(struct vm_rwregs_params));
		if (sz != sizeof(struct vm_rwregs_params)) {
			log_warn("%s: dumping registers failed", __func__);
			ret = -1;
			goto err;
		}
	}

	if ((ret = i8253_dump(fd)))
		goto err;
	if ((ret = i8259_dump(fd)))
		goto err;
	if ((ret = ns8250_dump(fd)))
		goto err;
	if ((ret = mc146818_dump(fd)))
		goto err;
	if ((ret = pci_dump(fd)))
		goto err;
	if ((ret = virtio_dump(fd)))
		goto err;
	if ((ret = dump_mem(fd, vcp)))
		goto err;

	vtp.vtp_vm_id = vcp->vcp_id;
	if (ioctl(env->vmd_fd, VMM_IOC_TERM, &vtp) < 0) {
		log_warnx("%s: term IOC error: %d", __func__, errno);
	}
err:
	close(fd);
	if (ret)
		unpause_vm(vcp);
	return (ret);
}

int
dump_send_header(int fd)
{
	struct vm_dump_header	   vmh;
	int			   i;

	vmh.vmh_cpuids[0].code = 0x00;
	vmh.vmh_cpuids[0].leaf = 0x00;

	vmh.vmh_cpuids[1].code = 0x01;
	vmh.vmh_cpuids[1].leaf = 0x00;

	vmh.vmh_cpuids[2].code = 0x07;
	vmh.vmh_cpuids[2].leaf = 0x00;

	vmh.vmh_cpuids[3].code = 0x0d;
	vmh.vmh_cpuids[3].leaf = 0x00;

	vmh.vmh_cpuids[4].code = 0x80000001;
	vmh.vmh_cpuids[4].leaf = 0x00;

	vmh.vmh_version = VM_DUMP_VERSION;

	for (i = 0; i < VM_DUMP_HEADER_CPUID_COUNT; i++) {
		CPUID_LEAF(vmh.vmh_cpuids[i].code,
		    vmh.vmh_cpuids[i].leaf,
		    vmh.vmh_cpuids[i].a,
		    vmh.vmh_cpuids[i].b,
		    vmh.vmh_cpuids[i].c,
		    vmh.vmh_cpuids[i].d);
	}

	if (atomicio(vwrite, fd, &vmh, sizeof(vmh)) != sizeof(vmh))
		return (-1);

	return (0);
}

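/*
 * Editor's sketch (guarded out; not part of the original file): the
 * receiving side of this dump protocol lives elsewhere in vmd, but a
 * minimal receiver-side check could read the header back and compare
 * each recorded CPUID leaf against the local CPU before accepting the
 * dump:
 */
#if 0
static int
example_check_header(int fd)
{
	struct vm_dump_header	vmh;
	uint32_t		a, b, c, d;
	int			i;

	if (atomicio(read, fd, &vmh, sizeof(vmh)) != sizeof(vmh))
		return (-1);
	if (vmh.vmh_version != VM_DUMP_VERSION)
		return (-1);
	for (i = 0; i < VM_DUMP_HEADER_CPUID_COUNT; i++) {
		/* Re-run each leaf locally and compare to the dump */
		CPUID_LEAF(vmh.vmh_cpuids[i].code, vmh.vmh_cpuids[i].leaf,
		    a, b, c, d);
		if (a != vmh.vmh_cpuids[i].a || b != vmh.vmh_cpuids[i].b ||
		    c != vmh.vmh_cpuids[i].c || d != vmh.vmh_cpuids[i].d)
			return (-1);
	}
	return (0);
}
#endif
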
int
dump_mem(int fd, struct vm_create_params *vcp)
{
	unsigned int	i;
	int		ret;
	struct vm_mem_range *vmr;

	for (i = 0; i < vcp->vcp_nmemranges; i++) {
		vmr = &vcp->vcp_memranges[i];
		ret = dump_vmr(fd, vmr);
		if (ret)
			return (ret);
	}
	return (0);
}

void
restore_mem(int fd, struct vm_create_params *vcp)
{
	unsigned int	     i;
	struct vm_mem_range *vmr;

	for (i = 0; i < vcp->vcp_nmemranges; i++) {
		vmr = &vcp->vcp_memranges[i];
		restore_vmr(fd, vmr);
	}
}

int
dump_vmr(int fd, struct vm_mem_range *vmr)
{
	size_t	rem = vmr->vmr_size, read = 0;
	char	buf[PAGE_SIZE];

	while (rem > 0) {
		if (read_mem(vmr->vmr_gpa + read, buf, PAGE_SIZE)) {
			log_warn("failed to read vmr");
			return (-1);
		}
		if (atomicio(vwrite, fd, buf, sizeof(buf)) != sizeof(buf)) {
			log_warn("failed to dump vmr");
			return (-1);
		}
		rem = rem - PAGE_SIZE;
		read = read + PAGE_SIZE;
	}
	return (0);
}

void
restore_vmr(int fd, struct vm_mem_range *vmr)
{
	size_t	rem = vmr->vmr_size, wrote = 0;
	char	buf[PAGE_SIZE];

	while (rem > 0) {
		if (atomicio(read, fd, buf, sizeof(buf)) != sizeof(buf))
			fatal("failed to restore vmr");
		if (write_mem(vmr->vmr_gpa + wrote, buf, PAGE_SIZE))
			fatal("failed to write vmr");
		rem = rem - PAGE_SIZE;
		wrote = wrote + PAGE_SIZE;
	}
}

void
pause_vm(struct vm_create_params *vcp)
{
	if (current_vm->vm_paused)
		return;

	current_vm->vm_paused = 1;

	/*
	 * XXX: vcpu_run_loop is running in another thread and we have
	 * to wait for the vm to exit before returning
	 */
	sleep(1);

	i8253_stop();
	mc146818_stop();
}

void
unpause_vm(struct vm_create_params *vcp)
{
	unsigned int n;

	if (!current_vm->vm_paused)
		return;

	current_vm->vm_paused = 0;

	i8253_start();
	mc146818_start();
	for (n = 0; n < vcp->vcp_ncpus; n++)
		pthread_cond_broadcast(&vcpu_run_cond[n]);
}

/*
 * vcpu_reset
 *
 * Requests vmm(4) to reset a VCPU in the indicated VM to
 * the register state provided
 *
 * Parameters:
 *  vmid: VM ID to reset
 *  vcpu_id: VCPU ID to reset
 *  vrs: the register state to initialize
 *
 * Return values:
 *  0: success
 *  !0 : ioctl to vmm(4) failed (eg, ENOENT if the supplied VM ID is not
 *      valid)
 */
int
vcpu_reset(uint32_t vmid, uint32_t vcpu_id, struct vcpu_reg_state *vrs)
{
	struct vm_resetcpu_params vrp;

	memset(&vrp, 0, sizeof(vrp));
	vrp.vrp_vm_id = vmid;
	vrp.vrp_vcpu_id = vcpu_id;
	memcpy(&vrp.vrp_init_state, vrs, sizeof(struct vcpu_reg_state));

	log_debug("%s: resetting vcpu %d for vm %d", __func__, vcpu_id, vmid);

	if (ioctl(env->vmd_fd, VMM_IOC_RESETCPU, &vrp) < 0)
		return (errno);

	return (0);
}

/*
 * create_memory_map
 *
 * Sets up the guest physical memory ranges that the VM can access.
 *
 * Parameters:
 *  vcp: VM create parameters describing the VM whose memory map
 *       is being created
 *
 * Return values:
 *  nothing
 */
void
create_memory_map(struct vm_create_params *vcp)
{
	size_t len, mem_bytes, mem_mb;

	mem_mb = vcp->vcp_memranges[0].vmr_size;
	vcp->vcp_nmemranges = 0;
	if (mem_mb < 1 || mem_mb > VMM_MAX_VM_MEM_SIZE)
		return;

	mem_bytes = mem_mb * 1024 * 1024;

	/* First memory region: 0 - LOWMEM_KB (DOS low mem) */
	len = LOWMEM_KB * 1024;
	vcp->vcp_memranges[0].vmr_gpa = 0x0;
	vcp->vcp_memranges[0].vmr_size = len;
	mem_bytes -= len;

	/*
	 * Second memory region: LOWMEM_KB - 1MB.
	 *
	 * N.B. - Normally ROMs or parts of video RAM are mapped here.
	 * We have to add this region, because some systems
	 * unconditionally write to 0xb8000 (VGA RAM), and
	 * we need to make sure that vmm(4) permits accesses
	 * to it. So allocate guest memory for it.
	 */
	len = 0x100000 - LOWMEM_KB * 1024;
	vcp->vcp_memranges[1].vmr_gpa = LOWMEM_KB * 1024;
	vcp->vcp_memranges[1].vmr_size = len;
	mem_bytes -= len;

	/* Make sure that we do not place physical memory into MMIO ranges. */
	if (mem_bytes > VMM_PCI_MMIO_BAR_BASE - 0x100000)
		len = VMM_PCI_MMIO_BAR_BASE - 0x100000;
	else
		len = mem_bytes;

	/* Third memory region: 1MB - (1MB + len) */
	vcp->vcp_memranges[2].vmr_gpa = 0x100000;
	vcp->vcp_memranges[2].vmr_size = len;
	mem_bytes -= len;

	if (mem_bytes > 0) {
		/* Fourth memory region for the remaining memory (if any) */
		vcp->vcp_memranges[3].vmr_gpa = VMM_PCI_MMIO_BAR_END + 1;
		vcp->vcp_memranges[3].vmr_size = mem_bytes;
		vcp->vcp_nmemranges = 4;
	} else
		vcp->vcp_nmemranges = 3;
}

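/*
 * Editor's worked example (not in the original source): for a guest
 * configured with 512 MB, and assuming VMM_PCI_MMIO_BAR_BASE lies
 * above 512 MB + 1 MB, the map built above comes out as
 *
 *	range 0: [0, LOWMEM_KB * 1024)		DOS low memory
 *	range 1: [LOWMEM_KB * 1024, 1MB)	ROM/VGA hole, backed by RAM
 *	range 2: [1MB, 512MB)			remaining guest memory
 *
 * with vcp_nmemranges = 3. A fourth range starting at
 * VMM_PCI_MMIO_BAR_END + 1 is added only when the configured memory
 * does not fit below the MMIO hole.
 */
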
/*
 * alloc_guest_mem
 *
 * Allocates memory for the guest.
 * Instead of doing a single allocation with one mmap(), we allocate memory
 * separately for every range for the following reasons:
 * - ASLR for the individual ranges
 * - to reduce memory consumption in the UVM subsystem: if vmm(4) had to
 *   map the single mmap'd userspace memory to the individual guest physical
 *   memory ranges, the underlying amap of the single mmap'd range would have
 *   to allocate per-page reference counters. The reason is that the
 *   individual guest physical ranges would reference the single mmap'd region
 *   only partially. However, if every guest physical range has its own
 *   corresponding mmap'd userspace allocation, there are no partial
 *   references: every guest physical range fully references an mmap'd
 *   range => no per-page reference counters have to be allocated.
 *
 * Return values:
 *  0: success
 *  !0: failure - errno indicating the source of the failure
 */
int
alloc_guest_mem(struct vm_create_params *vcp)
{
	void *p;
	int ret;
	size_t i, j;
	struct vm_mem_range *vmr;

	for (i = 0; i < vcp->vcp_nmemranges; i++) {
		vmr = &vcp->vcp_memranges[i];
		p = mmap(NULL, vmr->vmr_size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANON, -1, 0);
		if (p == MAP_FAILED) {
			ret = errno;
			for (j = 0; j < i; j++) {
				vmr = &vcp->vcp_memranges[j];
				munmap((void *)vmr->vmr_va, vmr->vmr_size);
			}

			return (ret);
		}

		vmr->vmr_va = (vaddr_t)p;
	}

	return (0);
}

/*
 * vmm_create_vm
 *
 * Requests vmm(4) to create a new VM using the supplied creation
 * parameters. This operation results in the creation of the in-kernel
 * structures for the VM, but does not start the VM's vcpu(s).
 *
 * Parameters:
 *  vcp: vm_create_params struct containing the VM's desired creation
 *      configuration
 *
 * Return values:
 *  0: success
 *  !0 : ioctl to vmm(4) failed
 */
int
vmm_create_vm(struct vm_create_params *vcp)
{
	/* Sanity check arguments */
	if (vcp->vcp_ncpus > VMM_MAX_VCPUS_PER_VM)
		return (EINVAL);

	if (vcp->vcp_nmemranges == 0 ||
	    vcp->vcp_nmemranges > VMM_MAX_MEM_RANGES)
		return (EINVAL);

	if (vcp->vcp_ndisks > VMM_MAX_DISKS_PER_VM)
		return (EINVAL);

	if (vcp->vcp_nnics > VMM_MAX_NICS_PER_VM)
		return (EINVAL);

	if (ioctl(env->vmd_fd, VMM_IOC_CREATE, vcp) < 0)
		return (errno);

	return (0);
}

/*
 * init_emulated_hw
 *
 * Initializes the userspace hardware emulation
 */
void
init_emulated_hw(struct vmop_create_params *vmc, int *child_disks,
    int *child_taps)
{
	struct vm_create_params *vcp = &vmc->vmc_params;
	int i;
	uint64_t memlo, memhi;

	/* Calculate memory size for NVRAM registers */
	memlo = memhi = 0;
	if (vcp->vcp_nmemranges > 2)
		memlo = vcp->vcp_memranges[2].vmr_size - 15 * 0x100000;

	if (vcp->vcp_nmemranges > 3)
		memhi = vcp->vcp_memranges[3].vmr_size;

	/* Reset the IO port map */
	memset(&ioports_map, 0, sizeof(io_fn_t) * MAX_PORTS);

	/* Init i8253 PIT */
	i8253_init(vcp->vcp_id);
	ioports_map[TIMER_CTRL] = vcpu_exit_i8253;
	ioports_map[TIMER_BASE + TIMER_CNTR0] = vcpu_exit_i8253;
	ioports_map[TIMER_BASE + TIMER_CNTR1] = vcpu_exit_i8253;
	ioports_map[TIMER_BASE + TIMER_CNTR2] = vcpu_exit_i8253;

	/* Init mc146818 RTC */
	mc146818_init(vcp->vcp_id, memlo, memhi);
	ioports_map[IO_RTC] = vcpu_exit_mc146818;
	ioports_map[IO_RTC + 1] = vcpu_exit_mc146818;

	/* Init master and slave PICs */
	i8259_init();
	ioports_map[IO_ICU1] = vcpu_exit_i8259;
	ioports_map[IO_ICU1 + 1] = vcpu_exit_i8259;
	ioports_map[IO_ICU2] = vcpu_exit_i8259;
	ioports_map[IO_ICU2 + 1] = vcpu_exit_i8259;

	/* Init ns8250 UART */
	ns8250_init(con_fd, vcp->vcp_id);
	for (i = COM1_DATA; i <= COM1_SCR; i++)
		ioports_map[i] = vcpu_exit_com;

	/* Initialize PCI */
	for (i = VMM_PCI_IO_BAR_BASE; i <= VMM_PCI_IO_BAR_END; i++)
		ioports_map[i] = vcpu_exit_pci;

	ioports_map[PCI_MODE1_ADDRESS_REG] = vcpu_exit_pci;
	ioports_map[PCI_MODE1_DATA_REG] = vcpu_exit_pci;
	ioports_map[PCI_MODE1_DATA_REG + 1] = vcpu_exit_pci;
	ioports_map[PCI_MODE1_DATA_REG + 2] = vcpu_exit_pci;
	ioports_map[PCI_MODE1_DATA_REG + 3] = vcpu_exit_pci;
	pci_init();

	/* Initialize virtio devices */
	virtio_init(current_vm, child_disks, child_taps);
}

/*
 * restore_emulated_hw
 *
 * Restores the userspace hardware emulation from fd
 */
void
restore_emulated_hw(struct vm_create_params *vcp, int fd,
    int *child_taps, int *child_disks)
{
	int i;

	memset(&ioports_map, 0, sizeof(io_fn_t) * MAX_PORTS);

	/* Init i8253 PIT */
	i8253_restore(fd, vcp->vcp_id);
	ioports_map[TIMER_CTRL] = vcpu_exit_i8253;
	ioports_map[TIMER_BASE + TIMER_CNTR0] = vcpu_exit_i8253;
	ioports_map[TIMER_BASE + TIMER_CNTR1] = vcpu_exit_i8253;
	ioports_map[TIMER_BASE + TIMER_CNTR2] = vcpu_exit_i8253;

	/* Init master and slave PICs */
	i8259_restore(fd);
	ioports_map[IO_ICU1] = vcpu_exit_i8259;
	ioports_map[IO_ICU1 + 1] = vcpu_exit_i8259;
	ioports_map[IO_ICU2] = vcpu_exit_i8259;
	ioports_map[IO_ICU2 + 1] = vcpu_exit_i8259;

	/* Init ns8250 UART */
	ns8250_restore(fd, con_fd, vcp->vcp_id);
	for (i = COM1_DATA; i <= COM1_SCR; i++)
		ioports_map[i] = vcpu_exit_com;

	/* Init mc146818 RTC */
	mc146818_restore(fd, vcp->vcp_id);
	ioports_map[IO_RTC] = vcpu_exit_mc146818;
	ioports_map[IO_RTC + 1] = vcpu_exit_mc146818;

	/* Initialize PCI */
	for (i = VMM_PCI_IO_BAR_BASE; i <= VMM_PCI_IO_BAR_END; i++)
		ioports_map[i] = vcpu_exit_pci;

	ioports_map[PCI_MODE1_ADDRESS_REG] = vcpu_exit_pci;
	ioports_map[PCI_MODE1_DATA_REG] = vcpu_exit_pci;
	ioports_map[PCI_MODE1_DATA_REG + 1] = vcpu_exit_pci;
	ioports_map[PCI_MODE1_DATA_REG + 2] = vcpu_exit_pci;
	ioports_map[PCI_MODE1_DATA_REG + 3] = vcpu_exit_pci;
	pci_restore(fd);
	virtio_restore(fd, current_vm, child_disks, child_taps);
}

/*
 * run_vm
 *
 * Runs the VM whose creation parameters are specified in vcp
 *
 * Parameters:
 *  child_disks: previously-opened child VM disk file descriptors
 *  child_taps: previously-opened child tap file descriptors
 *  vmc: vmop_create_params struct containing the VM's desired creation
 *      configuration
 *  vrs: VCPU register state to initialize
 *
 * Return values:
 *  0: the VM exited normally
 *  !0 : the VM exited abnormally or failed to start
 */
int
run_vm(int *child_disks, int *child_taps, struct vmop_create_params *vmc,
    struct vcpu_reg_state *vrs)
{
	struct vm_create_params *vcp = &vmc->vmc_params;
	struct vm_rwregs_params vregsp;
	uint8_t evdone = 0;
	size_t i;
	int ret;
	pthread_t *tid, evtid;
	struct vm_run_params **vrp;
	void *exit_status;

	if (vcp == NULL)
		return (EINVAL);

	if (child_disks == NULL && vcp->vcp_ndisks != 0)
		return (EINVAL);

	if (child_taps == NULL && vcp->vcp_nnics != 0)
		return (EINVAL);

	if (vcp->vcp_ncpus > VMM_MAX_VCPUS_PER_VM)
		return (EINVAL);

	if (vcp->vcp_ndisks > VMM_MAX_DISKS_PER_VM)
		return (EINVAL);

	if (vcp->vcp_nnics > VMM_MAX_NICS_PER_VM)
		return (EINVAL);

	if (vcp->vcp_nmemranges == 0 ||
	    vcp->vcp_nmemranges > VMM_MAX_MEM_RANGES)
		return (EINVAL);

	tid = calloc(vcp->vcp_ncpus, sizeof(pthread_t));
	vrp = calloc(vcp->vcp_ncpus, sizeof(struct vm_run_params *));
	if (tid == NULL || vrp == NULL) {
		log_warn("%s: memory allocation error - exiting.",
		    __progname);
		return (ENOMEM);
	}

	log_debug("%s: initializing hardware for vm %s", __func__,
	    vcp->vcp_name);

	if (!current_vm->vm_received)
		init_emulated_hw(vmc, child_disks, child_taps);

	ret = pthread_mutex_init(&threadmutex, NULL);
	if (ret) {
		log_warn("%s: could not initialize thread state mutex",
		    __func__);
		return (ret);
	}
	ret = pthread_cond_init(&threadcond, NULL);
	if (ret) {
		log_warn("%s: could not initialize thread state "
		    "condition variable", __func__);
		return (ret);
	}

	mutex_lock(&threadmutex);

	log_debug("%s: starting vcpu threads for vm %s", __func__,
	    vcp->vcp_name);

	/*
	 * Create and launch one thread for each VCPU. These threads may
	 * migrate between PCPUs over time; the need to reload CPU state
	 * in such situations is detected and performed by vmm(4) in the
	 * kernel.
	 */
	for (i = 0; i < vcp->vcp_ncpus; i++) {
		vrp[i] = malloc(sizeof(struct vm_run_params));
		if (vrp[i] == NULL) {
			log_warn("%s: memory allocation error - "
			    "exiting.", __progname);
			/* caller will exit, so skip freeing */
			return (ENOMEM);
		}
		vrp[i]->vrp_exit = malloc(sizeof(union vm_exit));
		if (vrp[i]->vrp_exit == NULL) {
			log_warn("%s: memory allocation error - "
			    "exiting.", __progname);
			/* caller will exit, so skip freeing */
			return (ENOMEM);
		}
		vrp[i]->vrp_vm_id = vcp->vcp_id;
		vrp[i]->vrp_vcpu_id = i;

		if (vcpu_reset(vcp->vcp_id, i, vrs)) {
			log_warnx("%s: cannot reset VCPU %zu - exiting.",
			    __progname, i);
			return (EIO);
		}

		/* once more because vcpu_reset changes the registers */
		if (current_vm->vm_received) {
			vregsp.vrwp_vm_id = vcp->vcp_id;
			vregsp.vrwp_vcpu_id = i;
			vregsp.vrwp_regs = *vrs;
			vregsp.vrwp_mask = VM_RWREGS_ALL;
			if ((ret = ioctl(env->vmd_fd, VMM_IOC_WRITEREGS,
			    &vregsp)) < 0) {
				log_warn("%s: writeregs failed", __func__);
				return (ret);
			}
		}

		ret = pthread_cond_init(&vcpu_run_cond[i], NULL);
		if (ret) {
			log_warnx("%s: cannot initialize cond var (%d)",
			    __progname, ret);
			return (ret);
		}

		ret = pthread_mutex_init(&vcpu_run_mtx[i], NULL);
		if (ret) {
			log_warnx("%s: cannot initialize mtx (%d)",
			    __progname, ret);
			return (ret);
		}

		vcpu_hlt[i] = 0;

		/* Start each VCPU run thread at vcpu_run_loop */
		ret = pthread_create(&tid[i], NULL, vcpu_run_loop, vrp[i]);
		if (ret) {
			/* caller will _exit after this return */
			errno = ret;
			log_warn("%s: could not create vcpu thread %zu",
			    __func__, i);
			return (ret);
		}
	}

	log_debug("%s: waiting on events for VM %s", __func__, vcp->vcp_name);
	ret = pthread_create(&evtid, NULL, event_thread, &evdone);
	if (ret) {
		errno = ret;
		log_warn("%s: could not create event thread", __func__);
		return (ret);
	}

	for (;;) {
		ret = pthread_cond_wait(&threadcond, &threadmutex);
		if (ret) {
			log_warn("%s: waiting on thread state condition "
			    "variable failed", __func__);
			return (ret);
		}

		/*
		 * Did a VCPU thread exit with an error? => return the first one
		 */
		for (i = 0; i < vcp->vcp_ncpus; i++) {
			if (vcpu_done[i] == 0)
				continue;

			if (pthread_join(tid[i], &exit_status)) {
				log_warn("%s: failed to join thread %zd - "
				    "exiting", __progname, i);
				return (EIO);
			}

			ret = (intptr_t)exit_status;
		}

		/* Did the event thread exit? => return with an error */
		if (evdone) {
			if (pthread_join(evtid, &exit_status)) {
				log_warn("%s: failed to join event thread - "
				    "exiting", __progname);
				return (EIO);
			}

			log_warnx("%s: vm %d event thread exited "
			    "unexpectedly", __progname, vcp->vcp_id);
			return (EIO);
		}

		/* Did all VCPU threads exit successfully? => return */
		for (i = 0; i < vcp->vcp_ncpus; i++) {
			if (vcpu_done[i] == 0)
				break;
		}
		if (i == vcp->vcp_ncpus)
			return (ret);

		/* Some more threads to wait for, start over */
	}

	return (ret);
}

void *
event_thread(void *arg)
{
	uint8_t *donep = arg;
	intptr_t ret;

	ret = event_dispatch();

	mutex_lock(&threadmutex);
	*donep = 1;
	pthread_cond_signal(&threadcond);
	mutex_unlock(&threadmutex);

	return (void *)ret;
}

/*
 * vcpu_run_loop
 *
 * Runs a single VCPU until vmm(4) requires help handling an exit,
 * or the VM terminates.
 *
 * Parameters:
 *  arg: vm_run_params for the VCPU being run by this thread
 *
 * Return values:
 *  NULL: the VCPU shutdown properly
 *  !NULL: error processing VCPU run, or the VCPU shutdown abnormally
 */
void *
vcpu_run_loop(void *arg)
{
	struct vm_run_params *vrp = (struct vm_run_params *)arg;
	intptr_t ret = 0;
	int irq;
	uint32_t n;

	vrp->vrp_continue = 0;
	n = vrp->vrp_vcpu_id;

	for (;;) {
		ret = pthread_mutex_lock(&vcpu_run_mtx[n]);

		if (ret) {
			log_warnx("%s: can't lock vcpu run mtx (%d)",
			    __func__, (int)ret);
			return ((void *)ret);
		}

		/* If we are halted or paused, wait */
		if (vcpu_hlt[n]) {
			while (current_vm->vm_paused == 1) {
				ret = pthread_cond_wait(&vcpu_run_cond[n],
				    &vcpu_run_mtx[n]);
				if (ret) {
					log_warnx(
					    "%s: can't wait on cond (%d)",
					    __func__, (int)ret);
					(void)pthread_mutex_unlock(
					    &vcpu_run_mtx[n]);
					break;
				}
			}
			if (vcpu_hlt[n]) {
				ret = pthread_cond_wait(&vcpu_run_cond[n],
				    &vcpu_run_mtx[n]);

				if (ret) {
					log_warnx(
					    "%s: can't wait on cond (%d)",
					    __func__, (int)ret);
					(void)pthread_mutex_unlock(
					    &vcpu_run_mtx[n]);
					break;
				}
			}
		}

		ret = pthread_mutex_unlock(&vcpu_run_mtx[n]);

		if (ret) {
			log_warnx("%s: can't unlock mutex on cond (%d)",
			    __func__, (int)ret);
			break;
		}

		if (vrp->vrp_irqready && i8259_is_pending()) {
			irq = i8259_ack();
			vrp->vrp_irq = irq;
		} else
			vrp->vrp_irq = 0xFFFF;

		/* Still more pending? */
		if (i8259_is_pending()) {
			/*
			 * XXX can probably avoid ioctls here by providing
			 * intr in vrp
			 */
			if (vcpu_pic_intr(vrp->vrp_vm_id,
			    vrp->vrp_vcpu_id, 1)) {
				fatal("can't set INTR");
			}
		} else {
			if (vcpu_pic_intr(vrp->vrp_vm_id,
			    vrp->vrp_vcpu_id, 0)) {
				fatal("can't clear INTR");
			}
		}

		if (ioctl(env->vmd_fd, VMM_IOC_RUN, vrp) < 0) {
			/* If run ioctl failed, exit */
			ret = errno;
			log_warn("%s: vm %d / vcpu %d run ioctl failed",
			    __func__, vrp->vrp_vm_id, n);
			break;
		}

		/* If the VM is terminating, exit normally */
		if (vrp->vrp_exit_reason == VM_EXIT_TERMINATED) {
			ret = (intptr_t)NULL;
			break;
		}

		if (vrp->vrp_exit_reason != VM_EXIT_NONE) {
			/*
			 * vmm(4) needs help handling an exit, handle in
			 * vcpu_exit.
			 */
			ret = vcpu_exit(vrp);
			if (ret)
				break;
		}
	}

	mutex_lock(&threadmutex);
	vcpu_done[n] = 1;
	pthread_cond_signal(&threadcond);
	mutex_unlock(&threadmutex);

	return ((void *)ret);
}

/*
 * vcpu_pic_intr
 *
 * Requests vmm(4) to set (intr = 1) or clear (intr = 0) the virtual
 * INTR line on the indicated VCPU.
 */
int
vcpu_pic_intr(uint32_t vm_id, uint32_t vcpu_id, uint8_t intr)
{
	struct vm_intr_params vip;

	memset(&vip, 0, sizeof(vip));

	vip.vip_vm_id = vm_id;
	vip.vip_vcpu_id = vcpu_id; /* XXX always 0? */
	vip.vip_intr = intr;

	if (ioctl(env->vmd_fd, VMM_IOC_INTR, &vip) < 0)
		return (errno);

	return (0);
}

/*
 * vcpu_exit_pci
 *
 * Handle all I/O to the emulated PCI subsystem.
 *
 * Parameters:
 *  vrp: vcpu run parameters containing guest state for this exit
 *
 * Return value:
 *  Interrupt to inject to the guest VM, or 0xFF if no interrupt should
 *      be injected.
 */
uint8_t
vcpu_exit_pci(struct vm_run_params *vrp)
{
	union vm_exit *vei = vrp->vrp_exit;
	uint8_t intr;

	intr = 0xFF;

	switch (vei->vei.vei_port) {
	case PCI_MODE1_ADDRESS_REG:
		pci_handle_address_reg(vrp);
		break;
	case PCI_MODE1_DATA_REG:
	case PCI_MODE1_DATA_REG + 1:
	case PCI_MODE1_DATA_REG + 2:
	case PCI_MODE1_DATA_REG + 3:
		pci_handle_data_reg(vrp);
		break;
	case VMM_PCI_IO_BAR_BASE ... VMM_PCI_IO_BAR_END:
		intr = pci_handle_io(vrp);
		break;
	default:
		log_warnx("%s: unknown PCI register 0x%llx",
		    __progname, (uint64_t)vei->vei.vei_port);
		break;
	}

	return (intr);
}

/*
 * vcpu_exit_inout
 *
 * Handle all I/O exits that need to be emulated in vmd. This includes the
 * i8253 PIT, the com1 ns8250 UART, and the MC146818 RTC/NVRAM device.
 *
 * Parameters:
 *  vrp: vcpu run parameters containing guest state for this exit
 */
void
vcpu_exit_inout(struct vm_run_params *vrp)
{
	union vm_exit *vei = vrp->vrp_exit;
	uint8_t intr = 0xFF;

	if (ioports_map[vei->vei.vei_port] != NULL)
		intr = ioports_map[vei->vei.vei_port](vrp);
	else if (vei->vei.vei_dir == VEI_DIR_IN)
		set_return_data(vei, 0xFFFFFFFF);

	if (intr != 0xFF)
		vcpu_assert_pic_irq(vrp->vrp_vm_id, vrp->vrp_vcpu_id, intr);
}

/*
 * vcpu_exit
 *
 * Handle a vcpu exit. This function is called when it is determined that
 * vmm(4) requires the assistance of vmd to support a particular guest
 * exit type (eg, accessing an I/O port or device). Guest state is contained
 * in 'vrp', and will be resent to vmm(4) on exit completion.
 *
 * Upon conclusion of handling the exit, the function determines if any
 * interrupts should be injected into the guest, and asserts the proper
 * IRQ line whose interrupt should be vectored.
 *
 * Parameters:
 *  vrp: vcpu run parameters containing guest state for this exit
 *
 * Return values:
 *  0: the exit was handled successfully
 *  1: an error occurred (eg, unknown exit reason passed in 'vrp')
 */
int
vcpu_exit(struct vm_run_params *vrp)
{
	int ret;

	switch (vrp->vrp_exit_reason) {
	case VMX_EXIT_INT_WINDOW:
	case SVM_VMEXIT_VINTR:
	case VMX_EXIT_CPUID:
	case VMX_EXIT_EXTINT:
	case SVM_VMEXIT_INTR:
	case VMX_EXIT_EPT_VIOLATION:
	case SVM_VMEXIT_NPF:
	case SVM_VMEXIT_MSR:
	case SVM_VMEXIT_CPUID:
		/*
		 * We may be exiting to vmd to handle a pending interrupt but
		 * at the same time the last exit type may have been one of
		 * these. In this case, there's nothing extra to be done
		 * here (and falling through to the default case below results
		 * in more vmd log spam).
		 */
		break;
	case VMX_EXIT_IO:
	case SVM_VMEXIT_IOIO:
		vcpu_exit_inout(vrp);
		break;
	case VMX_EXIT_HLT:
	case SVM_VMEXIT_HLT:
		ret = pthread_mutex_lock(&vcpu_run_mtx[vrp->vrp_vcpu_id]);
		if (ret) {
			log_warnx("%s: can't lock vcpu mutex (%d)",
			    __func__, ret);
			return (ret);
		}
		vcpu_hlt[vrp->vrp_vcpu_id] = 1;
		ret = pthread_mutex_unlock(&vcpu_run_mtx[vrp->vrp_vcpu_id]);
		if (ret) {
			log_warnx("%s: can't unlock vcpu mutex (%d)",
			    __func__, ret);
			return (ret);
		}
		break;
	case VMX_EXIT_TRIPLE_FAULT:
	case SVM_VMEXIT_SHUTDOWN:
		/* reset VM */
		return (EAGAIN);
	default:
		log_debug("%s: unknown exit reason 0x%x",
		    __progname, vrp->vrp_exit_reason);
	}

	/* Process any pending traffic */
	vionet_process_rx(vrp->vrp_vm_id);

	vrp->vrp_continue = 1;

	return (0);
}

/*
 * find_gpa_range
 *
 * Search for a contiguous guest physical mem range.
 *
 * Parameters:
 *  vcp: VM create parameters that contain the memory map to search in
 *  gpa: the starting guest physical address
 *  len: the length of the memory range
 *
 * Return values:
 *  NULL: on failure if there is no memory range as described by the parameters
 *  Pointer to vm_mem_range that contains the start of the range otherwise.
 */
static struct vm_mem_range *
find_gpa_range(struct vm_create_params *vcp, paddr_t gpa, size_t len)
{
	size_t i, n;
	struct vm_mem_range *vmr;

	/* Find the first vm_mem_range that contains gpa */
	for (i = 0; i < vcp->vcp_nmemranges; i++) {
		vmr = &vcp->vcp_memranges[i];
		if (vmr->vmr_gpa + vmr->vmr_size >= gpa)
			break;
	}

	/* No range found. */
	if (i == vcp->vcp_nmemranges)
		return (NULL);

	/*
	 * vmr may cover the range [gpa, gpa + len) only partly. Make
	 * sure that the following vm_mem_ranges are contiguous and
	 * cover the rest.
	 */
	n = vmr->vmr_size - (gpa - vmr->vmr_gpa);
	if (len < n)
		len = 0;
	else
		len -= n;
	gpa = vmr->vmr_gpa + vmr->vmr_size;
	for (i = i + 1; len != 0 && i < vcp->vcp_nmemranges; i++) {
		vmr = &vcp->vcp_memranges[i];
		if (gpa != vmr->vmr_gpa)
			return (NULL);
		if (len <= vmr->vmr_size)
			len = 0;
		else
			len -= vmr->vmr_size;

		gpa = vmr->vmr_gpa + vmr->vmr_size;
	}

	if (len != 0)
		return (NULL);

	return (vmr);
}

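/*
 * Editor's worked example (not in the original source): with two
 * contiguous ranges [0x0, 0x1000) and [0x1000, 0x3000), a lookup of
 * gpa = 0xf00, len = 0x200 matches the first range, leaves 0x100
 * bytes uncovered, and succeeds because the second range starts
 * exactly at 0x1000. A gap between the two ranges would make the
 * lookup return NULL instead.
 */
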
/*
 * vaddr_mem
 *
 * Translates a guest physical address into the corresponding host
 * virtual address, or returns NULL if the range [gpa, gpa + len)
 * does not lie within a single guest memory range.
 */
void *
vaddr_mem(paddr_t gpa, size_t len)
{
	struct vm_create_params *vcp = &current_vm->vm_params.vmc_params;
	size_t i;
	struct vm_mem_range *vmr;
	paddr_t gpend = gpa + len;

	/* Find the first vm_mem_range that contains gpa */
	for (i = 0; i < vcp->vcp_nmemranges; i++) {
		vmr = &vcp->vcp_memranges[i];
		if (gpa < vmr->vmr_gpa)
			continue;

		if (gpend >= vmr->vmr_gpa + vmr->vmr_size)
			continue;

		return ((char *)vmr->vmr_va + (gpa - vmr->vmr_gpa));
	}

	return (NULL);
}

/*
 * write_mem
 *
 * Copies data from 'buf' into the guest VM's memory at paddr 'dst'.
 *
 * Parameters:
 *  dst: the destination paddr_t in the guest VM
 *  buf: data to copy
 *  len: number of bytes to copy
 *
 * Return values:
 *  0: success
 *  EINVAL: if the guest physical memory range [dst, dst + len) does not
 *      exist in the guest.
 */
int
write_mem(paddr_t dst, const void *buf, size_t len)
{
	const char *from = buf;
	char *to;
	size_t n, off;
	struct vm_mem_range *vmr;

	vmr = find_gpa_range(&current_vm->vm_params.vmc_params, dst, len);
	if (vmr == NULL) {
		errno = EINVAL;
		log_warn("%s: failed - invalid memory range dst = 0x%lx, "
		    "len = 0x%zx", __func__, dst, len);
		return (EINVAL);
	}

	off = dst - vmr->vmr_gpa;
	while (len != 0) {
		n = vmr->vmr_size - off;
		if (len < n)
			n = len;

		to = (char *)vmr->vmr_va + off;
		memcpy(to, from, n);

		from += n;
		len -= n;
		off = 0;
		vmr++;
	}

	return (0);
}

/*
 * read_mem
 *
 * Reads memory at guest paddr 'src' into 'buf'.
 *
 * Parameters:
 *  src: the source paddr_t in the guest VM to read from.
 *  buf: destination (local) buffer
 *  len: number of bytes to read
 *
 * Return values:
 *  0: success
 *  EINVAL: if the guest physical memory range [src, src + len) does not
 *      exist in the guest.
 */
int
read_mem(paddr_t src, void *buf, size_t len)
{
	char *from, *to = buf;
	size_t n, off;
	struct vm_mem_range *vmr;

	vmr = find_gpa_range(&current_vm->vm_params.vmc_params, src, len);
	if (vmr == NULL) {
		errno = EINVAL;
		log_warn("%s: failed - invalid memory range src = 0x%lx, "
		    "len = 0x%zx", __func__, src, len);
		return (EINVAL);
	}

	off = src - vmr->vmr_gpa;
	while (len != 0) {
		n = vmr->vmr_size - off;
		if (len < n)
			n = len;

		from = (char *)vmr->vmr_va + off;
		memcpy(to, from, n);

		to += n;
		len -= n;
		off = 0;
		vmr++;
	}

	return (0);
}

/*
 * iovec_mem
 *
 * Fills 'iov' (up to 'iovcnt' entries) with the host virtual addresses
 * backing the guest physical range [src, src + len).
 *
 * Return values:
 *  the number of iovec entries used on success
 *  -1 if the range is invalid (EINVAL) or iovcnt is too small (ENOMEM)
 */
int
iovec_mem(paddr_t src, size_t len, struct iovec *iov, int iovcnt)
{
	size_t n, off;
	struct vm_mem_range *vmr;
	int niov = 0;

	vmr = find_gpa_range(&current_vm->vm_params.vmc_params, src, len);
	if (vmr == NULL) {
		errno = EINVAL;
		return (-1);
	}

	off = src - vmr->vmr_gpa;
	while (len > 0) {
		if (niov == iovcnt) {
			errno = ENOMEM;
			return (-1);
		}

		n = vmr->vmr_size - off;
		if (len < n)
			n = len;

		iov[niov].iov_base = (char *)vmr->vmr_va + off;
		iov[niov].iov_len = n;

		niov++;

		len -= n;
		off = 0;
		vmr++;
	}

	return (niov);
}

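/*
 * Editor's usage sketch (guarded out; not part of the original file):
 * iovec_mem() lets a device move data between a file and guest memory
 * without bouncing through an intermediate buffer, e.g. with preadv(2).
 */
#if 0
static ssize_t
example_pread_guest(int fd, paddr_t gpa, size_t len, off_t off)
{
	struct iovec	iov[VMM_MAX_MEM_RANGES];
	int		niov;

	if ((niov = iovec_mem(gpa, len, iov, VMM_MAX_MEM_RANGES)) == -1)
		return (-1);
	return (preadv(fd, iov, niov, off));
}
#endif
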
/*
 * vcpu_assert_pic_irq
 *
 * Injects the specified IRQ on the supplied vcpu/vm
 *
 * Parameters:
 *  vm_id: VM ID to inject to
 *  vcpu_id: VCPU ID to inject to
 *  irq: IRQ to inject
 */
void
vcpu_assert_pic_irq(uint32_t vm_id, uint32_t vcpu_id, int irq)
{
	int ret;

	i8259_assert_irq(irq);

	if (i8259_is_pending()) {
		if (vcpu_pic_intr(vm_id, vcpu_id, 1))
			fatalx("%s: can't assert INTR", __func__);

		ret = pthread_mutex_lock(&vcpu_run_mtx[vcpu_id]);
		if (ret)
			fatalx("%s: can't lock vcpu mtx (%d)", __func__, ret);

		vcpu_hlt[vcpu_id] = 0;
		ret = pthread_cond_signal(&vcpu_run_cond[vcpu_id]);
		if (ret)
			fatalx("%s: can't signal (%d)", __func__, ret);
		ret = pthread_mutex_unlock(&vcpu_run_mtx[vcpu_id]);
		if (ret)
			fatalx("%s: can't unlock vcpu mtx (%d)", __func__, ret);
	}
}

/*
 * vcpu_deassert_pic_irq
 *
 * Clears the specified IRQ on the supplied vcpu/vm
 *
 * Parameters:
 *  vm_id: VM ID to clear in
 *  vcpu_id: VCPU ID to clear in
 *  irq: IRQ to clear
 */
void
vcpu_deassert_pic_irq(uint32_t vm_id, uint32_t vcpu_id, int irq)
{
	i8259_deassert_irq(irq);

	if (!i8259_is_pending()) {
		if (vcpu_pic_intr(vm_id, vcpu_id, 0))
			fatalx("%s: can't deassert INTR", __func__);
	}
}

/*
 * fd_hasdata
 *
 * Determines if data can be read from a file descriptor.
 *
 * Parameters:
 *  fd: the fd to check
 *
 * Return values:
 *  1 if data can be read from an fd, or 0 otherwise.
 */
int
fd_hasdata(int fd)
{
	struct pollfd pfd[1];
	int nready, hasdata = 0;

	pfd[0].fd = fd;
	pfd[0].events = POLLIN;
	nready = poll(pfd, 1, 0);
	if (nready == -1)
		log_warn("checking file descriptor for data failed");
	else if (nready == 1 && pfd[0].revents & POLLIN)
		hasdata = 1;
	return (hasdata);
}

/*
 * mutex_lock
 *
 * Wrapper function for pthread_mutex_lock that does error checking and that
 * exits on failure
 */
void
mutex_lock(pthread_mutex_t *m)
{
	int ret;

	ret = pthread_mutex_lock(m);
	if (ret) {
		errno = ret;
		fatal("could not acquire mutex");
	}
}

/*
 * mutex_unlock
 *
 * Wrapper function for pthread_mutex_unlock that does error checking and that
 * exits on failure
 */
void
mutex_unlock(pthread_mutex_t *m)
{
	int ret;

	ret = pthread_mutex_unlock(m);
	if (ret) {
		errno = ret;
		fatal("could not release mutex");
	}
}

/*
 * set_return_data
 *
 * Utility function for manipulating register data in vm exit info
 * structs. This function ensures that the data is copied to the
 * vei->vei.vei_data field with the proper size for the operation being
 * performed.
 *
 * Parameters:
 *  vei: exit information
 *  data: return data
 */
void
set_return_data(union vm_exit *vei, uint32_t data)
{
	switch (vei->vei.vei_size) {
	case 1:
		vei->vei.vei_data &= ~0xFF;
		vei->vei.vei_data |= (uint8_t)data;
		break;
	case 2:
		vei->vei.vei_data &= ~0xFFFF;
		vei->vei.vei_data |= (uint16_t)data;
		break;
	case 4:
		vei->vei.vei_data = data;
		break;
	}
}

/*
 * get_input_data
 *
 * Utility function for manipulating register data in vm exit info
 * structs. This function ensures that the data is copied from the
 * vei->vei.vei_data field with the proper size for the operation being
 * performed.
 *
 * Parameters:
 *  vei: exit information
 *  data: location to store the result
 */
void
get_input_data(union vm_exit *vei, uint32_t *data)
{
	switch (vei->vei.vei_size) {
	case 1:
		*data &= 0xFFFFFF00;
		*data |= (uint8_t)vei->vei.vei_data;
		break;
	case 2:
		*data &= 0xFFFF0000;
		*data |= (uint16_t)vei->vei.vei_data;
		break;
	case 4:
		*data = vei->vei.vei_data;
		break;
	default:
		log_warnx("%s: invalid i/o size %d", __func__,
		    vei->vei.vei_size);
	}
}
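
/*
 * Editor's illustrative sketch (guarded out; not part of the original
 * file): a minimal handler for a hypothetical emulated port, showing
 * how get_input_data()/set_return_data() pair up with the io_fn_t
 * handlers registered in ioports_map[]. "example_reg" is a made-up
 * one-byte latch.
 */
#if 0
static uint8_t example_reg;

static uint8_t
vcpu_exit_example(struct vm_run_params *vrp)
{
	union vm_exit *vei = vrp->vrp_exit;
	uint32_t data = 0;

	if (vei->vei.vei_dir == VEI_DIR_OUT) {
		/* guest OUT instruction: latch the written value */
		get_input_data(vei, &data);
		example_reg = (uint8_t)data;
	} else {
		/* guest IN instruction: return the latched value */
		set_return_data(vei, example_reg);
	}

	return (0xFF);	/* no interrupt to inject */
}
#endif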