GCC Code Coverage Report
File:      lib/libkvm/kvm.c
Directory: ./
Date:      2017-11-13

            Exec   Total   Coverage
Lines:         0     392      0.0 %
Branches:      0     244      0.0 %

Source:
/*	$OpenBSD: kvm.c,v 1.62 2016/07/10 23:06:48 tedu Exp $ */
/*	$NetBSD: kvm.c,v 1.43 1996/05/05 04:31:59 gwr Exp $	*/

/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>	/* MAXCOMLEN MID_MACHINE */
#include <sys/proc.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysctl.h>

#include <sys/core.h>
#include <sys/exec.h>
#include <sys/kcore.h>

#include <stddef.h>
#include <errno.h>
#include <ctype.h>
#include <db.h>
#include <fcntl.h>
#include <libgen.h>
#include <limits.h>
#include <nlist.h>
#include <paths.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <kvm.h>
#include <stdarg.h>

#include "kvm_private.h"

extern int __fdnlist(int, struct nlist *);

static int	kvm_dbopen(kvm_t *, const char *);
static int	kvm_opennamelist(kvm_t *, const char *);
static int	_kvm_get_header(kvm_t *);
static kvm_t	*_kvm_open(kvm_t *, const char *, const char *, const char *,
		     int, char *);
static int	clear_gap(kvm_t *, FILE *, int);

char *
kvm_geterr(kvm_t *kd)
{
	return (kd->errbuf);
}

/*
 * Wrapper around pread.
 */
ssize_t
_kvm_pread(kvm_t *kd, int fd, void *buf, size_t nbytes, off_t offset)
{
	ssize_t rval;

	errno = 0;
	rval = pread(fd, buf, nbytes, offset);
	if (rval == -1 || errno != 0) {
		_kvm_syserr(kd, kd->program, "pread");
	}
	return (rval);
}

/*
 * Wrapper around pwrite.
 */
ssize_t
_kvm_pwrite(kvm_t *kd, int fd, const void *buf, size_t nbytes, off_t offset)
{
	ssize_t rval;

	errno = 0;
	rval = pwrite(fd, buf, nbytes, offset);
	if (rval == -1 || errno != 0) {
		_kvm_syserr(kd, kd->program, "pwrite");
	}
	return (rval);
}

/*
 * Report an error using printf style arguments.  "program" is kd->program
 * on hard errors, and 0 on soft errors, so that under sun error emulation,
 * only hard errors are printed out (otherwise, programs like gdb will
 * generate tons of error messages when trying to access bogus pointers).
 */
void
_kvm_err(kvm_t *kd, const char *program, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (program != NULL) {
		(void)fprintf(stderr, "%s: ", program);
		(void)vfprintf(stderr, fmt, ap);
		(void)fputc('\n', stderr);
	} else
		(void)vsnprintf(kd->errbuf,
		    sizeof(kd->errbuf), fmt, ap);

	va_end(ap);
}

void
_kvm_syserr(kvm_t *kd, const char *program, const char *fmt, ...)
{
	va_list ap;
	size_t n;

	va_start(ap, fmt);
	if (program != NULL) {
		(void)fprintf(stderr, "%s: ", program);
		(void)vfprintf(stderr, fmt, ap);
		(void)fprintf(stderr, ": %s\n", strerror(errno));
	} else {
		char *cp = kd->errbuf;

		(void)vsnprintf(cp, sizeof(kd->errbuf), fmt, ap);
		n = strlen(cp);
		(void)snprintf(&cp[n], sizeof(kd->errbuf) - n, ": %s",
		    strerror(errno));
	}
	va_end(ap);
}

void *
_kvm_malloc(kvm_t *kd, size_t n)
{
	void *p;

	if ((p = malloc(n)) == NULL)
		_kvm_err(kd, kd->program, "%s", strerror(errno));
	return (p);
}

void *
_kvm_realloc(kvm_t *kd, void *p, size_t n)
{
	if ((p = realloc(p, n)) == NULL)
		_kvm_err(kd, kd->program, "%s", strerror(errno));
	return (p);
}

static kvm_t *
_kvm_open(kvm_t *kd, const char *uf, const char *mf, const char *sf,
    int flag, char *errout)
{
	struct stat st;

	kd->db = 0;
	kd->pmfd = -1;
	kd->vmfd = -1;
	kd->swfd = -1;
	kd->nlfd = -1;
	kd->alive = 0;
	kd->filebase = NULL;
	kd->procbase = NULL;
	kd->nbpg = getpagesize();
	kd->swapspc = 0;
	kd->argspc = 0;
	kd->argbuf = 0;
	kd->argv = 0;
	kd->vmst = NULL;
	kd->vm_page_buckets = 0;
	kd->kcore_hdr = 0;
	kd->cpu_dsize = 0;
	kd->cpu_data = 0;
	kd->dump_off = 0;

	if (flag & KVM_NO_FILES) {
		kd->alive = 1;
		return (kd);
	}

	if (uf && strlen(uf) >= PATH_MAX) {
		_kvm_err(kd, kd->program, "exec file name too long");
		goto failed;
	}
	if (flag != O_RDONLY && flag != O_WRONLY && flag != O_RDWR) {
		_kvm_err(kd, kd->program, "bad flags arg");
		goto failed;
	}
	flag |= O_CLOEXEC;

	if (mf == 0)
		mf = _PATH_MEM;

	if ((kd->pmfd = open(mf, flag)) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (fstat(kd->pmfd, &st) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (S_ISCHR(st.st_mode)) {
		/*
		 * If this is a character special device, then check that
		 * it's /dev/mem.  If so, open kmem too.  (Maybe we should
		 * make it work for either /dev/mem or /dev/kmem -- in either
		 * case you're working with a live kernel.)
		 */
		if (strcmp(mf, _PATH_MEM) != 0) {	/* XXX */
			_kvm_err(kd, kd->program,
				 "%s: not physical memory device", mf);
			goto failed;
		}
		if ((kd->vmfd = open(_PATH_KMEM, flag)) < 0) {
			_kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
			goto failed;
		}
		kd->alive = 1;
		if (sf != NULL && (kd->swfd = open(sf, flag)) < 0) {
			_kvm_syserr(kd, kd->program, "%s", sf);
			goto failed;
		}
		/*
		 * Open kvm nlist database.  We only try to use
		 * the pre-built database if the namelist file name
		 * pointer is NULL.  If the database cannot or should
		 * not be opened, open the namelist argument so we
		 * revert to slow nlist() calls.
		 * If no file is specified, try opening _PATH_KSYMS and
		 * fall back to _PATH_UNIX.
		 */
		if (kvm_dbopen(kd, uf ? uf : _PATH_UNIX) == -1 &&
		    kvm_opennamelist(kd, uf))
			goto failed;
	} else {
		/*
		 * This is a crash dump.
		 * Initialize the virtual address translation machinery,
		 * but first setup the namelist fd.
		 * If no file is specified, try opening _PATH_KSYMS and
		 * fall back to _PATH_UNIX.
		 */
		if (kvm_opennamelist(kd, uf))
			goto failed;

		/*
		 * If there is no valid core header, fail silently here.
		 * The address translations however will fail without
		 * header. Things can be made to run by calling
		 * kvm_dump_mkheader() before doing any translation.
		 */
		if (_kvm_get_header(kd) == 0) {
			if (_kvm_initvtop(kd) < 0)
				goto failed;
		}
	}
	return (kd);
failed:
	/*
	 * Copy out the error if doing sane error semantics.
	 */
	if (errout != 0)
		(void)strlcpy(errout, kd->errbuf, _POSIX2_LINE_MAX);
	(void)kvm_close(kd);
	return (0);
}

static int
kvm_opennamelist(kvm_t *kd, const char *uf)
{
	int fd;

	if (uf != NULL)
		fd = open(uf, O_RDONLY | O_CLOEXEC);
	else {
		fd = open(_PATH_KSYMS, O_RDONLY | O_CLOEXEC);
		uf = _PATH_UNIX;
		if (fd == -1)
			fd = open(uf, O_RDONLY | O_CLOEXEC);
	}
	if (fd == -1) {
		_kvm_syserr(kd, kd->program, "%s", uf);
		return (-1);
	}

	kd->nlfd = fd;
	return (0);
}

/*
 * The kernel dump file (from savecore) contains:
 *    kcore_hdr_t kcore_hdr;
 *    kcore_seg_t cpu_hdr;
 *    (opaque)    cpu_data; (size is cpu_hdr.c_size)
 *    kcore_seg_t mem_hdr;
 *    (memory)    mem_data; (size is mem_hdr.c_size)
 *
 * Note: khdr is padded to khdr.c_hdrsize;
 * cpu_hdr and mem_hdr are padded to khdr.c_seghdrsize
 */
static int
_kvm_get_header(kvm_t *kd)
{
	kcore_hdr_t	kcore_hdr;
	kcore_seg_t	cpu_hdr;
	kcore_seg_t	mem_hdr;
	size_t		offset;
	ssize_t		sz;

	/*
	 * Read the kcore_hdr_t
	 */
	sz = _kvm_pread(kd, kd->pmfd, &kcore_hdr, sizeof(kcore_hdr), (off_t)0);
	if (sz != sizeof(kcore_hdr)) {
		return (-1);
	}

	/*
	 * Currently, we only support dump-files made by the current
	 * architecture...
	 */
	if ((CORE_GETMAGIC(kcore_hdr) != KCORE_MAGIC) ||
	    (CORE_GETMID(kcore_hdr) != MID_MACHINE))
		return (-1);

	/*
	 * Currently, we only support exactly 2 segments: cpu-segment
	 * and data-segment in exactly that order.
	 */
	if (kcore_hdr.c_nseg != 2)
		return (-1);

	/*
	 * Save away the kcore_hdr.  All errors after this
	 * should do a "goto fail" to deallocate things.
	 */
	kd->kcore_hdr = _kvm_malloc(kd, sizeof(kcore_hdr));
	if (kd->kcore_hdr == NULL)
		goto fail;
	memcpy(kd->kcore_hdr, &kcore_hdr, sizeof(kcore_hdr));
	offset = kcore_hdr.c_hdrsize;

	/*
	 * Read the CPU segment header
	 */
	sz = _kvm_pread(kd, kd->pmfd, &cpu_hdr, sizeof(cpu_hdr), (off_t)offset);
	if (sz != sizeof(cpu_hdr)) {
		goto fail;
	}

	if ((CORE_GETMAGIC(cpu_hdr) != KCORESEG_MAGIC) ||
	    (CORE_GETFLAG(cpu_hdr) != CORE_CPU))
		goto fail;
	offset += kcore_hdr.c_seghdrsize;

	/*
	 * Read the CPU segment DATA.
	 */
	kd->cpu_dsize = cpu_hdr.c_size;
	kd->cpu_data = _kvm_malloc(kd, (size_t)cpu_hdr.c_size);
	if (kd->cpu_data == NULL)
		goto fail;

	sz = _kvm_pread(kd, kd->pmfd, kd->cpu_data, (size_t)cpu_hdr.c_size,
	    (off_t)offset);
	if (sz != (size_t)cpu_hdr.c_size) {
		goto fail;
	}

	offset += cpu_hdr.c_size;

	/*
	 * Read the next segment header: data segment
	 */
	sz = _kvm_pread(kd, kd->pmfd, &mem_hdr, sizeof(mem_hdr), (off_t)offset);
	if (sz != sizeof(mem_hdr)) {
		goto fail;
	}

	offset += kcore_hdr.c_seghdrsize;

	if ((CORE_GETMAGIC(mem_hdr) != KCORESEG_MAGIC) ||
	    (CORE_GETFLAG(mem_hdr) != CORE_DATA))
		goto fail;

	kd->dump_off = offset;
	return (0);

fail:
	free(kd->kcore_hdr);
	kd->kcore_hdr = NULL;
	if (kd->cpu_data != NULL) {
		free(kd->cpu_data);
		kd->cpu_data = NULL;
		kd->cpu_dsize = 0;
	}

	return (-1);
}

/*
 * The format while on the dump device is: (new format)
 *    kcore_seg_t cpu_hdr;
 *    (opaque)    cpu_data; (size is cpu_hdr.c_size)
 *    kcore_seg_t mem_hdr;
 *    (memory)    mem_data; (size is mem_hdr.c_size)
 */
int
kvm_dump_mkheader(kvm_t *kd, off_t dump_off)
{
	kcore_seg_t	cpu_hdr;
	int	hdr_size;
	ssize_t sz;

	if (kd->kcore_hdr != NULL) {
	    _kvm_err(kd, kd->program, "already has a dump header");
	    return (-1);
	}
	if (ISALIVE(kd)) {
		_kvm_err(kd, kd->program, "don't use on live kernel");
		return (-1);
	}

	/*
	 * Validate new format crash dump
	 */
	sz = _kvm_pread(kd, kd->pmfd, &cpu_hdr, sizeof(cpu_hdr), (off_t)dump_off);
	if (sz != sizeof(cpu_hdr)) {
		return (-1);
	}
	if ((CORE_GETMAGIC(cpu_hdr) != KCORE_MAGIC)
		|| (CORE_GETMID(cpu_hdr) != MID_MACHINE)) {
		_kvm_err(kd, 0, "invalid magic in cpu_hdr");
		return (-1);
	}
	hdr_size = ALIGN(sizeof(cpu_hdr));

	/*
	 * Read the CPU segment.
	 */
	kd->cpu_dsize = cpu_hdr.c_size;
	kd->cpu_data = _kvm_malloc(kd, kd->cpu_dsize);
	if (kd->cpu_data == NULL)
		goto fail;

	sz = _kvm_pread(kd, kd->pmfd, kd->cpu_data, (size_t)cpu_hdr.c_size,
	    (off_t)dump_off+hdr_size);
	if (sz != (ssize_t)cpu_hdr.c_size) {
		_kvm_err(kd, 0, "invalid size in cpu_hdr");
		goto fail;
	}
	hdr_size += kd->cpu_dsize;

	/*
	 * Leave phys mem pointer at beginning of memory data
	 */
	kd->dump_off = dump_off + hdr_size;
	errno = 0;
	if (lseek(kd->pmfd, kd->dump_off, SEEK_SET) != kd->dump_off && errno != 0) {
		_kvm_err(kd, 0, "invalid dump offset - lseek");
		goto fail;
	}

	/*
	 * Create a kcore_hdr.
	 */
	kd->kcore_hdr = _kvm_malloc(kd, sizeof(kcore_hdr_t));
	if (kd->kcore_hdr == NULL)
		goto fail;

	kd->kcore_hdr->c_hdrsize    = ALIGN(sizeof(kcore_hdr_t));
	kd->kcore_hdr->c_seghdrsize = ALIGN(sizeof(kcore_seg_t));
	kd->kcore_hdr->c_nseg       = 2;
	CORE_SETMAGIC(*(kd->kcore_hdr), KCORE_MAGIC, MID_MACHINE,0);

	/*
	 * Now that we have a valid header, enable translations.
	 */
	if (_kvm_initvtop(kd) == 0)
		/* Success */
		return (hdr_size);

fail:
	free(kd->kcore_hdr);
	kd->kcore_hdr = NULL;
	if (kd->cpu_data != NULL) {
		free(kd->cpu_data);
		kd->cpu_data = NULL;
		kd->cpu_dsize = 0;
	}
	return (-1);
}

static int
clear_gap(kvm_t *kd, FILE *fp, int size)
{
	if (size <= 0) /* XXX - < 0 should never happen */
		return (0);
	while (size-- > 0) {
		if (fputc(0, fp) == EOF) {
			_kvm_syserr(kd, kd->program, "clear_gap");
			return (-1);
		}
	}
	return (0);
}

/*
 * Write the dump header info to 'fp'. Note that we can't use fseek(3) here
 * because 'fp' might be a file pointer obtained by zopen().
 */
int
kvm_dump_wrtheader(kvm_t *kd, FILE *fp, int dumpsize)
{
	kcore_seg_t	seghdr;
	long		offset;
	int		gap;

	if (kd->kcore_hdr == NULL || kd->cpu_data == NULL) {
		_kvm_err(kd, kd->program, "no valid dump header(s)");
		return (-1);
	}

	/*
	 * Write the generic header
	 */
	offset = 0;
	if (fwrite(kd->kcore_hdr, sizeof(kcore_hdr_t), 1, fp) < 1) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += kd->kcore_hdr->c_hdrsize;
	gap     = kd->kcore_hdr->c_hdrsize - sizeof(kcore_hdr_t);
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	/*
	 * Write the cpu header
	 */
	CORE_SETMAGIC(seghdr, KCORESEG_MAGIC, 0, CORE_CPU);
	seghdr.c_size = (u_long)ALIGN(kd->cpu_dsize);
	if (fwrite(&seghdr, sizeof(seghdr), 1, fp) < 1) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += kd->kcore_hdr->c_seghdrsize;
	gap     = kd->kcore_hdr->c_seghdrsize - sizeof(seghdr);
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	if (fwrite(kd->cpu_data, kd->cpu_dsize, 1, fp) < 1) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += seghdr.c_size;
	gap     = seghdr.c_size - kd->cpu_dsize;
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	/*
	 * Write the actual dump data segment header
	 */
	CORE_SETMAGIC(seghdr, KCORESEG_MAGIC, 0, CORE_DATA);
	seghdr.c_size = dumpsize;
	if (fwrite(&seghdr, sizeof(seghdr), 1, fp) < 1) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += kd->kcore_hdr->c_seghdrsize;
	gap     = kd->kcore_hdr->c_seghdrsize - sizeof(seghdr);
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	return (offset);
}

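/*
 * Illustrative usage sketch, not part of kvm.c: a savecore-style tool
 * could pair kvm_dump_mkheader() and kvm_dump_wrtheader() roughly as
 * below.  The kernel/core path arguments, the raw dump offset 0, and
 * dumpsize are placeholder assumptions; the core file is assumed to be
 * a raw dump that does not yet carry a kcore header.
 */
static int
example_write_dump_header(const char *kernel, const char *core, FILE *out,
    int dumpsize)
{
	char errbuf[_POSIX2_LINE_MAX];
	kvm_t *kd;

	/* Opening a regular core file (not /dev/mem) yields a dead kernel. */
	kd = kvm_openfiles(kernel, core, NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		(void)fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (-1);
	}
	/* Build the in-memory kcore header from the raw dump, then emit
	 * the kcore_hdr plus cpu and data segment headers to 'out'. */
	if (kvm_dump_mkheader(kd, (off_t)0) == -1 ||
	    kvm_dump_wrtheader(kd, out, dumpsize) == -1) {
		(void)fprintf(stderr, "%s\n", kvm_geterr(kd));
		(void)kvm_close(kd);
		return (-1);
	}
	return (kvm_close(kd));
}
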
kvm_t *
kvm_openfiles(const char *uf, const char *mf, const char *sf,
    int flag, char *errout)
{
	kvm_t *kd;

	if ((kd = malloc(sizeof(*kd))) == NULL) {
		(void)strlcpy(errout, strerror(errno), _POSIX2_LINE_MAX);
		return (0);
	}
	kd->program = 0;
	return (_kvm_open(kd, uf, mf, sf, flag, errout));
}

kvm_t *
kvm_open(const char *uf, const char *mf, const char *sf, int flag,
    const char *program)
{
	kvm_t *kd;

	if ((kd = malloc(sizeof(*kd))) == NULL && program != NULL) {
		(void)fprintf(stderr, "%s: %s\n", program, strerror(errno));
		return (0);
	}
	kd->program = program;
	return (_kvm_open(kd, uf, mf, sf, flag, NULL));
}

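/*
 * Illustrative usage sketch, not part of kvm.c: the common way to open
 * the running kernel with kvm_openfiles().  NULL file names select the
 * defaults (_PATH_KSYMS/_PATH_UNIX and _PATH_MEM); errbuf must hold at
 * least _POSIX2_LINE_MAX bytes and receives the error text on failure.
 */
static kvm_t *
example_open_live_kernel(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	kvm_t *kd;

	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
	if (kd == NULL)
		(void)fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
	return (kd);
}
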
int
kvm_close(kvm_t *kd)
{
	int error = 0;

	if (kd->pmfd >= 0)
		error |= close(kd->pmfd);
	if (kd->vmfd >= 0)
		error |= close(kd->vmfd);
	kd->alive = 0;
	if (kd->nlfd >= 0)
		error |= close(kd->nlfd);
	if (kd->swfd >= 0)
		error |= close(kd->swfd);
	if (kd->db != 0)
		error |= (kd->db->close)(kd->db);
	if (kd->vmst)
		_kvm_freevtop(kd);
	kd->cpu_dsize = 0;
	if (kd->cpu_data != NULL)
		free((void *)kd->cpu_data);
	if (kd->kcore_hdr != NULL)
		free((void *)kd->kcore_hdr);
	free(kd->filebase);
	free(kd->procbase);
	if (kd->swapspc != 0)
		free((void *)kd->swapspc);
	if (kd->argspc != 0)
		free((void *)kd->argspc);
	if (kd->argbuf != 0)
		free((void *)kd->argbuf);
	if (kd->argv != 0)
		free((void *)kd->argv);
	free((void *)kd);

	return (error);
}

/*
 * Set up state necessary to do queries on the kernel namelist
 * data base.  If the data base is out-of-date/incompatible with
 * given executable, set up things so we revert to standard nlist call.
 * Only called for live kernels.  Return 0 on success, -1 on failure.
 */
static int
kvm_dbopen(kvm_t *kd, const char *uf)
{
	char dbversion[_POSIX2_LINE_MAX], kversion[_POSIX2_LINE_MAX];
	char dbname[PATH_MAX];
	struct nlist nitem;
	size_t dbversionlen;
	DBT rec;

	uf = basename(uf);

	(void)snprintf(dbname, sizeof(dbname), "%skvm_%s.db", _PATH_VARDB, uf);
	kd->db = dbopen(dbname, O_RDONLY, 0, DB_HASH, NULL);
	if (kd->db == NULL) {
		switch (errno) {
		case ENOENT:
			/* No kvm_bsd.db, fall back to /bsd silently */
			break;
		case EFTYPE:
			_kvm_err(kd, kd->program,
			    "file %s is incorrectly formatted", dbname);
			break;
		case EINVAL:
			_kvm_err(kd, kd->program,
			    "invalid argument to dbopen()");
			break;
		default:
			_kvm_err(kd, kd->program, "unknown dbopen() error");
			break;
		}
		return (-1);
	}

	/*
	 * read version out of database
	 */
	rec.data = VRS_KEY;
	rec.size = sizeof(VRS_KEY) - 1;
	if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
		goto close;
	if (rec.data == 0 || rec.size > sizeof(dbversion))
		goto close;

	bcopy(rec.data, dbversion, rec.size);
	dbversionlen = rec.size;

	/*
	 * Read version string from kernel memory.
	 * Since we are dealing with a live kernel, we can call kvm_read()
	 * at this point.
	 */
	rec.data = VRS_SYM;
	rec.size = sizeof(VRS_SYM) - 1;
	if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
		goto close;
	if (rec.data == 0 || rec.size != sizeof(struct nlist))
		goto close;
	bcopy(rec.data, &nitem, sizeof(nitem));
	if (kvm_read(kd, (u_long)nitem.n_value, kversion, dbversionlen) !=
	    dbversionlen)
		goto close;
	/*
	 * If they match, we win - otherwise clear out kd->db so
	 * we revert to slow nlist().
	 */
	if (bcmp(dbversion, kversion, dbversionlen) == 0)
		return (0);
close:
	(void)(kd->db->close)(kd->db);
	kd->db = 0;

	return (-1);
}

int
kvm_nlist(kvm_t *kd, struct nlist *nl)
{
	struct nlist *p;
	int nvalid, rv;

	/*
	 * If we can't use the data base, revert to the
	 * slow library call.
	 */
	if (kd->db == 0) {
		rv = __fdnlist(kd->nlfd, nl);
		if (rv == -1)
			_kvm_err(kd, 0, "bad namelist");
		return (rv);
	}

	/*
	 * We can use the kvm data base.  Go through each nlist entry
	 * and look it up with a db query.
	 */
	nvalid = 0;
	for (p = nl; p->n_name && p->n_name[0]; ++p) {
		size_t len;
		DBT rec;

		if ((len = strlen(p->n_name)) > 4096) {
			/* sanity */
			_kvm_err(kd, kd->program, "symbol too large");
			return (-1);
		}
		rec.data = p->n_name;
		rec.size = len;

		/*
		 * Make sure that n_value = 0 when the symbol isn't found
		 */
		p->n_value = 0;

		if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
			continue;
		if (rec.data == 0 || rec.size != sizeof(struct nlist))
			continue;
		++nvalid;
		/*
		 * Avoid alignment issues.
		 */
		bcopy((char *)rec.data + offsetof(struct nlist, n_type),
		    &p->n_type, sizeof(p->n_type));
		bcopy((char *)rec.data + offsetof(struct nlist, n_value),
		    &p->n_value, sizeof(p->n_value));
	}
	/*
	 * Return the number of entries that weren't found.
	 */
	return ((p - nl) - nvalid);
}

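/*
 * Illustrative usage sketch, not part of kvm.c: resolving one symbol with
 * kvm_nlist() and fetching its value with kvm_read(), the same pattern
 * kvm_dump_inval() below uses internally.  The symbol name "_nprocs" is
 * only an example; any kernel symbol would do.
 */
static int
example_read_kernel_int(kvm_t *kd, int *valp)
{
	struct nlist nl[2];

	nl[0].n_name = "_nprocs";
	nl[1].n_name = NULL;
	/* kvm_nlist() returns the number of symbols it could not resolve. */
	if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0)
		return (-1);
	if (kvm_read(kd, (u_long)nl[0].n_value, valp, sizeof(*valp)) !=
	    sizeof(*valp))
		return (-1);
	return (0);
}
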
int
kvm_dump_inval(kvm_t *kd)
{
	struct nlist	nl[2];
	u_long		x;
	paddr_t		pa;

	if (ISALIVE(kd)) {
		_kvm_err(kd, kd->program, "clearing dump on live kernel");
		return (-1);
	}
	nl[0].n_name = "_dumpmag";
	nl[1].n_name = NULL;

	if (kvm_nlist(kd, nl) == -1) {
		_kvm_err(kd, 0, "bad namelist");
		return (-1);
	}

	if (nl[0].n_value == 0) {
		_kvm_err(kd, nl[0].n_name, "not in name list");
		return (-1);
	}

	if (_kvm_kvatop(kd, (u_long)nl[0].n_value, &pa) == 0)
		return (-1);

	x = 0;
	if (_kvm_pwrite(kd, kd->pmfd, &x, sizeof(x),
	    (off_t)_kvm_pa2off(kd, pa)) != sizeof(x)) {
		_kvm_err(kd, 0, "cannot invalidate dump");
		return (-1);
	}
	return (0);
}

ssize_t
kvm_read(kvm_t *kd, u_long kva, void *buf, size_t len)
{
	ssize_t cc;
	void *cp;

	if (ISALIVE(kd)) {
		/*
		 * We're using /dev/kmem.  Just read straight from the
		 * device and let the active kernel do the address translation.
		 */
		cc = _kvm_pread(kd, kd->vmfd, buf, len, (off_t)kva);
		if (cc == -1) {
			_kvm_err(kd, 0, "invalid address (%lx)", kva);
			return (-1);
		} else if (cc < len)
			_kvm_err(kd, kd->program, "short read");
		return (cc);
	} else {
		if ((kd->kcore_hdr == NULL) || (kd->cpu_data == NULL)) {
			_kvm_err(kd, kd->program, "no valid dump header");
			return (-1);
		}
		cp = buf;
		while (len > 0) {
			paddr_t	pa;

			/* In case of error, _kvm_kvatop sets the err string */
			cc = _kvm_kvatop(kd, kva, &pa);
			if (cc == 0)
				return (-1);
			if (cc > len)
				cc = len;
			cc = _kvm_pread(kd, kd->pmfd, cp, (size_t)cc,
			    (off_t)_kvm_pa2off(kd, pa));
			if (cc == -1) {
				_kvm_syserr(kd, 0, _PATH_MEM);
				break;
			}
			/*
			 * If kvm_kvatop returns a bogus value or our core
			 * file is truncated, we might wind up seeking beyond
			 * the end of the core file in which case the read will
			 * return 0 (EOF).
			 */
			if (cc == 0)
				break;
			cp = (char *)cp + cc;
			kva += cc;
			len -= cc;
		}
		return ((char *)cp - (char *)buf);
	}
	/* NOTREACHED */
}

ssize_t
kvm_write(kvm_t *kd, u_long kva, const void *buf, size_t len)
{
	int cc;

	if (ISALIVE(kd)) {
		/*
		 * Just like kvm_read, only we write.
		 */
		cc = _kvm_pwrite(kd, kd->vmfd, buf, len, (off_t)kva);
		if (cc == -1) {
			_kvm_err(kd, 0, "invalid address (%lx)", kva);
			return (-1);
		} else if (cc < len)
			_kvm_err(kd, kd->program, "short write");
		return (cc);
	} else {
		_kvm_err(kd, kd->program,
		    "kvm_write not implemented for dead kernels");
		return (-1);
	}
	/* NOTREACHED */
}