/*	$OpenBSD: pci.c,v 1.22 2017/09/17 23:07:56 pd Exp $	*/

/*
 * Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>
#include <dev/pv/virtioreg.h>
#include <machine/vmmvar.h>

#include <string.h>
#include <unistd.h>
#include "vmd.h"
#include "pci.h"
#include "vmm.h"
#include "atomicio.h"

struct pci pci;

extern char *__progname;

/* PIC IRQs, assigned to devices in order */
const uint8_t pci_pic_irqs[PCI_MAX_PIC_IRQS] = {3, 5, 6, 7, 9, 10, 11, 12,
    14, 15};

/*
 * pci_add_bar
 *
 * Adds a BAR for the PCI device 'id'. On access, 'barfn' will be
 * called, and passed 'cookie' as an identifier.
 *
 * BARs are fixed size, meaning all I/O BARs requested have the
 * same size and all MMIO BARs have the same size.
 *
 * Parameters:
 *  id: PCI device to add the BAR to (local count, eg if id == 4,
 *      this BAR is to be added to the VM's 5th PCI device)
 *  type: type of the BAR to add (PCI_MAPREG_TYPE_xxx)
 *  barfn: callback function invoked on BAR access
 *  cookie: cookie passed to barfn on access
 *
 * Returns 0 if the BAR was added successfully, 1 otherwise.
 */
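/*
 * Illustrative sketch (hypothetical names, not from a particular
 * caller): a device emulation that wants an I/O BAR would typically
 * call
 *
 *	if (pci_add_bar(id, PCI_MAPREG_TYPE_IO, mydev_io, &mydev))
 *		log_warnx("%s: can't add i/o bar", __progname);
 *
 * where 'mydev_io' is a pci_iobar_fn_t callback and '&mydev' is the
 * cookie later passed back to it by pci_handle_io().
 */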
int
pci_add_bar(uint8_t id, uint32_t type, void *barfn, void *cookie)
{
	uint8_t bar_reg_idx, bar_ct;

	/* Check id */
	if (id >= pci.pci_dev_ct)
		return (1);

	/* Can only add PCI_MAX_BARS BARs to any device */
	bar_ct = pci.pci_devices[id].pd_bar_ct;
	if (bar_ct >= PCI_MAX_BARS)
		return (1);

	/* Compute BAR address and add */
	bar_reg_idx = (PCI_MAPREG_START + (bar_ct * 4)) / 4;
	if (type == PCI_MAPREG_TYPE_MEM) {
		if (pci.pci_next_mmio_bar >= VMM_PCI_MMIO_BAR_END)
			return (1);

		pci.pci_devices[id].pd_cfg_space[bar_reg_idx] =
		    PCI_MAPREG_MEM_ADDR(pci.pci_next_mmio_bar);
		pci.pci_next_mmio_bar += VMM_PCI_MMIO_BAR_SIZE;
		pci.pci_devices[id].pd_barfunc[bar_ct] = barfn;
		pci.pci_devices[id].pd_bar_cookie[bar_ct] = cookie;
		pci.pci_devices[id].pd_bartype[bar_ct] = PCI_BAR_TYPE_MMIO;
		pci.pci_devices[id].pd_barsize[bar_ct] = VMM_PCI_MMIO_BAR_SIZE;
		pci.pci_devices[id].pd_bar_ct++;
	} else if (type == PCI_MAPREG_TYPE_IO) {
		if (pci.pci_next_io_bar >= VMM_PCI_IO_BAR_END)
			return (1);

		pci.pci_devices[id].pd_cfg_space[bar_reg_idx] =
		    PCI_MAPREG_IO_ADDR(pci.pci_next_io_bar) |
		    PCI_MAPREG_TYPE_IO;
		pci.pci_next_io_bar += VMM_PCI_IO_BAR_SIZE;
		pci.pci_devices[id].pd_barfunc[bar_ct] = barfn;
		pci.pci_devices[id].pd_bar_cookie[bar_ct] = cookie;
		dprintf("%s: adding pci bar cookie for dev %d bar %d = %p",
		    __progname, id, bar_ct, cookie);
		pci.pci_devices[id].pd_bartype[bar_ct] = PCI_BAR_TYPE_IO;
		pci.pci_devices[id].pd_barsize[bar_ct] = VMM_PCI_IO_BAR_SIZE;
		pci.pci_devices[id].pd_bar_ct++;
	}

	return (0);
}
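
/*
 * pci_set_bar_fn
 *
 * Sets the BAR access callback function and cookie for BAR 'bar_ct' of
 * the existing PCI device 'id', without allocating a new BAR.
 *
 * Parameters:
 *  id: PCI device whose BAR callback is being set
 *  bar_ct: index of the BAR to update (0 .. PCI_MAX_BARS - 1)
 *  barfn: callback function invoked on BAR access
 *  cookie: cookie passed to barfn on access
 *
 * Return values:
 *  0: the callback was set successfully
 *  1: 'id' or 'bar_ct' was out of range
 */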
int
pci_set_bar_fn(uint8_t id, uint8_t bar_ct, void *barfn, void *cookie)
{
	/* Check id */
	if (id >= pci.pci_dev_ct)
		return (1);

	if (bar_ct >= PCI_MAX_BARS)
		return (1);

	pci.pci_devices[id].pd_barfunc[bar_ct] = barfn;
	pci.pci_devices[id].pd_bar_cookie[bar_ct] = cookie;

	return (0);
}

/*
 * pci_get_dev_irq
 *
 * Returns the IRQ for the specified PCI device
 *
 * Parameters:
 *  id: PCI device id to return IRQ for
 *
 * Return values:
 *  The IRQ for the device, or 0xff if no device IRQ assigned
 */
uint8_t
pci_get_dev_irq(uint8_t id)
{
	if (pci.pci_devices[id].pd_int)
		return pci.pci_devices[id].pd_irq;
	else
		return 0xFF;
}

/*
 * pci_add_device
 *
 * Adds a PCI device to the guest VM defined by the supplied parameters.
 *
 * Parameters:
 *  id: the new PCI device ID (0 .. PCI_CONFIG_MAX_DEV)
 *  vid: PCI VID of the new device
 *  pid: PCI PID of the new device
 *  class: PCI 'class' of the new device
 *  subclass: PCI 'subclass' of the new device
 *  subsys_vid: subsystem VID of the new device
 *  subsys_id: subsystem ID of the new device
 *  irq_needed: 1 if an IRQ should be assigned to this PCI device, 0 otherwise
 *  csfunc: PCI config space callback function when the guest VM accesses
 *      CS of this PCI device
 *
 * Return values:
 *  0: the PCI device was added successfully. The PCI device ID is in 'id'.
 *  1: the PCI device addition failed.
 */
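/*
 * Illustrative sketch (hypothetical values, not from a particular
 * caller): a device emulation would typically register itself as
 *
 *	uint8_t id;
 *
 *	if (pci_add_device(&id, vid, pid, PCI_CLASS_SYSTEM,
 *	    PCI_SUBCLASS_SYSTEM_MISC, subsys_vid, subsys_id, 1, NULL))
 *		log_warnx("%s: can't add PCI device", __progname);
 *
 * and then attach its BARs with pci_add_bar(). Passing 1 for irq_needed
 * assigns the device one of pci_pic_irqs; passing NULL for csfunc
 * selects the default config space handling in pci_handle_data_reg().
 */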
int
pci_add_device(uint8_t *id, uint16_t vid, uint16_t pid, uint8_t class,
    uint8_t subclass, uint16_t subsys_vid, uint16_t subsys_id,
    uint8_t irq_needed, pci_cs_fn_t csfunc)
{
	/* Exceeded max devices? */
	if (pci.pci_dev_ct >= PCI_CONFIG_MAX_DEV)
		return (1);

	/* Exceeded max IRQs? */
	/* XXX we could share IRQs ... */
	if (pci.pci_next_pic_irq >= PCI_MAX_PIC_IRQS && irq_needed)
		return (1);

	*id = pci.pci_dev_ct;

	pci.pci_devices[*id].pd_vid = vid;
	pci.pci_devices[*id].pd_did = pid;
	pci.pci_devices[*id].pd_class = class;
	pci.pci_devices[*id].pd_subclass = subclass;
	pci.pci_devices[*id].pd_subsys_vid = subsys_vid;
	pci.pci_devices[*id].pd_subsys_id = subsys_id;

	pci.pci_devices[*id].pd_csfunc = csfunc;

	if (irq_needed) {
		pci.pci_devices[*id].pd_irq =
		    pci_pic_irqs[pci.pci_next_pic_irq];
		pci.pci_devices[*id].pd_int = 1;
		pci.pci_next_pic_irq++;
		dprintf("assigned irq %d to pci dev %d",
		    pci.pci_devices[*id].pd_irq, *id);
	}

	pci.pci_dev_ct++;

	return (0);
}

/*
 * pci_init
 *
 * Initializes the PCI subsystem for the VM by adding a PCI host bridge
 * as the first PCI device.
 */
void
pci_init(void)
{
	uint8_t id;

	memset(&pci, 0, sizeof(pci));
	pci.pci_next_mmio_bar = VMM_PCI_MMIO_BAR_BASE;
	pci.pci_next_io_bar = VMM_PCI_IO_BAR_BASE;

	if (pci_add_device(&id, PCI_VENDOR_OPENBSD, PCI_PRODUCT_OPENBSD_PCHB,
	    PCI_CLASS_BRIDGE, PCI_SUBCLASS_BRIDGE_HOST,
	    PCI_VENDOR_OPENBSD, 0, 0, NULL)) {
		log_warnx("%s: can't add PCI host bridge", __progname);
		return;
	}
}
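
/*
 * pci_handle_address_reg
 *
 * Emulates guest accesses to the PCI config space address register
 * (PCI mechanism #1, port 0xcf8). Writes latch the value into
 * pci.pci_addr_reg; reads return the currently latched value.
 *
 * Parameters:
 *  vrp: vm run parameters containing the exit information for the
 *      in/out instruction being performed
 */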
void
pci_handle_address_reg(struct vm_run_params *vrp)
{
	union vm_exit *vei = vrp->vrp_exit;

	/*
	 * vei_dir == VEI_DIR_OUT : out instruction
	 *
	 * The guest wrote to the address register.
	 */
	if (vei->vei.vei_dir == VEI_DIR_OUT) {
		get_input_data(vei, &pci.pci_addr_reg);
	} else {
		/*
		 * vei_dir == VEI_DIR_IN : in instruction
		 *
		 * The guest read the address register
		 */
		set_return_data(vei, pci.pci_addr_reg);
	}
}
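
/*
 * pci_handle_io
 *
 * Dispatches an I/O port access that falls inside a registered I/O BAR
 * to the callback function attached to that BAR. Reads from ports not
 * claimed by any BAR return 0xFFFFFFFF.
 *
 * Parameters:
 *  vrp: vm run parameters containing the exit information for the
 *      in/out instruction being performed
 *
 * Return values:
 *  The IRQ of the device to assert, or 0xFF if no interrupt should be
 *  injected.
 */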
uint8_t
pci_handle_io(struct vm_run_params *vrp)
{
	int i, j, k, l;
	uint16_t reg, b_hi, b_lo;
	pci_iobar_fn_t fn;
	union vm_exit *vei = vrp->vrp_exit;
	uint8_t intr, dir;

	k = -1;
	l = -1;
	reg = vei->vei.vei_port;
	dir = vei->vei.vei_dir;
	intr = 0xFF;

	for (i = 0 ; i < pci.pci_dev_ct ; i++) {
		for (j = 0 ; j < pci.pci_devices[i].pd_bar_ct; j++) {
			b_lo = PCI_MAPREG_IO_ADDR(pci.pci_devices[i].pd_bar[j]);
			b_hi = b_lo + VMM_PCI_IO_BAR_SIZE;
			if (reg >= b_lo && reg < b_hi) {
				if (pci.pci_devices[i].pd_barfunc[j]) {
					k = j;
					l = i;
				}
			}
		}
	}

	if (k >= 0 && l >= 0) {
		fn = (pci_iobar_fn_t)pci.pci_devices[l].pd_barfunc[k];
		if (fn(vei->vei.vei_dir, reg -
		    PCI_MAPREG_IO_ADDR(pci.pci_devices[l].pd_bar[k]),
		    &vei->vei.vei_data, &intr,
		    pci.pci_devices[l].pd_bar_cookie[k],
		    vei->vei.vei_size)) {
			log_warnx("%s: pci i/o access function failed",
			    __progname);
		}
	} else {
		log_warnx("%s: no pci i/o function for reg 0x%llx",
		    __progname, (uint64_t)reg);
		/* Reads from undefined ports return 0xFF */
		if (dir == VEI_DIR_IN)
			set_return_data(vei, 0xFFFFFFFF);
	}

	if (intr != 0xFF) {
		intr = pci.pci_devices[l].pd_irq;
	}

	return (intr);
}
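
/*
 * pci_handle_data_reg
 *
 * Emulates guest accesses to the PCI config space data register
 * (PCI mechanism #1, ports 0xcfc-0xcff). The bus/device/function and
 * config space offset are taken from the value last latched into the
 * address register. If the selected device registered a config space
 * callback (csfunc), the access is handed to it; otherwise the default
 * read/write of the device's config space registers below is used.
 *
 * Parameters:
 *  vrp: vm run parameters containing the exit information for the
 *      in/out instruction being performed
 */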
void
pci_handle_data_reg(struct vm_run_params *vrp)
{
	union vm_exit *vei = vrp->vrp_exit;
	uint8_t b, d, f, o, baridx, ofs, sz;
	int ret;
	pci_cs_fn_t csfunc;

	/* abort if the address register is wack */
	if (!(pci.pci_addr_reg & PCI_MODE1_ENABLE)) {
		/* if read, return FFs */
		if (vei->vei.vei_dir == VEI_DIR_IN)
			set_return_data(vei, 0xFFFFFFFF);
		log_warnx("invalid address register during pci config space "
		    "access: 0x%llx", (uint64_t)pci.pci_addr_reg);
		return;
	}

	/* I/Os to 0xCFC..0xCFF are permitted */
	ofs = vei->vei.vei_port - 0xCFC;
	sz = vei->vei.vei_size;
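
	/*
	 * Decode the latched address register (PCI config mechanism #1):
	 * bit 31 is the enable bit (checked above), bits 23:16 select the
	 * bus, bits 15:11 the device, bits 10:8 the function, and bits
	 * 7:2 the dword-aligned config space offset.
	 */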
	b = (pci.pci_addr_reg >> 16) & 0xff;
	d = (pci.pci_addr_reg >> 11) & 0x1f;
	f = (pci.pci_addr_reg >> 8) & 0x7;
	o = (pci.pci_addr_reg & 0xfc);

	csfunc = pci.pci_devices[d].pd_csfunc;
	if (csfunc != NULL) {
		ret = csfunc(vei->vei.vei_dir, (o / 4), &vei->vei.vei_data);
		if (ret)
			log_warnx("cfg space access function failed for "
			    "pci device %d", d);
		return;
	}

	/* No config space function, fallback to default simple r/w impl. */

	o += ofs;

	/*
	 * vei_dir == VEI_DIR_OUT : out instruction
	 *
	 * The guest wrote to the config space location denoted by the current
	 * value in the address register.
	 */
	if (vei->vei.vei_dir == VEI_DIR_OUT) {
		if ((o >= 0x10 && o <= 0x24) &&
		    vei->vei.vei_data == 0xffffffff) {
			/*
			 * Compute BAR index:
			 * o = 0x10 -> baridx = 0
			 * o = 0x14 -> baridx = 1
			 * o = 0x18 -> baridx = 2
			 * o = 0x1c -> baridx = 3
			 * o = 0x20 -> baridx = 4
			 * o = 0x24 -> baridx = 5
			 */
			baridx = (o / 4) - 4;
			if (baridx < pci.pci_devices[d].pd_bar_ct)
				vei->vei.vei_data = 0xfffff000;
			else
				vei->vei.vei_data = 0;
		}

		/* IOBAR registers must have bit 0 set */
		if (o >= 0x10 && o <= 0x24) {
			baridx = (o / 4) - 4;
			if (baridx < pci.pci_devices[d].pd_bar_ct &&
			    pci.pci_devices[d].pd_bartype[baridx] ==
			    PCI_BAR_TYPE_IO)
				vei->vei.vei_data |= 1;
		}

		/*
		 * Discard writes to "option rom base address" as none of our
		 * emulated devices have PCI option roms. Accept any other
		 * writes and copy data to config space registers.
		 */
		if (o != PCI_EXROMADDR_0)
			get_input_data(vei,
			    &pci.pci_devices[d].pd_cfg_space[o / 4]);
	} else {
		/*
		 * vei_dir == VEI_DIR_IN : in instruction
		 *
		 * The guest read from the config space location determined by
		 * the current value in the address register.
		 */
		if (d > pci.pci_dev_ct || b > 0 || f > 0)
			set_return_data(vei, 0xFFFFFFFF);
		else {
			switch (sz) {
			case 4:
				set_return_data(vei,
				    pci.pci_devices[d].pd_cfg_space[o / 4]);
				break;
			case 2:
				if (ofs == 0)
					set_return_data(vei,
					    pci.pci_devices[d].pd_cfg_space[o / 4]);
				else
					set_return_data(vei,
					    pci.pci_devices[d].pd_cfg_space[o / 4] >> 16);
				break;
			case 1:
				set_return_data(vei,
				    pci.pci_devices[d].pd_cfg_space[o / 4] >> (ofs * 8));
				break;
			}
		}
	}
}
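
/*
 * pci_dump
 *
 * Writes the global PCI emulation state ('pci') to the supplied file
 * descriptor.
 *
 * Parameters:
 *  fd: file descriptor to write the state to
 *
 * Return values:
 *  0: the state was written successfully
 * -1: the write failed
 */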
int
pci_dump(int fd)
{
	log_debug("%s: sending pci", __func__);
	if (atomicio(vwrite, fd, &pci, sizeof(pci)) != sizeof(pci)) {
		log_warnx("%s: error writing pci to fd", __func__);
		return (-1);
	}
	return (0);
}
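
/*
 * pci_restore
 *
 * Reads the global PCI emulation state ('pci') back from the supplied
 * file descriptor.
 *
 * Parameters:
 *  fd: file descriptor to read the state from
 *
 * Return values:
 *  0: the state was read successfully
 * -1: the read failed
 */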
int
pci_restore(int fd)
{
	log_debug("%s: receiving pci", __func__);
	if (atomicio(read, fd, &pci, sizeof(pci)) != sizeof(pci)) {
		log_warnx("%s: error reading pci from fd", __func__);
		return (-1);
	}
	return (0);
}