Line data Source code
1 : /*-
2 : * Copyright (c) 2009-2012 Microsoft Corp.
3 : * Copyright (c) 2012 NetApp Inc.
4 : * Copyright (c) 2012 Citrix Inc.
5 : * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com>
6 : * All rights reserved.
7 : *
8 : * Redistribution and use in source and binary forms, with or without
9 : * modification, are permitted provided that the following conditions
10 : * are met:
11 : * 1. Redistributions of source code must retain the above copyright
12 : * notice unmodified, this list of conditions, and the following
13 : * disclaimer.
14 : * 2. Redistributions in binary form must reproduce the above copyright
15 : * notice, this list of conditions and the following disclaimer in the
16 : * documentation and/or other materials provided with the distribution.
17 : *
18 : * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 : * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 : * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 : * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 : * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 : * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 : * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 : * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 : * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 : * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 : */
29 :
30 : /*
31 : * The OpenBSD port was done under funding by Esdenera Networks GmbH.
32 : */
33 :
34 : #include <sys/param.h>
35 :
36 : /* Hyperv requires locked atomic operations */
37 : #ifndef MULTIPROCESSOR
38 : #define _HYPERVMPATOMICS
39 : #define MULTIPROCESSOR
40 : #endif
41 : #include <sys/atomic.h>
42 : #ifdef _HYPERVMPATOMICS
43 : #undef MULTIPROCESSOR
44 : #undef _HYPERVMPATOMICS
45 : #endif
46 :
47 : #include <sys/systm.h>
48 : #include <sys/proc.h>
49 : #include <sys/signal.h>
50 : #include <sys/signalvar.h>
51 : #include <sys/malloc.h>
52 : #include <sys/kernel.h>
53 : #include <sys/device.h>
54 : #include <sys/timetc.h>
55 : #include <sys/task.h>
56 : #include <sys/syslog.h>
57 :
58 : #include <machine/bus.h>
59 : #include <machine/cpu.h>
60 : #include <machine/cpufunc.h>
61 :
62 : #include <uvm/uvm_extern.h>
63 :
64 : #include <machine/i82489var.h>
65 :
66 : #include <dev/rndvar.h>
67 :
68 : #include <dev/pv/pvvar.h>
69 : #include <dev/pv/pvreg.h>
70 : #include <dev/pv/hypervreg.h>
71 : #include <dev/pv/hypervvar.h>
72 :
73 : /* Command submission flags */
74 : #define HCF_SLEEPOK 0x0001 /* M_WAITOK */
75 : #define HCF_NOSLEEP 0x0002 /* M_NOWAIT */
76 : #define HCF_NOREPLY 0x0004
77 :
78 : struct hv_softc *hv_sc;
79 :
80 : int hv_match(struct device *, void *, void *);
81 : void hv_attach(struct device *, struct device *, void *);
82 : void hv_set_version(struct hv_softc *);
83 : u_int hv_gettime(struct timecounter *);
84 : int hv_init_hypercall(struct hv_softc *);
85 : uint64_t hv_hypercall(struct hv_softc *, uint64_t, void *, void *);
86 : int hv_init_interrupts(struct hv_softc *);
87 : int hv_init_synic(struct hv_softc *);
88 : int hv_cmd(struct hv_softc *, void *, size_t, void *, size_t, int);
89 : int hv_start(struct hv_softc *, struct hv_msg *);
90 : int hv_reply(struct hv_softc *, struct hv_msg *);
91 : void hv_wait(struct hv_softc *, int (*done)(struct hv_softc *,
92 : struct hv_msg *), struct hv_msg *, void *, const char *);
93 : uint16_t hv_intr_signal(struct hv_softc *, void *);
94 : void hv_intr(void);
95 : void hv_event_intr(struct hv_softc *);
96 : void hv_message_intr(struct hv_softc *);
97 : int hv_vmbus_connect(struct hv_softc *);
98 : void hv_channel_response(struct hv_softc *, struct vmbus_chanmsg_hdr *);
99 : void hv_channel_offer(struct hv_softc *, struct vmbus_chanmsg_hdr *);
100 : void hv_channel_rescind(struct hv_softc *, struct vmbus_chanmsg_hdr *);
101 : void hv_channel_delivered(struct hv_softc *, struct vmbus_chanmsg_hdr *);
102 : int hv_channel_scan(struct hv_softc *);
103 : void hv_process_offer(struct hv_softc *, struct hv_offer *);
104 : struct hv_channel *
105 : hv_channel_lookup(struct hv_softc *, uint32_t);
106 : int hv_channel_ring_create(struct hv_channel *, uint32_t);
107 : void hv_channel_ring_destroy(struct hv_channel *);
108 : void hv_channel_pause(struct hv_channel *);
109 : uint hv_channel_unpause(struct hv_channel *);
110 : uint hv_channel_ready(struct hv_channel *);
111 : extern void hv_attach_icdevs(struct hv_softc *);
112 : int hv_attach_devices(struct hv_softc *);
113 :
/*
 * VMBus channel protocol message dispatch table, indexed by the
 * VMBUS_CHANMSG_* type of a received host message (see hv_message_intr()).
 * For response messages, hmd_request holds the request type they complete;
 * hv_channel_response() uses it to match a reply to a queued request.
 */
struct {
	int hmd_response;	/* message type of this table slot */
	int hmd_request;	/* request type this message answers, or 0 */
	void (*hmd_handler)(struct hv_softc *,
	    struct vmbus_chanmsg_hdr *);	/* handler, NULL if unhandled */
} hv_msg_dispatch[] = {
	{ 0, 0, NULL },		/* slot 0: message type 0 is reserved */
	{ VMBUS_CHANMSG_CHOFFER, 0, hv_channel_offer },
	{ VMBUS_CHANMSG_CHRESCIND, 0, hv_channel_rescind },
	{ VMBUS_CHANMSG_CHREQUEST, VMBUS_CHANMSG_CHOFFER,
	    NULL },
	{ VMBUS_CHANMSG_CHOFFER_DONE, 0,
	    hv_channel_delivered },
	{ VMBUS_CHANMSG_CHOPEN, 0, NULL },
	{ VMBUS_CHANMSG_CHOPEN_RESP, VMBUS_CHANMSG_CHOPEN,
	    hv_channel_response },
	{ VMBUS_CHANMSG_CHCLOSE, 0, NULL },
	{ VMBUS_CHANMSG_GPADL_CONN, 0, NULL },
	{ VMBUS_CHANMSG_GPADL_SUBCONN, 0, NULL },
	{ VMBUS_CHANMSG_GPADL_CONNRESP, VMBUS_CHANMSG_GPADL_CONN,
	    hv_channel_response },
	{ VMBUS_CHANMSG_GPADL_DISCONN, 0, NULL },
	{ VMBUS_CHANMSG_GPADL_DISCONNRESP, VMBUS_CHANMSG_GPADL_DISCONN,
	    hv_channel_response },
	{ VMBUS_CHANMSG_CHFREE, 0, NULL },
	{ VMBUS_CHANMSG_CONNECT, 0, NULL },
	{ VMBUS_CHANMSG_CONNECT_RESP, VMBUS_CHANMSG_CONNECT,
	    hv_channel_response },
	{ VMBUS_CHANMSG_DISCONNECT, 0, NULL },
};
144 :
/*
 * Timecounter backed by the partition reference counter MSR
 * (see hv_gettime()): 10 MHz, i.e. 100ns ticks, 32-bit mask.
 */
struct timecounter hv_timecounter = {
	hv_gettime, 0, 0xffffffff, 10000000, "hyperv", 9001
};
148 :
/* autoconf(9) glue: device class and attachment for hyperv(4). */
struct cfdriver hyperv_cd = {
	NULL, "hyperv", DV_DULL
};

const struct cfattach hyperv_ca = {
	sizeof(struct hv_softc), hv_match, hv_attach
};
156 :
/*
 * Interface type GUIDs of known VMBus device and service classes,
 * stored byte-for-byte as the host transmits them (the first three
 * GUID fields are little-endian; see guidprint() for the canonical
 * textual layout).  Compared against channel offers in hv_guid_sprint().
 */
const struct hv_guid hv_guid_network = {
	{ 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46,
	  0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e }
};

const struct hv_guid hv_guid_ide = {
	{ 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
	  0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 }
};

const struct hv_guid hv_guid_scsi = {
	{ 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
	  0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f }
};

const struct hv_guid hv_guid_shutdown = {
	{ 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49,
	  0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb }
};

const struct hv_guid hv_guid_timesync = {
	{ 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
	  0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf }
};

const struct hv_guid hv_guid_heartbeat = {
	{ 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
	  0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d }
};

const struct hv_guid hv_guid_kvp = {
	{ 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d,
	  0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6 }
};

#ifdef HYPERV_DEBUG
/* The following classes are only reported (not attached) in debug builds. */
const struct hv_guid hv_guid_vss = {
	{ 0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42,
	  0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 }
};

const struct hv_guid hv_guid_dynmem = {
	{ 0xdc, 0x74, 0x50, 0x52, 0x85, 0x89, 0xe2, 0x46,
	  0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 }
};

const struct hv_guid hv_guid_mouse = {
	{ 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c,
	  0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a }
};

const struct hv_guid hv_guid_kbd = {
	{ 0x6d, 0xad, 0x12, 0xf9, 0x17, 0x2b, 0xea, 0x48,
	  0xbd, 0x65, 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84 }
};

const struct hv_guid hv_guid_video = {
	{ 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a,
	  0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 }
};

const struct hv_guid hv_guid_fc = {
	{ 0x4a, 0xcc, 0x9b, 0x2f, 0x69, 0x00, 0xf3, 0x4a,
	  0xb7, 0x6b, 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda }
};

const struct hv_guid hv_guid_fcopy = {
	{ 0xe3, 0x4b, 0xd1, 0x34, 0xe4, 0xde, 0xc8, 0x41,
	  0x9a, 0xe7, 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92 }
};

const struct hv_guid hv_guid_pcie = {
	{ 0x1d, 0xf6, 0xc4, 0x44, 0x44, 0x44, 0x00, 0x44,
	  0x9d, 0x52, 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f }
};

const struct hv_guid hv_guid_netdir = {
	{ 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b,
	  0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 }
};

const struct hv_guid hv_guid_rdesktop = {
	{ 0xf4, 0xac, 0x6a, 0x27, 0x15, 0xac, 0x6c, 0x42,
	  0x98, 0xdd, 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe }
};

/* Automatic Virtual Machine Activation (AVMA) Services */
const struct hv_guid hv_guid_avma1 = {
	{ 0x55, 0xb2, 0x87, 0x44, 0x8c, 0xb8, 0x3f, 0x40,
	  0xbb, 0x51, 0xd1, 0xf6, 0x9c, 0xf1, 0x7f, 0x87 }
};

const struct hv_guid hv_guid_avma2 = {
	{ 0xf4, 0xba, 0x75, 0x33, 0x15, 0x9e, 0x30, 0x4b,
	  0xb7, 0x65, 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b }
};

const struct hv_guid hv_guid_avma3 = {
	{ 0xa0, 0x1f, 0x22, 0x99, 0xad, 0x24, 0xe2, 0x11,
	  0xbe, 0x98, 0x00, 0x1a, 0xa0, 0x1b, 0xbf, 0x6e }
};

const struct hv_guid hv_guid_avma4 = {
	{ 0x16, 0x57, 0xe6, 0xf8, 0xb3, 0x3c, 0x06, 0x4a,
	  0x9a, 0x60, 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5 }
};
#endif	/* HYPERV_DEBUG */
264 :
265 : int
266 0 : hv_match(struct device *parent, void *match, void *aux)
267 : {
268 0 : struct pv_attach_args *pva = aux;
269 0 : struct pvbus_hv *hv = &pva->pva_hv[PVBUS_HYPERV];
270 :
271 0 : if ((hv->hv_major == 0 && hv->hv_minor == 0) || hv->hv_base == 0)
272 0 : return (0);
273 :
274 0 : return (1);
275 0 : }
276 :
/*
 * Attach the VMBus root device: verify required hypervisor features,
 * register the guest ID and timecounter, set up the hypercall page,
 * per-cpu interrupt pages and the VMBus connection, then enumerate
 * and attach child channels.  Any step failing aborts the attach;
 * earlier steps print their own diagnostics.
 */
void
hv_attach(struct device *parent, struct device *self, void *aux)
{
	struct hv_softc *sc = (struct hv_softc *)self;
	struct pv_attach_args *pva = aux;
	struct pvbus_hv *hv = &pva->pva_hv[PVBUS_HYPERV];

	sc->sc_pvbus = hv;
	sc->sc_dmat = pva->pva_dmat;

	/* Both hypercall and SynIC MSR interfaces are mandatory. */
	if (!(hv->hv_features & CPUID_HV_MSR_HYPERCALL) ||
	    !(hv->hv_features & CPUID_HV_MSR_SYNIC)) {
		printf(": not functional\n");
		return;
	}

	DPRINTF("\n");

	/* Announce the guest OS identity before using the interface. */
	hv_set_version(sc);

	if (hv->hv_features & CPUID_HV_MSR_TIME_REFCNT)
		tc_init(&hv_timecounter);

	if (hv_init_hypercall(sc))
		return;

	/* Wire it up to the global */
	hv_sc = sc;

	if (hv_init_interrupts(sc))
		return;

	if (hv_vmbus_connect(sc))
		return;

	DPRINTF("%s", sc->sc_dev.dv_xname);
	printf(": protocol %d.%d, features %#x\n",
	    VMBUS_VERSION_MAJOR(sc->sc_proto),
	    VMBUS_VERSION_MINOR(sc->sc_proto),
	    hv->hv_features);

	if (hv_channel_scan(sc))
		return;

	/* Attach heartbeat, KVP and other "internal" services */
	hv_attach_icdevs(sc);

	/* Attach devices with external drivers */
	hv_attach_devices(sc);
}
327 :
/*
 * Report the guest operating system identity to the hypervisor
 * by writing the guest ID MSR (OS type plus the OpenBSD version,
 * which encodes the release/build date).
 */
void
hv_set_version(struct hv_softc *sc)
{
	uint64_t ver;

	/* OpenBSD build date */
	ver = MSR_HV_GUESTID_OSTYPE_OPENBSD;
	ver |= (uint64_t)OpenBSD << MSR_HV_GUESTID_VERSION_SHIFT;
	wrmsr(MSR_HV_GUEST_OS_ID, ver);
}
338 :
339 : u_int
340 0 : hv_gettime(struct timecounter *tc)
341 : {
342 0 : u_int now = rdmsr(MSR_HV_TIME_REF_COUNT);
343 :
344 0 : return (now);
345 : }
346 :
/*
 * Enable the hypercall interface: point the hypervisor at our
 * pre-reserved hypercall code page via the hypercall MSR and verify
 * the enable bit sticks.  Returns 0 on success, -1 on failure
 * (diagnostics continue the "hyperv0 at ..." attach line, hence the
 * leading ": " in the messages).
 */
int
hv_init_hypercall(struct hv_softc *sc)
{
	extern void *hv_hypercall_page;
	uint64_t msr;
	paddr_t pa;

	sc->sc_hc = &hv_hypercall_page;

	/* The MSR wants the physical frame of the page. */
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_hc, &pa)) {
		printf(": hypercall page PA extraction failed\n");
		return (-1);
	}

	msr = (atop(pa) << MSR_HV_HYPERCALL_PGSHIFT) | MSR_HV_HYPERCALL_ENABLE;
	wrmsr(MSR_HV_HYPERCALL, msr);

	/* Read back to confirm the hypervisor accepted the page. */
	if (!(rdmsr(MSR_HV_HYPERCALL) & MSR_HV_HYPERCALL_ENABLE)) {
		printf(": failed to set up a hypercall page\n");
		return (-1);
	}

	return (0);
}
371 :
372 : uint64_t
373 0 : hv_hypercall(struct hv_softc *sc, uint64_t control, void *input,
374 : void *output)
375 : {
376 0 : paddr_t input_pa = 0, output_pa = 0;
377 : uint64_t status = 0;
378 :
379 0 : if (input != NULL &&
380 0 : pmap_extract(pmap_kernel(), (vaddr_t)input, &input_pa) == 0) {
381 0 : printf("%s: hypercall input PA extraction failed\n",
382 0 : sc->sc_dev.dv_xname);
383 0 : return (~HYPERCALL_STATUS_SUCCESS);
384 : }
385 :
386 0 : if (output != NULL &&
387 0 : pmap_extract(pmap_kernel(), (vaddr_t)output, &output_pa) == 0) {
388 0 : printf("%s: hypercall output PA extraction failed\n",
389 0 : sc->sc_dev.dv_xname);
390 0 : return (~HYPERCALL_STATUS_SUCCESS);
391 : }
392 :
393 : #ifdef __amd64__
394 0 : __asm__ __volatile__ ("mov %0, %%r8" : : "r" (output_pa) : "r8");
395 0 : __asm__ __volatile__ ("call *%3" : "=a" (status) : "c" (control),
396 0 : "d" (input_pa), "m" (sc->sc_hc));
397 : #else /* __i386__ */
398 : {
399 : uint32_t control_hi = control >> 32;
400 : uint32_t control_lo = control & 0xfffffffff;
401 : uint32_t status_hi = 1;
402 : uint32_t status_lo = 1;
403 :
404 : __asm__ __volatile__ ("call *%8" :
405 : "=d" (status_hi), "=a"(status_lo) :
406 : "d" (control_hi), "a" (control_lo),
407 : "b" (0), "c" (input_pa), "D" (0), "S" (output_pa),
408 : "m" (sc->sc_hc));
409 :
410 : status = status_lo | ((uint64_t)status_hi << 32);
411 : }
412 : #endif /* __amd64__ */
413 :
414 0 : return (status);
415 0 : }
416 :
/*
 * Set up interrupt delivery state for the current cpu: the request
 * and response message queues with their mutexes, and the per-cpu
 * SynIC message (SIMP) and event flag (SIEP) pages, then program the
 * SynIC itself.  Returns 0 on success, -1 on allocation failure.
 */
int
hv_init_interrupts(struct hv_softc *sc)
{
	struct cpu_info *ci = curcpu();
	int cpu = CPU_INFO_UNIT(ci);

	sc->sc_idtvec = LAPIC_HYPERV_VECTOR;

	TAILQ_INIT(&sc->sc_reqs);
	mtx_init(&sc->sc_reqlck, IPL_NET);

	TAILQ_INIT(&sc->sc_rsps);
	mtx_init(&sc->sc_rsplck, IPL_NET);

	/* SynIC message page, handed to the hypervisor in hv_init_synic() */
	sc->sc_simp[cpu] = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (sc->sc_simp[cpu] == NULL) {
		printf(": failed to allocate SIMP\n");
		return (-1);
	}

	/* SynIC event flags page */
	sc->sc_siep[cpu] = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (sc->sc_siep[cpu] == NULL) {
		printf(": failed to allocate SIEP\n");
		km_free(sc->sc_simp[cpu], PAGE_SIZE, &kv_any, &kp_zero);
		return (-1);
	}

	/* Start from the oldest protocol; hv_vmbus_connect() negotiates up. */
	sc->sc_proto = VMBUS_VERSION_WS2008;

	return (hv_init_synic(sc));
}
448 :
/*
 * Program the synthetic interrupt controller (SynIC) for the current
 * cpu: install the physical addresses of the SIMP and SIEP pages,
 * route the VMBus message SINT to our IDT vector with auto-EOI, and
 * finally flip the global SynIC enable bit.  Returns 0 on success,
 * -1 if a page's physical address cannot be resolved.
 */
int
hv_init_synic(struct hv_softc *sc)
{
	struct cpu_info *ci = curcpu();
	int cpu = CPU_INFO_UNIT(ci);
	uint64_t simp, siefp, sctrl, sint;
	paddr_t pa;

	/*
	 * Setup the Synic's message page
	 */
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_simp[cpu], &pa)) {
		printf(": SIMP PA extraction failed\n");
		return (-1);
	}
	/* Preserve the MSR's low (flag) bits, replace the frame number. */
	simp = rdmsr(MSR_HV_SIMP);
	simp &= (1 << MSR_HV_SIMP_PGSHIFT) - 1;
	simp |= (atop(pa) << MSR_HV_SIMP_PGSHIFT);
	simp |= MSR_HV_SIMP_ENABLE;
	wrmsr(MSR_HV_SIMP, simp);

	/*
	 * Setup the Synic's event page
	 */
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_siep[cpu], &pa)) {
		printf(": SIEP PA extraction failed\n");
		return (-1);
	}
	siefp = rdmsr(MSR_HV_SIEFP);
	siefp &= (1<<MSR_HV_SIEFP_PGSHIFT) - 1;
	siefp |= (atop(pa) << MSR_HV_SIEFP_PGSHIFT);
	siefp |= MSR_HV_SIEFP_ENABLE;
	wrmsr(MSR_HV_SIEFP, siefp);

	/*
	 * Configure and unmask SINT for message and event flags
	 */
	sint = rdmsr(MSR_HV_SINT0 + VMBUS_SINT_MESSAGE);
	sint = sc->sc_idtvec | MSR_HV_SINT_AUTOEOI |
	    (sint & MSR_HV_SINT_RSVD_MASK);
	wrmsr(MSR_HV_SINT0 + VMBUS_SINT_MESSAGE, sint);

	/* Enable the global synic bit */
	sctrl = rdmsr(MSR_HV_SCONTROL);
	sctrl |= MSR_HV_SCTRL_ENABLE;
	wrmsr(MSR_HV_SCONTROL, sctrl);

	/* Remember the hypervisor's virtual processor id for this cpu. */
	sc->sc_vcpus[cpu] = rdmsr(MSR_HV_VP_INDEX);

	DPRINTF("vcpu%u: SIMP %#llx SIEFP %#llx SCTRL %#llx\n",
	    sc->sc_vcpus[cpu], simp, siefp, sctrl);

	return (0);
}
503 :
504 : int
505 0 : hv_cmd(struct hv_softc *sc, void *cmd, size_t cmdlen, void *rsp,
506 : size_t rsplen, int flags)
507 : {
508 0 : struct hv_msg msg;
509 : int rv;
510 :
511 0 : if (cmdlen > VMBUS_MSG_DSIZE_MAX) {
512 0 : printf("%s: payload too large (%lu)\n", sc->sc_dev.dv_xname,
513 : cmdlen);
514 0 : return (EMSGSIZE);
515 : }
516 :
517 0 : memset(&msg, 0, sizeof(msg));
518 :
519 0 : msg.msg_req.hc_dsize = cmdlen;
520 0 : memcpy(msg.msg_req.hc_data, cmd, cmdlen);
521 :
522 0 : if (!(flags & HCF_NOREPLY)) {
523 0 : msg.msg_rsp = rsp;
524 0 : msg.msg_rsplen = rsplen;
525 0 : } else
526 0 : msg.msg_flags |= MSGF_NOQUEUE;
527 :
528 0 : if (flags & HCF_NOSLEEP)
529 0 : msg.msg_flags |= MSGF_NOSLEEP;
530 :
531 0 : if ((rv = hv_start(sc, &msg)) != 0)
532 0 : return (rv);
533 0 : return (hv_reply(sc, &msg));
534 0 : }
535 :
/*
 * Post a message to the host, retrying with increasing delays if the
 * hypercall reports a transient failure.  Unless MSGF_NOQUEUE is set,
 * the message is first queued on sc_reqs so hv_channel_response() can
 * match the eventual reply; on permanent failure it is dequeued again.
 * Returns 0 on success or EIO.
 */
int
hv_start(struct hv_softc *sc, struct hv_msg *msg)
{
	/* Back-off schedule in microseconds for HYPERCALL_POST_MESSAGE. */
	const int delays[] = { 100, 100, 100, 500, 500, 5000, 5000, 5000 };
	const char *wchan = "hvstart";
	uint16_t status;
	int i, s;

	msg->msg_req.hc_connid = VMBUS_CONNID_MESSAGE;
	msg->msg_req.hc_msgtype = 1;

	if (!(msg->msg_flags & MSGF_NOQUEUE)) {
		mtx_enter(&sc->sc_reqlck);
		TAILQ_INSERT_TAIL(&sc->sc_reqs, msg, msg_entry);
		mtx_leave(&sc->sc_reqlck);
	}

	for (i = 0; i < nitems(delays); i++) {
		status = hv_hypercall(sc, HYPERCALL_POST_MESSAGE,
		    &msg->msg_req, NULL);
		if (status == HYPERCALL_STATUS_SUCCESS)
			break;
		if (msg->msg_flags & MSGF_NOSLEEP) {
			/* Can't sleep: poll the interrupt path by hand. */
			delay(delays[i]);
			s = splnet();
			hv_intr();
			splx(s);
		} else
			tsleep(wchan, PRIBIO, wchan, 1);
	}
	if (status != 0) {
		printf("%s: posting vmbus message failed with %d\n",
		    sc->sc_dev.dv_xname, status);
		/* Undo the queueing above; no reply will ever match. */
		if (!(msg->msg_flags & MSGF_NOQUEUE)) {
			mtx_enter(&sc->sc_reqlck);
			TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry);
			mtx_leave(&sc->sc_reqlck);
		}
		return (EIO);
	}

	return (0);
}
579 :
580 : static int
581 0 : hv_reply_done(struct hv_softc *sc, struct hv_msg *msg)
582 : {
583 : struct hv_msg *m;
584 :
585 0 : mtx_enter(&sc->sc_rsplck);
586 0 : TAILQ_FOREACH(m, &sc->sc_rsps, msg_entry) {
587 0 : if (m == msg) {
588 0 : mtx_leave(&sc->sc_rsplck);
589 0 : return (1);
590 : }
591 : }
592 0 : mtx_leave(&sc->sc_rsplck);
593 0 : return (0);
594 0 : }
595 :
/*
 * Wait for the host's reply to a previously started message and take
 * it off the response queue.  Messages sent with MSGF_NOQUEUE get no
 * reply and return immediately.  The reply payload itself was already
 * copied into msg->msg_rsp by hv_channel_response().  Always returns 0.
 */
int
hv_reply(struct hv_softc *sc, struct hv_msg *msg)
{
	if (msg->msg_flags & MSGF_NOQUEUE)
		return (0);

	hv_wait(sc, hv_reply_done, msg, msg, "hvreply");

	mtx_enter(&sc->sc_rsplck);
	TAILQ_REMOVE(&sc->sc_rsps, msg, msg_entry);
	mtx_leave(&sc->sc_rsplck);

	return (0);
}
610 :
/*
 * Wait until cond(sc, msg) becomes true.  With MSGF_NOSLEEP the wait
 * busy-polls and manually drains the interrupt handler (needed while
 * "cold", before interrupts run); otherwise it tsleep()s on wchan
 * one tick at a time.
 */
void
hv_wait(struct hv_softc *sc, int (*cond)(struct hv_softc *, struct hv_msg *),
    struct hv_msg *msg, void *wchan, const char *wmsg)
{
	int s;

	/* Sleeping before the scheduler runs would hang the boot. */
	KASSERT(cold ? msg->msg_flags & MSGF_NOSLEEP : 1);

	while (!cond(sc, msg)) {
		if (msg->msg_flags & MSGF_NOSLEEP) {
			delay(1000);
			s = splnet();
			hv_intr();
			splx(s);
		} else
			tsleep(wchan, PRIBIO, wmsg ? wmsg : "hvwait", 1);
	}
}
629 :
630 : uint16_t
631 0 : hv_intr_signal(struct hv_softc *sc, void *con)
632 : {
633 : uint64_t status;
634 :
635 0 : status = hv_hypercall(sc, HYPERCALL_SIGNAL_EVENT, con, NULL);
636 0 : return ((uint16_t)status);
637 : }
638 :
639 : void
640 0 : hv_intr(void)
641 : {
642 0 : struct hv_softc *sc = hv_sc;
643 :
644 0 : hv_event_intr(sc);
645 0 : hv_message_intr(sc);
646 0 : }
647 :
/*
 * Process pending channel events.  On WS2008/Win7 protocols only a
 * single "something happened" bit exists in the event page and the
 * real per-channel bits live in our receive half of the shared events
 * page (sc_revents); on Win8+ the event page itself carries one bit
 * per channel id.  Each set bit is atomically consumed and the owning
 * channel's work is scheduled.
 */
void
hv_event_intr(struct hv_softc *sc)
{
	struct vmbus_evtflags *evt;
	struct cpu_info *ci = curcpu();
	int cpu = CPU_INFO_UNIT(ci);
	int bit, row, maxrow, chanid;
	struct hv_channel *ch;
	u_long *revents, pending;

	evt = (struct vmbus_evtflags *)sc->sc_siep[cpu] +
	    VMBUS_SINT_MESSAGE;
	if ((sc->sc_proto == VMBUS_VERSION_WS2008) ||
	    (sc->sc_proto == VMBUS_VERSION_WIN7)) {
		if (!test_bit(0, &evt->evt_flags[0]))
			return;
		clear_bit(0, &evt->evt_flags[0]);
		maxrow = VMBUS_CHAN_MAX_COMPAT / VMBUS_EVTFLAG_LEN;
		/*
		 * receive size is 1/2 page and divide that by 4 bytes
		 */
		revents = sc->sc_revents;
	} else {
		maxrow = nitems(evt->evt_flags);
		/*
		 * On Host with Win8 or above, the event page can be
		 * checked directly to get the id of the channel
		 * that has the pending interrupt.
		 */
		revents = &evt->evt_flags[0];
	}

	for (row = 0; row < maxrow; row++) {
		if (revents[row] == 0)
			continue;
		/* Claim the whole word so concurrent setters aren't lost. */
		pending = atomic_swap_ulong(&revents[row], 0);
		for (bit = 0; pending > 0; pending >>= 1, bit++) {
			if ((pending & 1) == 0)
				continue;
			chanid = (row * LONG_BIT) + bit;
			/* vmbus channel protocol message */
			if (chanid == 0)
				continue;
			ch = hv_channel_lookup(sc, chanid);
			if (ch == NULL) {
				printf("%s: unhandled event on %d\n",
				    sc->sc_dev.dv_xname, chanid);
				continue;
			}
			if (ch->ch_state != HV_CHANSTATE_OPENED) {
				printf("%s: channel %d is not active\n",
				    sc->sc_dev.dv_xname, chanid);
				continue;
			}
			ch->ch_evcnt.ec_count++;
			hv_channel_schedule(ch);
		}
	}
}
707 :
/*
 * Drain the SynIC message slot for this cpu, dispatching each channel
 * protocol message through hv_msg_dispatch[].  The slot is released by
 * writing VMBUS_MSGTYPE_NONE back, and the EOM MSR is written when the
 * host flagged that another message is pending behind this one.
 */
void
hv_message_intr(struct hv_softc *sc)
{
	struct vmbus_message *msg;
	struct vmbus_chanmsg_hdr *hdr;
	struct cpu_info *ci = curcpu();
	int cpu = CPU_INFO_UNIT(ci);

	for (;;) {
		msg = (struct vmbus_message *)sc->sc_simp[cpu] +
		    VMBUS_SINT_MESSAGE;
		if (msg->msg_type == VMBUS_MSGTYPE_NONE)
			break;

		hdr = (struct vmbus_chanmsg_hdr *)msg->msg_data;
		/* Bounds-check before indexing the dispatch table. */
		if (hdr->chm_type >= VMBUS_CHANMSG_COUNT) {
			printf("%s: unhandled message type %u flags %#x\n",
			    sc->sc_dev.dv_xname, hdr->chm_type,
			    msg->msg_flags);
			goto skip;
		}
		if (hv_msg_dispatch[hdr->chm_type].hmd_handler)
			hv_msg_dispatch[hdr->chm_type].hmd_handler(sc, hdr);
		else
			printf("%s: unhandled message type %u\n",
			    sc->sc_dev.dv_xname, hdr->chm_type);
skip:
		/* Release the slot; the barrier orders it before EOM. */
		msg->msg_type = VMBUS_MSGTYPE_NONE;
		virtio_membar_sync();
		if (msg->msg_flags & VMBUS_MSGFLAG_PENDING)
			wrmsr(MSR_HV_EOM, 0);
	}
}
741 :
/*
 * Handle a response-type protocol message: find the oldest queued
 * request whose type this response answers (per hv_msg_dispatch),
 * copy the response payload into the requester's buffer, move the
 * message to the response queue and wake the waiter in hv_reply().
 * An unmatched response is silently dropped.
 */
void
hv_channel_response(struct hv_softc *sc, struct vmbus_chanmsg_hdr *rsphdr)
{
	struct hv_msg *msg;
	struct vmbus_chanmsg_hdr *reqhdr;
	int req;

	req = hv_msg_dispatch[rsphdr->chm_type].hmd_request;
	mtx_enter(&sc->sc_reqlck);
	TAILQ_FOREACH(msg, &sc->sc_reqs, msg_entry) {
		reqhdr = (struct vmbus_chanmsg_hdr *)&msg->msg_req.hc_data;
		if (reqhdr->chm_type == req) {
			TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry);
			break;
		}
	}
	mtx_leave(&sc->sc_reqlck);
	if (msg != NULL) {
		memcpy(msg->msg_rsp, rsphdr, msg->msg_rsplen);
		mtx_enter(&sc->sc_rsplck);
		TAILQ_INSERT_TAIL(&sc->sc_rsps, msg, msg_entry);
		mtx_leave(&sc->sc_rsplck);
		wakeup(msg);
	}
}
767 :
768 : void
769 0 : hv_channel_offer(struct hv_softc *sc, struct vmbus_chanmsg_hdr *hdr)
770 : {
771 : struct hv_offer *co;
772 :
773 0 : co = malloc(sizeof(*co), M_DEVBUF, M_NOWAIT | M_ZERO);
774 0 : if (co == NULL) {
775 0 : printf("%s: failed to allocate an offer object\n",
776 0 : sc->sc_dev.dv_xname);
777 0 : return;
778 : }
779 :
780 0 : memcpy(&co->co_chan, hdr, sizeof(co->co_chan));
781 :
782 0 : mtx_enter(&sc->sc_offerlck);
783 0 : SIMPLEQ_INSERT_TAIL(&sc->sc_offers, co, co_entry);
784 0 : mtx_leave(&sc->sc_offerlck);
785 0 : }
786 :
787 : void
788 0 : hv_channel_rescind(struct hv_softc *sc, struct vmbus_chanmsg_hdr *hdr)
789 : {
790 : const struct vmbus_chanmsg_chrescind *cmd;
791 :
792 0 : cmd = (const struct vmbus_chanmsg_chrescind *)hdr;
793 0 : printf("%s: revoking channel %u\n", sc->sc_dev.dv_xname,
794 0 : cmd->chm_chanid);
795 0 : }
796 :
/*
 * CHOFFER_DONE handler: all channel offers have been delivered.
 * Flag it and wake hv_channel_scan(), which waits on &sc->sc_offers.
 */
void
hv_channel_delivered(struct hv_softc *sc, struct vmbus_chanmsg_hdr *hdr)
{
	atomic_setbits_int(&sc->sc_flags, HSF_OFFERS_DELIVERED);
	wakeup(&sc->sc_offers);
}
803 :
/*
 * Establish the VMBus connection: allocate the shared event flags page
 * (send half/receive half) and both monitor pages, hand their physical
 * addresses to the host in a CONNECT message, and negotiate the newest
 * protocol version the host accepts (tried newest-first).  Returns 0
 * on success; on any failure all pages are freed and -1 is returned.
 */
int
hv_vmbus_connect(struct hv_softc *sc)
{
	/* Newest-first: the first version the host acks wins. */
	const uint32_t versions[] = {
		VMBUS_VERSION_WIN8_1, VMBUS_VERSION_WIN8,
		VMBUS_VERSION_WIN7, VMBUS_VERSION_WS2008
	};
	struct vmbus_chanmsg_connect cmd;
	struct vmbus_chanmsg_connect_resp rsp;
	paddr_t epa, mpa1, mpa2;
	int i;

	sc->sc_events = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (sc->sc_events == NULL) {
		printf(": failed to allocate channel port events page\n");
		goto errout;
	}
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_events, &epa)) {
		printf(": channel port events page PA extraction failed\n");
		goto errout;
	}

	/* Guest->host bits in the first half, host->guest in the second. */
	sc->sc_wevents = (u_long *)sc->sc_events;
	sc->sc_revents = (u_long *)((caddr_t)sc->sc_events + (PAGE_SIZE >> 1));

	sc->sc_monitor[0] = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (sc->sc_monitor[0] == NULL) {
		printf(": failed to allocate monitor page 1\n");
		goto errout;
	}
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_monitor[0], &mpa1)) {
		printf(": monitor page 1 PA extraction failed\n");
		goto errout;
	}

	sc->sc_monitor[1] = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (sc->sc_monitor[1] == NULL) {
		printf(": failed to allocate monitor page 2\n");
		goto errout;
	}
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_monitor[1], &mpa2)) {
		printf(": monitor page 2 PA extraction failed\n");
		goto errout;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CONNECT;
	cmd.chm_evtflags = (uint64_t)epa;
	cmd.chm_mnf1 = (uint64_t)mpa1;
	cmd.chm_mnf2 = (uint64_t)mpa2;

	memset(&rsp, 0, sizeof(rsp));

	for (i = 0; i < nitems(versions); i++) {
		cmd.chm_ver = versions[i];
		if (hv_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp),
		    HCF_NOSLEEP)) {
			DPRINTF("%s: CONNECT failed\n",
			    sc->sc_dev.dv_xname);
			goto errout;
		}
		if (rsp.chm_done) {
			sc->sc_flags |= HSF_CONNECTED;
			sc->sc_proto = versions[i];
			sc->sc_handle = VMBUS_GPADL_START;
			break;
		}
	}
	if (i == nitems(versions)) {
		printf("%s: failed to negotiate protocol version\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}

	return (0);

 errout:
	/* Release whatever was allocated before the failure. */
	if (sc->sc_events) {
		km_free(sc->sc_events, PAGE_SIZE, &kv_any, &kp_zero);
		sc->sc_events = NULL;
		sc->sc_wevents = NULL;
		sc->sc_revents = NULL;
	}
	if (sc->sc_monitor[0]) {
		km_free(sc->sc_monitor[0], PAGE_SIZE, &kv_any, &kp_zero);
		sc->sc_monitor[0] = NULL;
	}
	if (sc->sc_monitor[1]) {
		km_free(sc->sc_monitor[1], PAGE_SIZE, &kv_any, &kp_zero);
		sc->sc_monitor[1] = NULL;
	}
	return (-1);
}
897 :
#ifdef HYPERV_DEBUG
/*
 * Format a GUID into its canonical textual form.  The wire format is
 * little-endian in the first three fields, so bytes are emitted in
 * the order below:
 *
 *	  3  0  5 4  7 6  8 9  10       15
 *	 33221100-5544-7766-9988-FFEEDDCCBBAA
 *
 * Returns a pointer to a static buffer (not reentrant).
 */
static inline char *
guidprint(struct hv_guid *a)
{
	static char buf[16 * 2 + 4 + 1];
	/* Byte emission order; -1 marks a group separator. */
	static const int8_t order[] = {
		3, 2, 1, 0, -1, 5, 4, -1, 7, 6, -1,
		8, 9, -1, 10, 11, 12, 13, 14, 15
	};
	int i, j = 0;

	for (i = 0; i < (int)(sizeof(order) / sizeof(order[0])); i++) {
		if (order[i] == -1) {
			buf[j++] = '-';
			continue;
		}
		snprintf(&buf[j], 3, "%02x", (uint8_t)a->data[order[i]]);
		j += 2;
	}
	return (&buf[0]);
}
#endif /* HYPERV_DEBUG */
924 :
/*
 * Copy a human-readable identifier for the given interface GUID into
 * str (at most size bytes, NUL-terminated).  Known GUIDs map to short
 * names; in debug builds an unknown GUID is rendered in canonical
 * textual form, otherwise str is left untouched.
 */
void
hv_guid_sprint(struct hv_guid *guid, char *str, size_t size)
{
	const struct {
		const struct hv_guid *guid;
		const char *ident;
	} map[] = {
		{ &hv_guid_network,	"network" },
		{ &hv_guid_ide,		"ide" },
		{ &hv_guid_scsi,	"scsi" },
		{ &hv_guid_shutdown,	"shutdown" },
		{ &hv_guid_timesync,	"timesync" },
		{ &hv_guid_heartbeat,	"heartbeat" },
		{ &hv_guid_kvp,		"kvp" },
#ifdef HYPERV_DEBUG
		{ &hv_guid_vss,		"vss" },
		{ &hv_guid_dynmem,	"dynamic-memory" },
		{ &hv_guid_mouse,	"mouse" },
		{ &hv_guid_kbd,		"keyboard" },
		{ &hv_guid_video,	"video" },
		{ &hv_guid_fc,		"fiber-channel" },
		{ &hv_guid_fcopy,	"file-copy" },
		{ &hv_guid_pcie,	"pcie-passthrough" },
		{ &hv_guid_netdir,	"network-direct" },
		{ &hv_guid_rdesktop,	"remote-desktop" },
		{ &hv_guid_avma1,	"avma-1" },
		{ &hv_guid_avma2,	"avma-2" },
		{ &hv_guid_avma3,	"avma-3" },
		{ &hv_guid_avma4,	"avma-4" },
#endif
	};
	int i;

	for (i = 0; i < nitems(map); i++) {
		if (memcmp(guid, map[i].guid, sizeof(*guid)) == 0) {
			strlcpy(str, map[i].ident, size);
			return;
		}
	}
#ifdef HYPERV_DEBUG
	strlcpy(str, guidprint(guid), size);
#endif
}
968 :
/*
 * hv_wait() condition for hv_channel_scan(): true once the
 * CHOFFER_DONE handler has set HSF_OFFERS_DELIVERED.  msg is unused.
 */
static int
hv_channel_scan_done(struct hv_softc *sc, struct hv_msg *msg __unused)
{
	return (sc->sc_flags & HSF_OFFERS_DELIVERED);
}
974 :
/*
 * Request all channel offers from the host (CHREQUEST), wait until the
 * CHOFFER_DONE message reports delivery complete, then turn each queued
 * offer into a channel via hv_process_offer().  Returns 0 on success,
 * -1 if the request could not be posted.
 */
int
hv_channel_scan(struct hv_softc *sc)
{
	struct vmbus_chanmsg_hdr hdr;
	struct vmbus_chanmsg_choffer rsp;
	struct hv_offer *co;

	SIMPLEQ_INIT(&sc->sc_offers);
	mtx_init(&sc->sc_offerlck, IPL_NET);

	memset(&hdr, 0, sizeof(hdr));
	hdr.chm_type = VMBUS_CHANMSG_CHREQUEST;

	/* Offers arrive as unsolicited messages, hence HCF_NOREPLY. */
	if (hv_cmd(sc, &hdr, sizeof(hdr), &rsp, sizeof(rsp),
	    HCF_NOSLEEP | HCF_NOREPLY)) {
		DPRINTF("%s: CHREQUEST failed\n", sc->sc_dev.dv_xname);
		return (-1);
	}

	/*
	 * The msg argument is a dummy: hv_channel_scan_done() ignores
	 * it and only tests HSF_OFFERS_DELIVERED.  MSGF_NOSLEEP is not
	 * set in the zeroed hdr, so this path polls via the wchan.
	 */
	hv_wait(sc, hv_channel_scan_done, (struct hv_msg *)&hdr,
	    &sc->sc_offers, "hvscan");

	TAILQ_INIT(&sc->sc_channels);
	mtx_init(&sc->sc_channelck, IPL_NET);

	/* Drop the lock around hv_process_offer(); it may sleep/print. */
	mtx_enter(&sc->sc_offerlck);
	while (!SIMPLEQ_EMPTY(&sc->sc_offers)) {
		co = SIMPLEQ_FIRST(&sc->sc_offers);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_offers, co_entry);
		mtx_leave(&sc->sc_offerlck);

		hv_process_offer(sc, co);
		free(co, M_DEVBUF, sizeof(*co));

		mtx_enter(&sc->sc_offerlck);
	}
	mtx_leave(&sc->sc_offerlck);

	return (0);
}
1015 :
1016 : void
1017 0 : hv_process_offer(struct hv_softc *sc, struct hv_offer *co)
1018 : {
1019 : struct hv_channel *ch, *nch;
1020 :
1021 0 : nch = malloc(sizeof(*nch), M_DEVBUF, M_ZERO | M_NOWAIT);
1022 0 : if (nch == NULL) {
1023 0 : printf("%s: failed to allocate memory for the channel\n",
1024 0 : sc->sc_dev.dv_xname);
1025 0 : return;
1026 : }
1027 0 : nch->ch_sc = sc;
1028 0 : hv_guid_sprint(&co->co_chan.chm_chtype, nch->ch_ident,
1029 : sizeof(nch->ch_ident));
1030 :
1031 : /*
1032 : * By default we setup state to enable batched reading.
1033 : * A specific service can choose to disable this prior
1034 : * to opening the channel.
1035 : */
1036 0 : nch->ch_flags |= CHF_BATCHED;
1037 :
1038 0 : KASSERT((((vaddr_t)&nch->ch_monprm) & 0x7) == 0);
1039 0 : memset(&nch->ch_monprm, 0, sizeof(nch->ch_monprm));
1040 0 : nch->ch_monprm.mp_connid = VMBUS_CONNID_EVENT;
1041 :
1042 0 : if (sc->sc_proto != VMBUS_VERSION_WS2008)
1043 0 : nch->ch_monprm.mp_connid = co->co_chan.chm_connid;
1044 :
1045 0 : if (co->co_chan.chm_flags1 & VMBUS_CHOFFER_FLAG1_HASMNF) {
1046 0 : nch->ch_mgroup = co->co_chan.chm_montrig / VMBUS_MONTRIG_LEN;
1047 0 : nch->ch_mindex = co->co_chan.chm_montrig % VMBUS_MONTRIG_LEN;
1048 0 : nch->ch_flags |= CHF_MONITOR;
1049 0 : }
1050 :
1051 0 : nch->ch_id = co->co_chan.chm_chanid;
1052 :
1053 0 : memcpy(&nch->ch_type, &co->co_chan.chm_chtype, sizeof(ch->ch_type));
1054 0 : memcpy(&nch->ch_inst, &co->co_chan.chm_chinst, sizeof(ch->ch_inst));
1055 :
1056 0 : mtx_enter(&sc->sc_channelck);
1057 0 : TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
1058 0 : if (!memcmp(&ch->ch_type, &nch->ch_type, sizeof(ch->ch_type)) &&
1059 0 : !memcmp(&ch->ch_inst, &nch->ch_inst, sizeof(ch->ch_inst)))
1060 : break;
1061 : }
1062 0 : if (ch != NULL) {
1063 0 : if (co->co_chan.chm_subidx == 0) {
1064 0 : printf("%s: unknown offer \"%s\"\n",
1065 0 : sc->sc_dev.dv_xname, nch->ch_ident);
1066 0 : mtx_leave(&sc->sc_channelck);
1067 0 : free(nch, M_DEVBUF, sizeof(*nch));
1068 0 : return;
1069 : }
1070 : #ifdef HYPERV_DEBUG
1071 : printf("%s: subchannel %u for \"%s\"\n", sc->sc_dev.dv_xname,
1072 : co->co_chan.chm_subidx, ch->ch_ident);
1073 : #endif
1074 0 : mtx_leave(&sc->sc_channelck);
1075 0 : free(nch, M_DEVBUF, sizeof(*nch));
1076 0 : return;
1077 : }
1078 :
1079 0 : nch->ch_state = HV_CHANSTATE_OFFERED;
1080 :
1081 0 : TAILQ_INSERT_TAIL(&sc->sc_channels, nch, ch_entry);
1082 0 : mtx_leave(&sc->sc_channelck);
1083 :
1084 : #ifdef HYPERV_DEBUG
1085 : printf("%s: channel %u: \"%s\"", sc->sc_dev.dv_xname, nch->ch_id,
1086 : nch->ch_ident);
1087 : if (nch->ch_flags & CHF_MONITOR)
1088 : printf(", monitor %u\n", co->co_chan.chm_montrig);
1089 : else
1090 : printf("\n");
1091 : #endif
1092 0 : }
1093 :
1094 : struct hv_channel *
1095 0 : hv_channel_lookup(struct hv_softc *sc, uint32_t relid)
1096 : {
1097 : struct hv_channel *ch;
1098 :
1099 0 : TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
1100 0 : if (ch->ch_id == relid)
1101 0 : return (ch);
1102 : }
1103 0 : return (NULL);
1104 0 : }
1105 :
/*
 * Allocate and describe the channel's ring buffer pair.
 *
 * A single zeroed, page-aligned allocation of 2 * buflen bytes holds
 * both rings: the first half is the write (TX) ring, the second half
 * the read (RX) ring.  Each half starts with a struct vmbus_bufring
 * header; rd_dsize is the data area size excluding that header.
 * Finally a GPADL handle covering the whole allocation is obtained
 * from the hypervisor.  Returns 0 on success, -1 on failure.
 */
int
hv_channel_ring_create(struct hv_channel *ch, uint32_t buflen)
{
	struct hv_softc *sc = ch->ch_sc;

	/* Round the data area up to pages, then add the header. */
	buflen = roundup(buflen, PAGE_SIZE) + sizeof(struct vmbus_bufring);
	ch->ch_ring = km_alloc(2 * buflen, &kv_any, &kp_zero, cold ?
	    &kd_nowait : &kd_waitok);
	if (ch->ch_ring == NULL) {
		printf("%s: failed to allocate channel ring\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	ch->ch_ring_size = 2 * buflen;

	/* Write ring occupies the first half of the allocation. */
	memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd));
	ch->ch_wrd.rd_ring = (struct vmbus_bufring *)ch->ch_ring;
	ch->ch_wrd.rd_size = buflen;
	ch->ch_wrd.rd_dsize = buflen - sizeof(struct vmbus_bufring);
	mtx_init(&ch->ch_wrd.rd_lock, IPL_NET);

	/* Read ring occupies the second half. */
	memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd));
	ch->ch_rrd.rd_ring = (struct vmbus_bufring *)((uint8_t *)ch->ch_ring +
	    buflen);
	ch->ch_rrd.rd_size = buflen;
	ch->ch_rrd.rd_dsize = buflen - sizeof(struct vmbus_bufring);
	mtx_init(&ch->ch_rrd.rd_lock, IPL_NET);

	/* Hand the host a GPADL describing the whole ring area. */
	if (hv_handle_alloc(ch, ch->ch_ring, 2 * buflen, &ch->ch_ring_gpadl)) {
		printf("%s: failed to obtain a PA handle for the ring\n",
		    sc->sc_dev.dv_xname);
		hv_channel_ring_destroy(ch);
		return (-1);
	}

	return (0);
}
1143 :
/*
 * Release the channel's ring buffer pair and its GPADL handle, and
 * clear the ring descriptors.
 *
 * NOTE(review): the memory is returned to the kernel before the
 * GPADL is torn down with the host; confirm the host cannot touch
 * the pages between km_free() and hv_handle_free().
 */
void
hv_channel_ring_destroy(struct hv_channel *ch)
{
	km_free(ch->ch_ring, ch->ch_ring_size, &kv_any, &kp_zero);
	ch->ch_ring = NULL;
	hv_handle_free(ch, ch->ch_ring_gpadl);

	memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd));
	memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd));
}
1154 :
/*
 * Open a channel: create its rings (if not already present), install
 * the completion handler and issue CHOPEN to the host.
 *
 * udata/udatalen is optional service-specific data copied into the
 * open request.  handler/arg is the callback invoked when the channel
 * has data pending.  Returns 0 on success, -1 on failure.
 *
 * NOTE(review): on CHOPEN failure the ring is destroyed even when it
 * was created by the caller before this function ran -- confirm no
 * caller relies on a pre-created ring surviving a failed open.
 */
int
hv_channel_open(struct hv_channel *ch, size_t buflen, void *udata,
    size_t udatalen, void (*handler)(void *), void *arg)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_chopen cmd;
	struct vmbus_chanmsg_chopen_resp rsp;
	int rv;

	if (ch->ch_ring == NULL &&
	    hv_channel_ring_create(ch, buflen)) {
		DPRINTF("%s: failed to create channel ring\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHOPEN;
	cmd.chm_openid = ch->ch_id;
	cmd.chm_chanid = ch->ch_id;
	cmd.chm_gpadl = ch->ch_ring_gpadl;
	/* Tell the host where the TX ring ends inside the GPADL. */
	cmd.chm_txbr_pgcnt = ch->ch_wrd.rd_size >> PAGE_SHIFT;
	cmd.chm_vcpuid = ch->ch_vcpu;

	if (udata && udatalen > 0)
		memcpy(cmd.chm_udata, udata, udatalen);

	memset(&rsp, 0, sizeof(rsp));

	/* Install the handler before the host can raise events. */
	ch->ch_handler = handler;
	ch->ch_ctx = arg;

	ch->ch_state = HV_CHANSTATE_OPENED;

	rv = hv_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp),
	    cold ? HCF_NOSLEEP : HCF_SLEEPOK);
	if (rv) {
		/* Roll the channel back to the offered state. */
		hv_channel_ring_destroy(ch);
		DPRINTF("%s: CHOPEN failed with %d\n",
		    sc->sc_dev.dv_xname, rv);
		ch->ch_handler = NULL;
		ch->ch_ctx = NULL;
		ch->ch_state = HV_CHANSTATE_OFFERED;
		return (-1);
	}

	return (0);
}
1203 :
/*
 * Close an open channel: send CHCLOSE to the host and tear down the
 * ring buffers.  Returns 0 on success; on failure the channel is left
 * in the CLOSING state with its rings intact and -1 is returned.
 */
int
hv_channel_close(struct hv_channel *ch)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_chclose cmd;
	int rv;

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHCLOSE;
	cmd.chm_chanid = ch->ch_id;

	ch->ch_state = HV_CHANSTATE_CLOSING;
	/* CHCLOSE is fire-and-forget; the host sends no reply. */
	rv = hv_cmd(sc, &cmd, sizeof(cmd), NULL, 0, HCF_NOREPLY);
	if (rv) {
		DPRINTF("%s: CHCLOSE failed with %d\n",
		    sc->sc_dev.dv_xname, rv);
		return (-1);
	}
	ch->ch_state = HV_CHANSTATE_CLOSED;
	hv_channel_ring_destroy(ch);
	return (0);
}
1226 :
/*
 * Notify the host that this channel has data pending: mark the
 * channel bit in the guest-to-host event flags, then either set the
 * channel's monitor trigger bit (for monitored channels, the host
 * polls these) or signal the host directly via a hypercall.
 */
static inline void
hv_channel_setevent(struct hv_softc *sc, struct hv_channel *ch)
{
	struct vmbus_mon_trig *mtg;

	/* Each uint32_t represents 32 channels */
	set_bit(ch->ch_id, sc->sc_wevents);
	if (ch->ch_flags & CHF_MONITOR) {
		/* sc_monitor[1] is presumably the guest->host page. */
		mtg = &sc->sc_monitor[1]->mnf_trigs[ch->ch_mgroup];
		set_bit(ch->ch_mindex, &mtg->mt_pending);
	} else
		hv_intr_signal(sc, &ch->ch_monprm);
}
1240 :
/*
 * Deferred (taskq) channel service routine.  Runs the channel handler
 * while data is available; unmasks interrupts via unpause and, if
 * more data slipped in after the unmask, re-masks and reschedules
 * itself to close the race.
 */
void
hv_channel_intr(void *arg)
{
	struct hv_channel *ch = arg;

	if (hv_channel_ready(ch))
		ch->ch_handler(ch->ch_ctx);

	/* Unpause returns the bytes still readable after unmasking. */
	if (hv_channel_unpause(ch) == 0)
		return;

	hv_channel_pause(ch);
	hv_channel_schedule(ch);
}
1255 :
/*
 * Create the single-threaded taskq used for deferred (batched)
 * processing of this channel and bind hv_channel_intr to it.
 * Returns 0 on success, -1 if the taskq could not be created.
 */
int
hv_channel_setdeferred(struct hv_channel *ch, const char *name)
{
	ch->ch_taskq = taskq_create(name, 1, IPL_NET, TASKQ_MPSAFE);
	if (ch->ch_taskq == NULL)
		return (-1);
	task_set(&ch->ch_task, hv_channel_intr, ch);
	return (0);
}
1265 :
/*
 * Dispatch channel processing.  Batched channels are paused (RX
 * interrupts masked) and handed to their taskq; non-batched channels,
 * and all channels during early boot (cold), run the handler inline.
 */
void
hv_channel_schedule(struct hv_channel *ch)
{
	if (ch->ch_handler) {
		if (!cold && (ch->ch_flags & CHF_BATCHED)) {
			hv_channel_pause(ch);
			task_add(ch->ch_taskq, &ch->ch_task);
		} else
			ch->ch_handler(ch->ch_ctx);
	}
}
1277 :
1278 : static inline void
1279 0 : hv_ring_put(struct hv_ring_data *wrd, uint8_t *data, uint32_t datalen)
1280 : {
1281 0 : int left = MIN(datalen, wrd->rd_dsize - wrd->rd_prod);
1282 :
1283 0 : memcpy(&wrd->rd_ring->br_data[wrd->rd_prod], data, left);
1284 0 : memcpy(&wrd->rd_ring->br_data[0], data + left, datalen - left);
1285 0 : wrd->rd_prod += datalen;
1286 0 : if (wrd->rd_prod >= wrd->rd_dsize)
1287 0 : wrd->rd_prod -= wrd->rd_dsize;
1288 0 : }
1289 :
1290 : static inline void
1291 0 : hv_ring_get(struct hv_ring_data *rrd, uint8_t *data, uint32_t datalen,
1292 : int peek)
1293 : {
1294 0 : int left = MIN(datalen, rrd->rd_dsize - rrd->rd_cons);
1295 :
1296 0 : memcpy(data, &rrd->rd_ring->br_data[rrd->rd_cons], left);
1297 0 : memcpy(data + left, &rrd->rd_ring->br_data[0], datalen - left);
1298 0 : if (!peek) {
1299 0 : rrd->rd_cons += datalen;
1300 0 : if (rrd->rd_cons >= rrd->rd_dsize)
1301 0 : rrd->rd_cons -= rrd->rd_dsize;
1302 : }
1303 0 : }
1304 :
1305 : static inline void
1306 0 : hv_ring_avail(struct hv_ring_data *rd, uint32_t *towrite, uint32_t *toread)
1307 : {
1308 0 : uint32_t ridx = rd->rd_ring->br_rindex;
1309 0 : uint32_t widx = rd->rd_ring->br_windex;
1310 : uint32_t r, w;
1311 :
1312 0 : if (widx >= ridx)
1313 0 : w = rd->rd_dsize - (widx - ridx);
1314 : else
1315 0 : w = ridx - widx;
1316 0 : r = rd->rd_dsize - w;
1317 0 : if (towrite)
1318 0 : *towrite = w;
1319 0 : if (toread)
1320 0 : *toread = r;
1321 0 : }
1322 :
/*
 * Append the iovec array to the write ring followed by an 8-byte
 * trailer holding the pre-write producer offset (in the upper 32
 * bits), then publish the new write index to the host.
 *
 * Returns 0 on success and sets *needsig when the host must be
 * signalled (ring went from empty to non-empty with interrupts
 * unmasked); returns EAGAIN when the ring is too full.  The caller
 * must hold rd_lock.
 */
int
hv_ring_write(struct hv_ring_data *wrd, struct iovec *iov, int iov_cnt,
    int *needsig)
{
	uint64_t indices = 0;
	uint32_t avail, oprod, datalen = sizeof(indices);
	int i;

	for (i = 0; i < iov_cnt; i++)
		datalen += iov[i].iov_len;

	KASSERT(datalen <= wrd->rd_dsize);

	/*
	 * Strict '<=' so the ring never becomes completely full,
	 * which would be indistinguishable from empty.
	 */
	hv_ring_avail(wrd, &avail, NULL);
	if (avail <= datalen) {
		DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen);
		return (EAGAIN);
	}

	oprod = wrd->rd_prod;

	for (i = 0; i < iov_cnt; i++)
		hv_ring_put(wrd, iov[i].iov_base, iov[i].iov_len);

	indices = (uint64_t)oprod << 32;
	hv_ring_put(wrd, (uint8_t *)&indices, sizeof(indices));

	/* Make the data visible before publishing the new index. */
	virtio_membar_sync();
	wrd->rd_ring->br_windex = wrd->rd_prod;
	virtio_membar_sync();

	/* Signal when the ring transitions from being empty to non-empty */
	if (wrd->rd_ring->br_imask == 0 &&
	    wrd->rd_ring->br_rindex == oprod)
		*needsig = 1;
	else
		*needsig = 0;

	return (0);
}
1363 :
/*
 * Send an inband packet on the channel: a vmbus_chanpkt header, the
 * payload, and zero padding up to 8-byte alignment are written to the
 * TX ring as a three-element iovec.  The host is signalled if the
 * ring write says so.  Returns 0 or the hv_ring_write() error.
 */
int
hv_channel_send(struct hv_channel *ch, void *data, uint32_t datalen,
    uint64_t rid, int type, uint32_t flags)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanpkt cp;
	struct iovec iov[3];
	uint32_t pktlen, pktlen_aligned;
	uint64_t zeropad = 0;
	int rv, needsig = 0;

	pktlen = sizeof(cp) + datalen;
	/* Total length must be a multiple of 8 bytes. */
	pktlen_aligned = roundup(pktlen, sizeof(uint64_t));

	cp.cp_hdr.cph_type = type;
	cp.cp_hdr.cph_flags = flags;
	/* Header/total lengths are encoded, not raw byte counts. */
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp));
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned);
	cp.cp_hdr.cph_tid = rid;

	iov[0].iov_base = &cp;
	iov[0].iov_len = sizeof(cp);

	iov[1].iov_base = data;
	iov[1].iov_len = datalen;

	iov[2].iov_base = &zeropad;
	iov[2].iov_len = pktlen_aligned - pktlen;

	mtx_enter(&ch->ch_wrd.rd_lock);
	rv = hv_ring_write(&ch->ch_wrd, iov, 3, &needsig);
	mtx_leave(&ch->ch_wrd.rd_lock);
	if (rv == 0 && needsig)
		hv_channel_setevent(sc, ch);

	return (rv);
}
1401 :
/*
 * Send a packet that references guest physical pages via a
 * scatter-gather list of nsge vmbus_gpa elements, followed by the
 * inline payload and alignment padding.  Used for zero-copy transfers
 * where the host reads the pages directly.  Returns 0 or the
 * hv_ring_write() error.
 */
int
hv_channel_send_sgl(struct hv_channel *ch, struct vmbus_gpa *sgl,
    uint32_t nsge, void *data, uint32_t datalen, uint64_t rid)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanpkt_sglist cp;
	struct iovec iov[4];
	uint32_t buflen, pktlen, pktlen_aligned;
	uint64_t zeropad = 0;
	int rv, needsig = 0;

	buflen = sizeof(struct vmbus_gpa) * nsge;
	pktlen = sizeof(cp) + datalen + buflen;
	pktlen_aligned = roundup(pktlen, sizeof(uint64_t));

	cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
	/* RC: request a completion from the host. */
	cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
	/* The SG list counts as part of the packet header. */
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen);
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned);
	cp.cp_hdr.cph_tid = rid;
	cp.cp_gpa_cnt = nsge;
	cp.cp_rsvd = 0;

	iov[0].iov_base = &cp;
	iov[0].iov_len = sizeof(cp);

	iov[1].iov_base = sgl;
	iov[1].iov_len = buflen;

	iov[2].iov_base = data;
	iov[2].iov_len = datalen;

	iov[3].iov_base = &zeropad;
	iov[3].iov_len = pktlen_aligned - pktlen;

	mtx_enter(&ch->ch_wrd.rd_lock);
	rv = hv_ring_write(&ch->ch_wrd, iov, 4, &needsig);
	mtx_leave(&ch->ch_wrd.rd_lock);
	if (rv == 0 && needsig)
		hv_channel_setevent(sc, ch);

	return (rv);
}
1445 :
/*
 * Send a packet that references guest memory via a page-range list
 * (a single vmbus_gpa_range followed by its page numbers), then the
 * inline payload and alignment padding.
 *
 * NOTE(review): buflen is sized for nprp + 1 range structures while
 * cp_range_cnt is fixed at 1 -- presumably the "+1" accounts for the
 * range header preceding the nprp page entries; confirm against the
 * vmbus_gpa_range layout.
 */
int
hv_channel_send_prpl(struct hv_channel *ch, struct vmbus_gpa_range *prpl,
    uint32_t nprp, void *data, uint32_t datalen, uint64_t rid)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanpkt_prplist cp;
	struct iovec iov[4];
	uint32_t buflen, pktlen, pktlen_aligned;
	uint64_t zeropad = 0;
	int rv, needsig = 0;

	buflen = sizeof(struct vmbus_gpa_range) * (nprp + 1);
	pktlen = sizeof(cp) + datalen + buflen;
	pktlen_aligned = roundup(pktlen, sizeof(uint64_t));

	cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
	/* RC: request a completion from the host. */
	cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
	/* The range list counts as part of the packet header. */
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen);
	VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned);
	cp.cp_hdr.cph_tid = rid;
	cp.cp_range_cnt = 1;
	cp.cp_rsvd = 0;

	iov[0].iov_base = &cp;
	iov[0].iov_len = sizeof(cp);

	iov[1].iov_base = prpl;
	iov[1].iov_len = buflen;

	iov[2].iov_base = data;
	iov[2].iov_len = datalen;

	iov[3].iov_base = &zeropad;
	iov[3].iov_len = pktlen_aligned - pktlen;

	mtx_enter(&ch->ch_wrd.rd_lock);
	rv = hv_ring_write(&ch->ch_wrd, iov, 4, &needsig);
	mtx_leave(&ch->ch_wrd.rd_lock);
	if (rv == 0 && needsig)
		hv_channel_setevent(sc, ch);

	return (rv);
}
1489 :
1490 : int
1491 0 : hv_ring_peek(struct hv_ring_data *rrd, void *data, uint32_t datalen)
1492 : {
1493 0 : uint32_t avail;
1494 :
1495 0 : KASSERT(datalen <= rrd->rd_dsize);
1496 :
1497 0 : hv_ring_avail(rrd, NULL, &avail);
1498 0 : if (avail < datalen)
1499 0 : return (EAGAIN);
1500 :
1501 0 : hv_ring_get(rrd, (uint8_t *)data, datalen, 1);
1502 0 : return (0);
1503 0 : }
1504 :
/*
 * Consume a packet from the read ring: skip 'offset' bytes (used to
 * drop the packet header when the caller only wants the payload),
 * copy out datalen bytes, then consume and discard the 8-byte index
 * trailer appended by the sender.  Finally publish the new read index
 * to the host.  Returns EAGAIN if not enough data, else 0.  The
 * caller must hold rd_lock.
 */
int
hv_ring_read(struct hv_ring_data *rrd, void *data, uint32_t datalen,
    uint32_t offset)
{
	uint64_t indices;
	uint32_t avail;

	KASSERT(datalen <= rrd->rd_dsize);

	hv_ring_avail(rrd, NULL, &avail);
	if (avail < datalen) {
		DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen);
		return (EAGAIN);
	}

	/* Advance past the header the caller is not interested in. */
	if (offset) {
		rrd->rd_cons += offset;
		if (rrd->rd_cons >= rrd->rd_dsize)
			rrd->rd_cons -= rrd->rd_dsize;
	}

	hv_ring_get(rrd, (uint8_t *)data, datalen, 0);
	/* The trailing index block is read only to advance rd_cons. */
	hv_ring_get(rrd, (uint8_t *)&indices, sizeof(indices), 0);

	/* Ensure the copies complete before exposing the new index. */
	virtio_membar_sync();
	rrd->rd_ring->br_rindex = rrd->rd_cons;

	return (0);
}
1534 :
/*
 * Receive one packet from the channel.  Peeks at the packet header to
 * size the read; with raw == 0 the header is stripped and only the
 * payload is copied into data, with raw != 0 the whole packet is
 * returned.  On success *rlen is the number of bytes copied and *rid
 * the sender's transaction id.  Returns 0, EAGAIN (no complete
 * packet), or EINVAL (caller's buffer too small).
 */
int
hv_channel_recv(struct hv_channel *ch, void *data, uint32_t datalen,
    uint32_t *rlen, uint64_t *rid, int raw)
{
	struct vmbus_chanpkt_hdr cph;
	uint32_t offset, pktlen;
	int rv;

	*rlen = 0;

	mtx_enter(&ch->ch_rrd.rd_lock);

	if ((rv = hv_ring_peek(&ch->ch_rrd, &cph, sizeof(cph))) != 0) {
		mtx_leave(&ch->ch_rrd.rd_lock);
		return (rv);
	}

	/* Skip the encoded header length unless raw mode is wanted. */
	offset = raw ? 0 : VMBUS_CHANPKT_GETLEN(cph.cph_hlen);
	pktlen = VMBUS_CHANPKT_GETLEN(cph.cph_tlen) - offset;
	if (pktlen > datalen) {
		mtx_leave(&ch->ch_rrd.rd_lock);
		printf("%s: pktlen %u datalen %u\n", __func__, pktlen, datalen);
		return (EINVAL);
	}

	rv = hv_ring_read(&ch->ch_rrd, data, pktlen, offset);
	if (rv == 0) {
		*rlen = pktlen;
		*rid = cph.cph_tid;
	}

	mtx_leave(&ch->ch_rrd.rd_lock);

	return (rv);
}
1570 :
/*
 * Mask host interrupts for this ring by setting the interrupt-mask
 * flag in the shared ring header, with barriers on both sides so the
 * host observes the store in order.
 */
static inline void
hv_ring_mask(struct hv_ring_data *rd)
{
	virtio_membar_sync();
	rd->rd_ring->br_imask = 1;
	virtio_membar_sync();
}
1578 :
/*
 * Unmask host interrupts for this ring by clearing the interrupt-mask
 * flag in the shared ring header, with barriers on both sides so the
 * host observes the store in order.
 */
static inline void
hv_ring_unmask(struct hv_ring_data *rd)
{
	virtio_membar_sync();
	rd->rd_ring->br_imask = 0;
	virtio_membar_sync();
}
1586 :
/*
 * Stop the host from interrupting us for incoming data on this
 * channel (masks the RX ring).
 */
void
hv_channel_pause(struct hv_channel *ch)
{
	hv_ring_mask(&ch->ch_rrd);
}
1592 :
/*
 * Re-enable host interrupts for the RX ring and report how many bytes
 * are readable right after the unmask -- a non-zero return means data
 * arrived while paused and the caller should process it itself.
 */
uint
hv_channel_unpause(struct hv_channel *ch)
{
	uint32_t avail;

	hv_ring_unmask(&ch->ch_rrd);
	hv_ring_avail(&ch->ch_rrd, NULL, &avail);

	return (avail);
}
1603 :
/*
 * Return the number of bytes currently readable on the channel's RX
 * ring (non-zero means there is at least a partial packet pending).
 */
uint
hv_channel_ready(struct hv_channel *ch)
{
	uint32_t avail;

	hv_ring_avail(&ch->ch_rrd, NULL, &avail);

	return (avail);
}
1613 :
/*
 * GPADL setup messages carry page frame numbers in uint64_t slots;
 * these bound how many fit after each message's fixed header within
 * the VMBUS_MSG_DSIZE_MAX message payload.
 */
/* How many PFNs can be referenced by the header */
#define HV_NPFNHDR	((VMBUS_MSG_DSIZE_MAX -	\
	  sizeof(struct vmbus_chanmsg_gpadl_conn)) / sizeof(uint64_t))

/* How many PFNs can be referenced by the body */
#define HV_NPFNBODY	((VMBUS_MSG_DSIZE_MAX -	\
	  sizeof(struct vmbus_chanmsg_gpadl_subconn)) / sizeof(uint64_t))
1621 :
/*
 * Create a GPADL (Guest Physical Address Descriptor List) handle for
 * a physically backed, page-aligned buffer so the host can access it.
 *
 * The page frame numbers are split across an initial GPADL_CONN
 * message (up to HV_NPFNHDR pages) and as many GPADL_SUBCONN
 * follow-up messages as needed; the host's single reply to the
 * initial message confirms the whole list.  On success 0 is returned
 * and *handle holds the new GPADL id; otherwise an errno is returned.
 */
int
hv_handle_alloc(struct hv_channel *ch, void *buffer, uint32_t buflen,
    uint32_t *handle)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_gpadl_conn *hdr;
	struct vmbus_chanmsg_gpadl_subconn *cmd;
	struct vmbus_chanmsg_gpadl_connresp rsp;
	struct hv_msg *msg;
	int i, j, last, left, rv;
	int bodylen = 0, ncmds = 0, pfn = 0;
	int waitflag = cold ? M_NOWAIT : M_WAITOK;
	uint64_t *frames;
	paddr_t pa;
	caddr_t body;
	/* Total number of pages to reference */
	int total = atop(buflen);
	/* Number of pages that will fit the header */
	int inhdr = MIN(total, HV_NPFNHDR);

	KASSERT((buflen & (PAGE_SIZE - 1)) == 0);

	if ((msg = malloc(sizeof(*msg), M_DEVBUF, M_ZERO | waitflag)) == NULL)
		return (ENOMEM);

	/* Prepare array of frame addresses */
	if ((frames = mallocarray(total, sizeof(*frames), M_DEVBUF, M_ZERO |
	    waitflag)) == NULL) {
		free(msg, M_DEVBUF, sizeof(*msg));
		return (ENOMEM);
	}
	/* Resolve each virtual page of the buffer to its PFN. */
	for (i = 0; i < total; i++) {
		if (!pmap_extract(pmap_kernel(), (vaddr_t)buffer +
		    PAGE_SIZE * i, &pa)) {
			free(msg, M_DEVBUF, sizeof(*msg));
			free(frames, M_DEVBUF, total * sizeof(*frames));
			return (EFAULT);
		}
		frames[i] = atop(pa);
	}

	msg->msg_req.hc_dsize = sizeof(struct vmbus_chanmsg_gpadl_conn) +
	    inhdr * sizeof(uint64_t);
	hdr = (struct vmbus_chanmsg_gpadl_conn *)msg->msg_req.hc_data;
	msg->msg_rsp = &rsp;
	msg->msg_rsplen = sizeof(rsp);
	if (waitflag == M_NOWAIT)
		msg->msg_flags = MSGF_NOSLEEP;

	left = total - inhdr;

	/* Allocate additional gpadl_body structures if required */
	if (left > 0) {
		/*
		 * NOTE(review): this looks like it should be
		 * howmany(left, HV_NPFNBODY); the '/ + %' form
		 * overcounts whenever left % HV_NPFNBODY > 1 and the
		 * excess messages go out with zero PFNs -- confirm
		 * the host tolerates them before changing.
		 */
		ncmds = MAX(1, left / HV_NPFNBODY + left % HV_NPFNBODY);
		bodylen = ncmds * VMBUS_MSG_DSIZE_MAX;
		body = malloc(bodylen, M_DEVBUF, M_ZERO | waitflag);
		if (body == NULL) {
			free(msg, M_DEVBUF, sizeof(*msg));
			free(frames, M_DEVBUF, atop(buflen) * sizeof(*frames));
			return (ENOMEM);
		}
	}

	/* GPADL ids are allocated from a simple global counter. */
	*handle = atomic_inc_int_nv(&sc->sc_handle);

	hdr->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_CONN;
	hdr->chm_chanid = ch->ch_id;
	hdr->chm_gpadl = *handle;

	/* Single range for a contiguous buffer */
	hdr->chm_range_cnt = 1;
	hdr->chm_range_len = sizeof(struct vmbus_gpa_range) + total *
	    sizeof(uint64_t);
	hdr->chm_range.gpa_ofs = 0;
	hdr->chm_range.gpa_len = buflen;

	/* Fit as many pages as possible into the header */
	for (i = 0; i < inhdr; i++)
		hdr->chm_range.gpa_page[i] = frames[pfn++];

	/* Spill the remaining pages into SUBCONN message bodies. */
	for (i = 0; i < ncmds; i++) {
		cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body +
		    VMBUS_MSG_DSIZE_MAX * i);
		cmd->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_SUBCONN;
		cmd->chm_gpadl = *handle;
		last = MIN(left, HV_NPFNBODY);
		for (j = 0; j < last; j++)
			cmd->chm_gpa_page[j] = frames[pfn++];
		left -= last;
	}

	/* Post the initial message; the reply is collected below. */
	rv = hv_start(sc, msg);
	if (rv != 0) {
		DPRINTF("%s: GPADL_CONN failed\n", sc->sc_dev.dv_xname);
		goto out;
	}
	for (i = 0; i < ncmds; i++) {
		int cmdlen = sizeof(*cmd);
		cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body +
		    VMBUS_MSG_DSIZE_MAX * i);
		/* Last element can be short */
		if (i == ncmds - 1)
			cmdlen += last * sizeof(uint64_t);
		else
			cmdlen += HV_NPFNBODY * sizeof(uint64_t);
		rv = hv_cmd(sc, cmd, cmdlen, NULL, 0, waitflag | HCF_NOREPLY);
		if (rv != 0) {
			DPRINTF("%s: GPADL_SUBCONN (iteration %d/%d) failed "
			    "with %d\n", sc->sc_dev.dv_xname, i, ncmds, rv);
			goto out;
		}
	}
	/* Wait for the host's confirmation of the whole GPADL. */
	rv = hv_reply(sc, msg);
	if (rv != 0)
		DPRINTF("%s: GPADL allocation failed with %d\n",
		    sc->sc_dev.dv_xname, rv);

 out:
	free(msg, M_DEVBUF, sizeof(*msg));
	free(frames, M_DEVBUF, total * sizeof(*frames));
	if (bodylen > 0)
		free(body, M_DEVBUF, bodylen);
	if (rv != 0)
		return (rv);

	/* The host must echo back the id we proposed. */
	KASSERT(*handle == rsp.chm_gpadl);

	return (0);
}
1751 :
/*
 * Tear down a GPADL handle previously created with hv_handle_alloc().
 * Failure is only logged; there is no way to report it to the caller.
 *
 * NOTE(review): rsp is declared as vmbus_chanmsg_gpadl_disconn rather
 * than a dedicated response type -- presumably the host's reply fits
 * in the same layout; confirm against the protocol headers.
 */
void
hv_handle_free(struct hv_channel *ch, uint32_t handle)
{
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_gpadl_disconn cmd;
	struct vmbus_chanmsg_gpadl_disconn rsp;
	int rv;

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_DISCONN;
	cmd.chm_chanid = ch->ch_id;
	cmd.chm_gpadl = handle;

	rv = hv_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), cold ?
	    HCF_NOSLEEP : 0);
	if (rv)
		DPRINTF("%s: GPADL_DISCONN failed with %d\n",
		    sc->sc_dev.dv_xname, rv);
}
1771 :
1772 : static int
1773 0 : hv_attach_print(void *aux, const char *name)
1774 : {
1775 0 : struct hv_attach_args *aa = aux;
1776 :
1777 0 : if (name)
1778 0 : printf("\"%s\" at %s", aa->aa_ident, name);
1779 :
1780 0 : return (UNCONF);
1781 : }
1782 :
/*
 * Walk the discovered channels and attach a child device for every
 * offered, monitored channel via autoconf.  Returns 0 on success or
 * -1 if a device object could not be allocated (devices attached so
 * far are left in place).
 */
int
hv_attach_devices(struct hv_softc *sc)
{
	struct hv_dev *dv;
	struct hv_channel *ch;

	SLIST_INIT(&sc->sc_devs);
	mtx_init(&sc->sc_devlck, IPL_NET);

	TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
		/* Only unopened offers are candidates for attachment. */
		if (ch->ch_state != HV_CHANSTATE_OFFERED)
			continue;
		/* Skip channels without monitor support. */
		if (!(ch->ch_flags & CHF_MONITOR))
			continue;
		dv = malloc(sizeof(*dv), M_DEVBUF, M_ZERO | M_NOWAIT);
		if (dv == NULL) {
			printf("%s: failed to allocate device object\n",
			    sc->sc_dev.dv_xname);
			return (-1);
		}
		dv->dv_aa.aa_parent = sc;
		dv->dv_aa.aa_type = &ch->ch_type;
		dv->dv_aa.aa_inst = &ch->ch_inst;
		dv->dv_aa.aa_ident = ch->ch_ident;
		dv->dv_aa.aa_chan = ch;
		dv->dv_aa.aa_dmat = sc->sc_dmat;
		mtx_enter(&sc->sc_devlck);
		SLIST_INSERT_HEAD(&sc->sc_devs, dv, dv_entry);
		mtx_leave(&sc->sc_devlck);
		config_found((struct device *)sc, &dv->dv_aa, hv_attach_print);
	}
	return (0);
}
1816 :
/*
 * Register an event counter for the channel under the given name,
 * tied to the vmbus interrupt vector, so per-channel interrupt
 * statistics show up in vmstat -i.
 */
void
hv_evcount_attach(struct hv_channel *ch, const char *name)
{
	struct hv_softc *sc = ch->ch_sc;

	evcount_attach(&ch->ch_evcnt, name, &sc->sc_idtvec);
}
|