/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>
#include <asm/page.h>

#include "hyperv_vmbus.h"

#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))

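/*
 * Convert a kernel virtual address (either a lowmem/kmalloc address or a
 * vmalloc address) into the page frame number the hypervisor expects.
 * The GPADL descriptors built in this file are expressed in these PFNs.
 */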
static unsigned long virt_to_hvpfn(void *addr)
{
        unsigned long paddr;

        if (is_vmalloc_addr(addr))
                paddr = page_to_phys(vmalloc_to_page(addr)) +
                        offset_in_page(addr);
        else
                paddr = __pa(addr);

        return paddr >> PAGE_SHIFT;
}

/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
void vmbus_setevent(struct vmbus_channel *channel)
{
        struct hv_monitor_page *monitorpage;

        trace_vmbus_setevent(channel);

        /*
         * For channels marked as in "low latency" mode
         * bypass the monitor page mechanism.
         */
        if (channel->offermsg.monitor_allocated && !channel->low_latency) {
                vmbus_send_interrupt(channel->offermsg.child_relid);

                /* Get the child to parent monitor page */
                monitorpage = vmbus_connection.monitor_pages[1];

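                /*
                 * Setting this channel's bit in the "pending" mask of its
                 * trigger group asks the host, which scans the shared
                 * monitor page, to deliver the notification.  Events
                 * signalled this way are batched by the host, which is why
                 * low-latency channels skip it and take the direct
                 * vmbus_set_event() path below instead.
                 */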
                sync_set_bit(channel->monitor_bit,
                        (unsigned long *)&monitorpage->trigger_group
                                        [channel->monitor_grp].pending);

        } else {
                vmbus_set_event(channel);
        }
}
EXPORT_SYMBOL_GPL(vmbus_setevent);

/*
 * vmbus_open - Open the specified channel.
 */
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
               u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
               void (*onchannelcallback)(void *context), void *context)
{
        struct vmbus_channel_open_channel *open_msg;
        struct vmbus_channel_msginfo *open_info = NULL;
        unsigned long flags;
        int ret, err = 0;
        struct page *page;

        if (send_ringbuffer_size % PAGE_SIZE ||
            recv_ringbuffer_size % PAGE_SIZE)
                return -EINVAL;

        spin_lock_irqsave(&newchannel->lock, flags);
        if (newchannel->state == CHANNEL_OPEN_STATE) {
                newchannel->state = CHANNEL_OPENING_STATE;
        } else {
                spin_unlock_irqrestore(&newchannel->lock, flags);
                return -EINVAL;
        }
        spin_unlock_irqrestore(&newchannel->lock, flags);

        newchannel->onchannel_callback = onchannelcallback;
        newchannel->channel_callback_context = context;

        /* Allocate the ring buffer */
        page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
                                GFP_KERNEL|__GFP_ZERO,
                                get_order(send_ringbuffer_size +
                                          recv_ringbuffer_size));

        if (!page)
                page = alloc_pages(GFP_KERNEL|__GFP_ZERO,
                                   get_order(send_ringbuffer_size +
                                             recv_ringbuffer_size));

        if (!page) {
                err = -ENOMEM;
                goto error_set_chnstate;
        }

        newchannel->ringbuffer_pages = page_address(page);
        newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
                                            recv_ringbuffer_size) >> PAGE_SHIFT;

        ret = hv_ringbuffer_init(&newchannel->outbound, page,
                                 send_ringbuffer_size >> PAGE_SHIFT);

        if (ret != 0) {
                err = ret;
                goto error_free_pages;
        }

        ret = hv_ringbuffer_init(&newchannel->inbound,
                                 &page[send_ringbuffer_size >> PAGE_SHIFT],
                                 recv_ringbuffer_size >> PAGE_SHIFT);
        if (ret != 0) {
                err = ret;
                goto error_free_pages;
        }


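        /*
         * A GPADL (Guest Physical Address Descriptor List) describes the
         * guest pages backing the ring buffer to the host; the handle
         * established here is quoted in the open request below so the host
         * can map the same pages.
         */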
        /* Establish the gpadl for the ring buffer */
        newchannel->ringbuffer_gpadlhandle = 0;

        ret = vmbus_establish_gpadl(newchannel,
                                    page_address(page),
                                    send_ringbuffer_size +
                                    recv_ringbuffer_size,
                                    &newchannel->ringbuffer_gpadlhandle);

        if (ret != 0) {
                err = ret;
                goto error_free_pages;
        }

        /* Create and init the channel open message */
        open_info = kmalloc(sizeof(*open_info) +
                            sizeof(struct vmbus_channel_open_channel),
                            GFP_KERNEL);
        if (!open_info) {
                err = -ENOMEM;
                goto error_free_gpadl;
        }

        init_completion(&open_info->waitevent);
        open_info->waiting_channel = newchannel;

        open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
        open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
        open_msg->openid = newchannel->offermsg.child_relid;
        open_msg->child_relid = newchannel->offermsg.child_relid;
        open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
        open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
                                                     PAGE_SHIFT;
        open_msg->target_vp = newchannel->target_vp;

        if (userdatalen > MAX_USER_DEFINED_BYTES) {
                err = -EINVAL;
                goto error_free_gpadl;
        }

        if (userdatalen)
                memcpy(open_msg->userdata, userdata, userdatalen);

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&open_info->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

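        /*
         * The request was queued on vmbus_connection.chn_msg_list before
         * being posted so that, when the host's open-result message
         * arrives, the response handler can locate open_info and complete
         * the waitevent we block on below.
         */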
        if (newchannel->rescind) {
                err = -ENODEV;
                goto error_free_gpadl;
        }

        ret = vmbus_post_msg(open_msg,
                             sizeof(struct vmbus_channel_open_channel), true);

        trace_vmbus_open(open_msg, ret);

        if (ret != 0) {
                err = ret;
                goto error_clean_msglist;
        }

        wait_for_completion(&open_info->waitevent);

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&open_info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        if (newchannel->rescind) {
                err = -ENODEV;
                goto error_free_gpadl;
        }

        if (open_info->response.open_result.status) {
                err = -EAGAIN;
                goto error_free_gpadl;
        }

        newchannel->state = CHANNEL_OPENED_STATE;
        kfree(open_info);
        return 0;

error_clean_msglist:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&open_info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

error_free_gpadl:
        vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
        kfree(open_info);
error_free_pages:
        hv_ringbuffer_cleanup(&newchannel->outbound);
        hv_ringbuffer_cleanup(&newchannel->inbound);
        __free_pages(page,
                     get_order(send_ringbuffer_size + recv_ringbuffer_size));
error_set_chnstate:
        newchannel->state = CHANNEL_OPEN_STATE;
        return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);
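
/*
 * A minimal usage sketch (hypothetical driver code, not part of this file):
 * a VMBus device driver typically opens its channel from probe() with
 * page-aligned ring sizes and a callback that drains the inbound ring.
 * The names my_onchannelcallback and my_dev are illustrative only.
 *
 *	ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *			 NULL, 0, my_onchannelcallback, my_dev);
 *	if (ret)
 *		return ret;
 */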

/* Used for Hyper-V Socket: a guest client's connect() to the host */
int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
                                  const uuid_le *shv_host_servie_id)
{
        struct vmbus_channel_tl_connect_request conn_msg;
        int ret;

        memset(&conn_msg, 0, sizeof(conn_msg));
        conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
        conn_msg.guest_endpoint_id = *shv_guest_servie_id;
        conn_msg.host_service_id = *shv_host_servie_id;

        ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);

        trace_vmbus_send_tl_connect_request(&conn_msg, ret);

        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);

/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
                               struct vmbus_channel_msginfo **msginfo)
{
        int i;
        int pagecount;
        struct vmbus_channel_gpadl_header *gpadl_header;
        struct vmbus_channel_gpadl_body *gpadl_body;
        struct vmbus_channel_msginfo *msgheader;
        struct vmbus_channel_msginfo *msgbody = NULL;
        u32 msgsize;

        int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

        pagecount = size >> PAGE_SHIFT;

        /* do we need a gpadl body msg */
        pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
                  sizeof(struct vmbus_channel_gpadl_header) -
                  sizeof(struct gpa_range);
        pfncount = pfnsize / sizeof(u64);

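        /*
         * A single post to the host is limited to MAX_SIZE_CHANNEL_MESSAGE
         * bytes, so a GPADL whose PFN array does not fit in one
         * GPADL_HEADER message is split: the first pfncount PFNs travel in
         * the header and the remainder is carried by follow-up GPADL_BODY
         * messages chained on msgheader->submsglist.
         */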
        if (pagecount > pfncount) {
                /* we need a gpadl body */
                /* fill in the header */
                msgsize = sizeof(struct vmbus_channel_msginfo) +
                          sizeof(struct vmbus_channel_gpadl_header) +
                          sizeof(struct gpa_range) + pfncount * sizeof(u64);
                msgheader = kzalloc(msgsize, GFP_KERNEL);
                if (!msgheader)
                        goto nomem;

                INIT_LIST_HEAD(&msgheader->submsglist);
                msgheader->msgsize = msgsize;

                gpadl_header = (struct vmbus_channel_gpadl_header *)
                        msgheader->msg;
                gpadl_header->rangecount = 1;
                gpadl_header->range_buflen = sizeof(struct gpa_range) +
                                             pagecount * sizeof(u64);
                gpadl_header->range[0].byte_offset = 0;
                gpadl_header->range[0].byte_count = size;
                for (i = 0; i < pfncount; i++)
                        gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
                                kbuffer + PAGE_SIZE * i);
                *msginfo = msgheader;

                pfnsum = pfncount;
                pfnleft = pagecount - pfncount;

                /* how many pfns can we fit */
                pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
                          sizeof(struct vmbus_channel_gpadl_body);
                pfncount = pfnsize / sizeof(u64);

                /* fill in the body */
                while (pfnleft) {
                        if (pfnleft > pfncount)
                                pfncurr = pfncount;
                        else
                                pfncurr = pfnleft;

                        msgsize = sizeof(struct vmbus_channel_msginfo) +
                                  sizeof(struct vmbus_channel_gpadl_body) +
                                  pfncurr * sizeof(u64);
                        msgbody = kzalloc(msgsize, GFP_KERNEL);

                        if (!msgbody) {
                                struct vmbus_channel_msginfo *pos = NULL;
                                struct vmbus_channel_msginfo *tmp = NULL;
                                /*
                                 * Free up all the allocated messages.
                                 */
                                list_for_each_entry_safe(pos, tmp,
                                        &msgheader->submsglist,
                                        msglistentry) {

                                        list_del(&pos->msglistentry);
                                        kfree(pos);
                                }

                                goto nomem;
                        }

                        msgbody->msgsize = msgsize;
                        gpadl_body =
                                (struct vmbus_channel_gpadl_body *)msgbody->msg;

                        /*
                         * Gpadl is u32 and we are using a pointer which could
                         * be 64-bit
                         * This is governed by the guest/host protocol and
                         * so the hypervisor guarantees that this is ok.
                         */
                        for (i = 0; i < pfncurr; i++)
                                gpadl_body->pfn[i] = virt_to_hvpfn(
                                        kbuffer + PAGE_SIZE * (pfnsum + i));

                        /* add to msg header */
                        list_add_tail(&msgbody->msglistentry,
                                      &msgheader->submsglist);
                        pfnsum += pfncurr;
                        pfnleft -= pfncurr;
                }
        } else {
                /* everything fits in a header */
                msgsize = sizeof(struct vmbus_channel_msginfo) +
                          sizeof(struct vmbus_channel_gpadl_header) +
                          sizeof(struct gpa_range) + pagecount * sizeof(u64);
                msgheader = kzalloc(msgsize, GFP_KERNEL);
                if (msgheader == NULL)
                        goto nomem;

                INIT_LIST_HEAD(&msgheader->submsglist);
                msgheader->msgsize = msgsize;

                gpadl_header = (struct vmbus_channel_gpadl_header *)
                        msgheader->msg;
                gpadl_header->rangecount = 1;
                gpadl_header->range_buflen = sizeof(struct gpa_range) +
                                             pagecount * sizeof(u64);
                gpadl_header->range[0].byte_offset = 0;
                gpadl_header->range[0].byte_count = size;
                for (i = 0; i < pagecount; i++)
                        gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
                                kbuffer + PAGE_SIZE * i);

                *msginfo = msgheader;
        }

        return 0;
nomem:
        kfree(msgheader);
        kfree(msgbody);
        return -ENOMEM;
}

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: a channel
 * @kbuffer: from kmalloc or vmalloc
 * @size: page-size multiple
 * @gpadl_handle: on success, receives the handle that identifies the new
 *                GPADL to the host
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
                          u32 size, u32 *gpadl_handle)
{
        struct vmbus_channel_gpadl_header *gpadlmsg;
        struct vmbus_channel_gpadl_body *gpadl_body;
        struct vmbus_channel_msginfo *msginfo = NULL;
        struct vmbus_channel_msginfo *submsginfo, *tmp;
        struct list_head *curr;
        u32 next_gpadl_handle;
        unsigned long flags;
        int ret = 0;

        next_gpadl_handle =
                (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);

        ret = create_gpadl_header(kbuffer, size, &msginfo);
        if (ret)
                return ret;

        init_completion(&msginfo->waitevent);
        msginfo->waiting_channel = channel;

        gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
        gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
        gpadlmsg->child_relid = channel->offermsg.child_relid;
        gpadlmsg->gpadl = next_gpadl_handle;


        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&msginfo->msglistentry,
                      &vmbus_connection.chn_msg_list);

        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        if (channel->rescind) {
                ret = -ENODEV;
                goto cleanup;
        }

        ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
                             sizeof(*msginfo), true);

        trace_vmbus_establish_gpadl_header(gpadlmsg, ret);

        if (ret != 0)
                goto cleanup;

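        /*
         * Any PFNs that did not fit in the header message are now sent in
         * GPADL_BODY messages; the host acknowledges the complete GPADL
         * with a single GPADL_CREATED response, which completes waitevent.
         */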
        list_for_each(curr, &msginfo->submsglist) {
                submsginfo = (struct vmbus_channel_msginfo *)curr;
                gpadl_body =
                        (struct vmbus_channel_gpadl_body *)submsginfo->msg;

                gpadl_body->header.msgtype =
                        CHANNELMSG_GPADL_BODY;
                gpadl_body->gpadl = next_gpadl_handle;

                ret = vmbus_post_msg(gpadl_body,
                                     submsginfo->msgsize - sizeof(*submsginfo),
                                     true);

                trace_vmbus_establish_gpadl_body(gpadl_body, ret);

                if (ret != 0)
                        goto cleanup;

        }
        wait_for_completion(&msginfo->waitevent);

        if (msginfo->response.gpadl_created.creation_status != 0) {
                pr_err("Failed to establish GPADL: err = 0x%x\n",
                       msginfo->response.gpadl_created.creation_status);

                ret = -EDQUOT;
                goto cleanup;
        }

        if (channel->rescind) {
                ret = -ENODEV;
                goto cleanup;
        }

        /* At this point, we received the gpadl created msg */
        *gpadl_handle = gpadlmsg->gpadl;

cleanup:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&msginfo->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
        list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
                                 msglistentry) {
                kfree(submsginfo);
        }

        kfree(msginfo);
        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);

/*
 * vmbus_teardown_gpadl - Tear down the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
{
        struct vmbus_channel_gpadl_teardown *msg;
        struct vmbus_channel_msginfo *info;
        unsigned long flags;
        int ret;

        info = kmalloc(sizeof(*info) +
                       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        init_completion(&info->waitevent);
        info->waiting_channel = channel;

        msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

        msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
        msg->child_relid = channel->offermsg.child_relid;
        msg->gpadl = gpadl_handle;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&info->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        if (channel->rescind)
                goto post_msg_err;

        ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
                             true);

        trace_vmbus_teardown_gpadl(msg, ret);

        if (ret)
                goto post_msg_err;

        wait_for_completion(&info->waitevent);

post_msg_err:
        /*
         * If the channel has been rescinded, we will be awakened by the
         * rescind handler; set the error code to zero so we don't leak
         * memory.
         */
        if (channel->rescind)
                ret = 0;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        kfree(info);
        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);

static void reset_channel_cb(void *arg)
{
        struct vmbus_channel *channel = arg;

        channel->onchannel_callback = NULL;
}

void vmbus_reset_channel_cb(struct vmbus_channel *channel)
{
        /*
         * vmbus_on_event(), running in the per-channel tasklet, can race
         * with vmbus_close_internal() in the case of SMP guest, e.g., when
         * the former is accessing channel->inbound.ring_buffer, the latter
         * could be freeing the ring_buffer pages, so here we must stop it
         * first.
         */
        tasklet_disable(&channel->callback_event);

        channel->sc_creation_callback = NULL;

        /* Stop the callback asap */
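        /*
         * The callback pointer is cleared on the channel's target CPU,
         * either directly (if we already run there) or via a synchronous
         * cross-CPU call, so that once this block completes no further
         * invocation on that CPU can pick up the stale pointer.
         */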
        if (channel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(channel->target_cpu, reset_channel_cb,
                                         channel, true);
        } else {
                reset_channel_cb(channel);
                put_cpu();
        }

        /* Re-enable tasklet for use on re-open */
        tasklet_enable(&channel->callback_event);
}

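/*
 * Closing sequence: quiesce the channel callback, send
 * CHANNELMSG_CLOSECHANNEL to the host, tear down the ring-buffer GPADL and
 * finally free the ring-buffer pages.  If a message to the host fails, we
 * deliberately leak rather than free memory the host may still be using.
 */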
static int vmbus_close_internal(struct vmbus_channel *channel)
{
        struct vmbus_channel_close_channel *msg;
        int ret;

        vmbus_reset_channel_cb(channel);

        /*
         * In case a device driver's probe() fails (e.g.,
         * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
         * rescinded later (e.g., we dynamically disable an Integrated Service
         * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
         * here we should skip most of the below cleanup work.
         */
        if (channel->state != CHANNEL_OPENED_STATE) {
                ret = -EINVAL;
                goto out;
        }

        channel->state = CHANNEL_OPEN_STATE;

        /* Send a closing message */

        msg = &channel->close_msg.msg;

        msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
        msg->child_relid = channel->offermsg.child_relid;

        ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
                             true);

        trace_vmbus_close_internal(msg, ret);

        if (ret) {
                pr_err("Close failed: close post msg return is %d\n", ret);
                /*
                 * If we failed to post the close msg,
                 * it is perhaps better to leak memory.
                 */
                goto out;
        }

        /* Tear down the gpadl for the channel's ring buffer */
        if (channel->ringbuffer_gpadlhandle) {
                ret = vmbus_teardown_gpadl(channel,
                                           channel->ringbuffer_gpadlhandle);
                if (ret) {
                        pr_err("Close failed: teardown gpadl return %d\n", ret);
                        /*
                         * If we failed to teardown gpadl,
                         * it is perhaps better to leak memory.
                         */
                        goto out;
                }
        }

        /* Cleanup the ring buffers for this channel */
        hv_ringbuffer_cleanup(&channel->outbound);
        hv_ringbuffer_cleanup(&channel->inbound);

        free_pages((unsigned long)channel->ringbuffer_pages,
                   get_order(channel->ringbuffer_pagecount * PAGE_SIZE));

out:
        return ret;
}


/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
        struct list_head *cur, *tmp;
        struct vmbus_channel *cur_channel;

        if (channel->primary_channel != NULL) {
                /*
                 * We will only close sub-channels when
                 * the primary is closed.
                 */
                return;
        }
        /*
         * Close all the sub-channels first and then close the
         * primary channel.
         */
        list_for_each_safe(cur, tmp, &channel->sc_list) {
                cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
                if (cur_channel->rescind) {
                        wait_for_completion(&cur_channel->rescind_event);
                        mutex_lock(&vmbus_connection.channel_mutex);
                        vmbus_close_internal(cur_channel);
                        hv_process_channel_removal(
                                        cur_channel->offermsg.child_relid);
                } else {
                        mutex_lock(&vmbus_connection.channel_mutex);
                        vmbus_close_internal(cur_channel);
                }
                mutex_unlock(&vmbus_connection.channel_mutex);
        }
        /*
         * Now close the primary.
         */
        mutex_lock(&vmbus_connection.channel_mutex);
        vmbus_close_internal(channel);
        mutex_unlock(&vmbus_connection.channel_mutex);
}
EXPORT_SYMBOL_GPL(vmbus_close);

/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer containing the data to send.
 * @bufferlen: Length of the data in the buffer.
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent e.g. negotiate, time
 * packet etc.
 * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
 *
 * Sends data in @buffer directly to Hyper-V via the vmbus.
 * This will send the data unparsed to Hyper-V.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
                     u32 bufferlen, u64 requestid,
                     enum vmbus_packet_type type, u32 flags)
{
        struct vmpacket_descriptor desc;
        u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
        u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        int num_vecs = ((bufferlen != 0) ? 3 : 1);


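        /*
         * On the ring, a packet is laid out as: the descriptor (offset8 and
         * len8 are in 8-byte units), the caller's payload, and up to seven
         * bytes of zero padding (aligned_data) so the next packet starts on
         * an 8-byte boundary.  The three kvecs below describe exactly that.
         */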
        /* Setup the descriptor */
        desc.type = type; /* VmbusPacketTypeDataInBand; */
        desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
        /* in 8-bytes granularity */
        desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
        desc.len8 = (u16)(packetlen_aligned >> 3);
        desc.trans_id = requestid;

        bufferlist[0].iov_base = &desc;
        bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        return hv_ringbuffer_write(channel, bufferlist, num_vecs);
}
EXPORT_SYMBOL(vmbus_sendpacket);

/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type. This interface allows you
 * to control notifying the host. This will be useful for sending
 * batched data. Also the sender can control the send flags
 * explicitly.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
                                struct hv_page_buffer pagebuffers[],
                                u32 pagecount, void *buffer, u32 bufferlen,
                                u64 requestid)
{
        int i;
        struct vmbus_channel_packet_page_buffer desc;
        u32 descsize;
        u32 packetlen;
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;

        if (pagecount > MAX_PAGE_BUFFER_COUNT)
                return -EINVAL;

        /*
         * Adjust the size down since vmbus_channel_packet_page_buffer is the
         * largest size we support
         */
        descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
                   ((MAX_PAGE_BUFFER_COUNT - pagecount) *
                   sizeof(struct hv_page_buffer));
        packetlen = descsize + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));

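        /*
         * This is a GPA-direct packet: the descriptor carries an array of
         * (pfn, offset, len) ranges, so the host accesses the bulk data
         * directly in guest memory instead of it being copied through the
         * ring buffer; only the descriptor and the small inline @buffer go
         * onto the ring.
         */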
        /* Setup the descriptor */
        desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
        desc.length8 = (u16)(packetlen_aligned >> 3);
        desc.transactionid = requestid;
        desc.reserved = 0;
        desc.rangecount = pagecount;

        for (i = 0; i < pagecount; i++) {
                desc.range[i].len = pagebuffers[i].len;
                desc.range[i].offset = pagebuffers[i].offset;
                desc.range[i].pfn = pagebuffers[i].pfn;
        }

        bufferlist[0].iov_base = &desc;
        bufferlist[0].iov_len = descsize;
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);

/*
 * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 * The caller provides the multi-page buffer descriptor (including its
 * PFN ranges); only the header fields are filled in here.
 */
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
                              struct vmbus_packet_mpb_array *desc,
                              u32 desc_size,
                              void *buffer, u32 bufferlen, u64 requestid)
{
        u32 packetlen;
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;

        packetlen = desc_size + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));

        /* Setup the descriptor */
        desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
        desc->length8 = (u16)(packetlen_aligned >> 3);
        desc->transactionid = requestid;
        desc->reserved = 0;
        desc->rangecount = 1;

        bufferlist[0].iov_base = desc;
        bufferlist[0].iov_len = desc_size;
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);

/**
 * vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer will hold
 * @buffer_actual_len: The actual size of the data after it was received
 * @requestid: Identifier of the request
 *
 * Receives directly from the Hyper-V vmbus and puts the data it received
 * into @buffer. This will receive the data unparsed from Hyper-V.
 *
 * Mainly used by Hyper-V drivers.
 */
static inline int
__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
                   u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
                   bool raw)
{
        return hv_ringbuffer_read(channel, buffer, bufferlen,
                                  buffer_actual_len, requestid, raw);

}

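/*
 * The "cooked" variant below returns just the payload of the next packet
 * (the ring-buffer code strips the descriptor), while the _raw variant
 * further down returns the packet including its descriptor.
 */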
int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
                     u32 bufferlen, u32 *buffer_actual_len,
                     u64 *requestid)
{
        return __vmbus_recvpacket(channel, buffer, bufferlen,
                                  buffer_actual_len, requestid, false);
}
EXPORT_SYMBOL(vmbus_recvpacket);

/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
                         u32 bufferlen, u32 *buffer_actual_len,
                         u64 *requestid)
{
        return __vmbus_recvpacket(channel, buffer, bufferlen,
                                  buffer_actual_len, requestid, true);
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);