Use explicit page pool in page table manipulation.
Change-Id: Ibc3c21f815dfae54a541581941e553f79caaaace
diff --git a/src/api.c b/src/api.c
index 1cba24f..96474a8 100644
--- a/src/api.c
+++ b/src/api.c
@@ -38,6 +38,17 @@
"Currently, a page is mapped for the send and receive buffers so "
"the maximum request is the size of a page.");
+static struct mpool api_ppool;
+
+/**
+ * Initializes the API page pool by taking ownership of the contents of the given
+ * page pool.
+ */
+void api_init(struct mpool *ppool)
+{
+ mpool_init_from(&api_ppool, ppool);
+}
+
/**
* Switches the physical CPU back to the corresponding vcpu of the primary VM.
*
@@ -222,8 +233,8 @@
pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);
/* Map the send page as read-only in the hypervisor address space. */
- vm->mailbox.send =
- mm_identity_map(pa_send_begin, pa_send_end, MM_MODE_R);
+ vm->mailbox.send = mm_identity_map(pa_send_begin, pa_send_end,
+ MM_MODE_R, &api_ppool);
if (!vm->mailbox.send) {
ret = -1;
goto exit;
@@ -233,11 +244,11 @@
* Map the receive page as writable in the hypervisor address space. On
* failure, unmap the send page before returning.
*/
- vm->mailbox.recv =
- mm_identity_map(pa_recv_begin, pa_recv_end, MM_MODE_W);
+ vm->mailbox.recv = mm_identity_map(pa_recv_begin, pa_recv_end,
+ MM_MODE_W, &api_ppool);
if (!vm->mailbox.recv) {
vm->mailbox.send = NULL;
- mm_unmap(pa_send_begin, pa_send_end, 0);
+ mm_unmap(pa_send_begin, pa_send_end, 0, &api_ppool);
ret = -1;
goto exit;
}