Add memory pool.

This is in preparation for replacing the existing allocator.

Change-Id: I3d6b01ee07be248f02336b8d7756883a9eeace85
diff --git a/src/mpool.c b/src/mpool.c
new file mode 100644
index 0000000..21bce22
--- /dev/null
+++ b/src/mpool.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/mpool.h"
+
+#include <stdbool.h>
+
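+/*
+ * A chunk of free memory; the header below is stored at the start of the
+ * chunk itself, and `limit` points one past the end of the chunk.
+ */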
+struct mpool_chunk {
+	struct mpool_chunk *next_chunk;
+	struct mpool_chunk *limit;
+};
+
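+/* A free entry of the pool's entry size, linked into the free list. */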
+struct mpool_entry {
+	struct mpool_entry *next;
+};
+
+static bool mpool_locks_enabled = false;
+
+/**
+ * Enables the locks protecting memory pools. Before this function is called,
+ * the locks are disabled; that is, acquiring/releasing them is a no-op.
+ */
+void mpool_enable_locks(void)
+{
+	mpool_locks_enabled = true;
+}
+
+/**
+ * Acquires the lock protecting the given memory pool, if locks are enabled.
+ */
+static void mpool_lock(struct mpool *p)
+{
+	if (mpool_locks_enabled) {
+		sl_lock(&p->lock);
+	}
+}
+
+/**
+ * Releases the lock protecting the given memory pool, if locks are enabled.
+ */
+static void mpool_unlock(struct mpool *p)
+{
+	if (mpool_locks_enabled) {
+		sl_unlock(&p->lock);
+	}
+}
+
+/**
+ * Initialises the given memory pool with the given entry size, which must be
+ * at least the size of two pointers: free memory is used to store the pool's
+ * own chunk and entry headers in place.
+ *
+ * All entries stored in the memory pool will be aligned to at least the entry
+ * size.
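+ *
+ * Example (illustrative; `struct page` and `ppool` are hypothetical names):
+ *
+ *	static struct mpool ppool;
+ *	mpool_init(&ppool, sizeof(struct page));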
+ */
+void mpool_init(struct mpool *p, size_t entry_size)
+{
+	p->entry_size = entry_size;
+	p->chunk_list = NULL;
+	p->entry_list = NULL;
+	sl_init(&p->lock);
+}
+
+/**
+ * Initialises the given memory pool by replicating the properties of `from`. It
+ * also pulls the chunk and free lists from `from`, consuming all its resources
+ * and making them available via the new memory pool.
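+ *
+ * Example (illustrative; `old_pool` is hypothetical): hand an existing pool's
+ * contents over to a new one:
+ *
+ *	struct mpool new_pool;
+ *	mpool_init_from(&new_pool, &old_pool);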
+ */
+void mpool_init_from(struct mpool *p, struct mpool *from)
+{
+	mpool_init(p, from->entry_size);
+
+	mpool_lock(from);
+	p->chunk_list = from->chunk_list;
+	p->entry_list = from->entry_list;
+
+	from->chunk_list = NULL;
+	from->entry_list = NULL;
+	mpool_unlock(from);
+}
+
+/**
+ * Adds a contiguous chunk of memory to the given memory pool. The chunk will
+ * eventually be broken up into entries of the size held by the memory pool.
+ *
+ * Only the portion of the chunk aligned to the entry size will be added to
+ * the pool; any misaligned prefix or suffix is discarded.
+ *
+ * Returns true if at least a portion of the chunk was added to the pool, or
+ * false if none of the buffer was usable in the pool.
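+ *
+ * Example (illustrative; `buf` and its size are hypothetical):
+ *
+ *	static char buf[4096];
+ *	mpool_add_chunk(&ppool, buf, sizeof(buf));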
+ */
+bool mpool_add_chunk(struct mpool *p, void *begin, size_t size)
+{
+	struct mpool_chunk *chunk;
+	uintptr_t new_begin;
+	uintptr_t new_end;
+
+	/* Round begin address up, and end address down. */
+	new_begin = ((uintptr_t)begin + p->entry_size - 1) / p->entry_size *
+		    p->entry_size;
+	new_end = ((uintptr_t)begin + size) / p->entry_size * p->entry_size;
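+	/*
+	 * E.g., with entry_size 0x40, begin 0x1030 and size 0x100, this gives
+	 * new_begin = 0x1040 and new_end = 0x1100: room for three entries.
+	 */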
+
+	/* Nothing to do if there isn't enough room for an entry. */
+	if (new_begin >= new_end || new_end - new_begin < p->entry_size) {
+		return false;
+	}
+
+	chunk = (struct mpool_chunk *)new_begin;
+	chunk->limit = (struct mpool_chunk *)new_end;
+
+	mpool_lock(p);
+	chunk->next_chunk = p->chunk_list;
+	p->chunk_list = chunk;
+	mpool_unlock(p);
+
+	return true;
+}
+
+/**
+ * Allocates an entry from the given memory pool, if one is available.
+ */
+void *mpool_alloc(struct mpool *p)
+{
+	void *ret;
+	struct mpool_chunk *chunk;
+	struct mpool_chunk *new_chunk;
+
+	/* Fetch an entry from the free list if one is available. */
+	mpool_lock(p);
+	if (p->entry_list != NULL) {
+		struct mpool_entry *entry = p->entry_list;
+		p->entry_list = entry->next;
+		ret = entry;
+		goto exit;
+	}
+
+	chunk = p->chunk_list;
+	if (chunk == NULL) {
+		/* The chunk list is also empty; we're out of entries. */
+		ret = NULL;
+		goto exit;
+	}
+
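+	/*
+	 * Carve the first entry off the front of the chunk: if anything
+	 * remains, copy the chunk header forward by one entry; otherwise drop
+	 * the chunk from the list entirely.
+	 */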
+	new_chunk = (struct mpool_chunk *)((uintptr_t)chunk + p->entry_size);
+	if (new_chunk >= chunk->limit) {
+		p->chunk_list = chunk->next_chunk;
+	} else {
+		*new_chunk = *chunk;
+		p->chunk_list = new_chunk;
+	}
+
+	ret = chunk;
+
+exit:
+	mpool_unlock(p);
+	return ret;
+}
+
+/**
+ * Frees an entry back into the memory pool, making it available for reuse.
+ *
+ * This is meant to be used for freeing single entries. To free multiple
+ * entries, one must call mpool_add_chunk instead.
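+ *
+ * Example (illustrative; `ppool` is hypothetical):
+ *
+ *	void *entry = mpool_alloc(&ppool);
+ *	if (entry != NULL) {
+ *		...
+ *		mpool_free(&ppool, entry);
+ *	}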
+ */
+void mpool_free(struct mpool *p, void *ptr)
+{
+	struct mpool_entry *e = ptr;
+
+	/* Store the newly freed entry in the front of the free list. */
+	mpool_lock(p);
+	e->next = p->entry_list;
+	p->entry_list = e;
+	mpool_unlock(p);
+}
+
+/**
+ * Allocates a number of contiguous and aligned entries. This is a best-effort
+ * operation and only succeeds if such entries can be found in chunks (i.e., the
+ * entry list is never used to satisfy these allocations).
+ *
+ * The alignment is specified as the number of entries, that is, if `align` is
+ * 4, the alignment in bytes will be 4 * entry_size.
+ *
+ * The caller can eventually free the returned entries by calling
+ * mpool_add_chunk.
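+ *
+ * Example (illustrative; `ppool` is hypothetical): allocate four contiguous
+ * entries aligned to 4 * entry_size bytes, then give them back later:
+ *
+ *	void *block = mpool_alloc_contiguous(&ppool, 4, 4);
+ *	if (block != NULL) {
+ *		...
+ *		mpool_add_chunk(&ppool, block, 4 * ppool.entry_size);
+ *	}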
+ */
+void *mpool_alloc_contiguous(struct mpool *p, size_t count, size_t align)
+{
+	struct mpool_chunk **prev;
+	void *ret = NULL;
+
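+	/* Convert the requested alignment from a number of entries to bytes. */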
+	align *= p->entry_size;
+
+	mpool_lock(p);
+
+	/*
+	 * Go through the chunk list in search of one with enough room for the
+	 * requested allocation.
+	 */
+	prev = &p->chunk_list;
+	while (*prev != NULL) {
+		uintptr_t start;
+		struct mpool_chunk *new_chunk;
+		struct mpool_chunk *chunk = *prev;
+
+		/* Round start address up to the required alignment. */
+		start = ((uintptr_t)chunk + align - 1) / align * align;
+
+		/*
+		 * Calculate where the new chunk would be if we consume the
+		 * requested number of entries. Then check if this chunk is big
+		 * enough to satisfy the request.
+		 */
+		new_chunk =
+			(struct mpool_chunk *)(start + count * p->entry_size);
+		if (new_chunk <= chunk->limit) {
+			/* Remove the consumed area. */
+			if (new_chunk == chunk->limit) {
+				*prev = chunk->next_chunk;
+			} else {
+				*new_chunk = *chunk;
+				*prev = new_chunk;
+			}
+
+			/*
+			 * Add back the space consumed by the alignment
+			 * requirement, if it's big enough to fit an entry; a
+			 * smaller leading sliver is discarded.
+			 */
+			if (start - (uintptr_t)chunk >= p->entry_size) {
+				chunk->next_chunk = *prev;
+				*prev = chunk;
+				chunk->limit = (struct mpool_chunk *)start;
+			}
+
+			ret = (void *)start;
+			break;
+		}
+
+		prev = &chunk->next_chunk;
+	}
+
+	mpool_unlock(p);
+
+	return ret;
+}