feat(tools/cbmc): implement host_gtsi_delegate

Add an implementation of host_gtsi_[un]delegate to emulate the Granule
Transition Service changing the granule PAS. This makes it possible to
reach 100% coverage with tb_rmi_granule_delegate.

Change-Id: Ia9f0b21f508a9a015606f6a279078d829dd54e86
Signed-off-by: Mate Toth-Pal <mate.toth-pal@arm.com>
diff --git a/plat/host/host_cbmc/src/tb_common.c b/plat/host/host_cbmc/src/tb_common.c
new file mode 100644
index 0000000..225966d
--- /dev/null
+++ b/plat/host/host_cbmc/src/tb_common.c
@@ -0,0 +1,180 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
+ */
+
+#include "granule.h"
+#include "host_defs.h"
+#include "host_utils.h"
+#include "measurement.h"
+#include "status.h"
+#include "string.h"
+#include "table.h"
+#include "tb_common.h"
+#include "tb_granules.h"
+#include "utils_def.h"
+
+/*
+ * This array records, per granule index, whether the granule that is
+ * injected into RMM's granule array is set to NS in the emulated GPT.
+ * Entries default to false (not NS).
+ */
+static bool granule_gpt_ns_array[RMM_MAX_GRANULES];
+
+/* Nondeterministic source for register state, resolved by CBMC. */
+struct tb_regs nondet_tb_regs(void);
+
+/* Return an arbitrary (nondeterministic) register state for the harness. */
+struct tb_regs __tb_arb_regs(void)
+{
+	return nondet_tb_regs();
+}
+
+/* Check that a returned status code matches the expected code. */
+bool ResultEqual_2(unsigned int code, unsigned int expected)
+{
+	return code == expected;
+}
+
+/*
+ * TODO: not implemented yet — always returns true, ignoring all three
+ * arguments, so it never constrains the verification.
+ */
+bool ResultEqual_3(unsigned int code, unsigned int expected, unsigned int level)
+{
+	return true;
+}
+
+/* Model of the ASL Zeros() helper: a 64-bit all-zero value. */
+uint64_t Zeros(void)
+{
+	return UINT64_C(0);
+}
+
+/* Return an arbitrary (nondeterministic) boolean for the harness. */
+bool __tb_arb_bool(void)
+{
+	return nondet_bool();
+}
+
+/* TODO: lock invariant is not checked yet; intentionally a no-op. */
+void __tb_lock_invariant(struct tb_lock_status *lock_status)
+{
+	/* TODO */
+}
+
+/* Return a zero-initialized lock status (no locks held). */
+struct tb_lock_status __tb_lock_status(void)
+{
+	struct tb_lock_status r = {0UL};
+	return r;
+}
+
+/* RMM's granule metadata array, defined elsewhere in the host platform. */
+extern struct granule granules[RMM_MAX_GRANULES];
+/* Tracks which granule slots have been populated by the harness. */
+bool used_granules_buffer[HOST_NR_GRANULES] = { 0 };
+
+/*
+ * Return true if `addr` is a granule-aligned PA that falls inside the
+ * host's `granules_buffer` backing store.
+ */
+bool valid_pa(uint64_t addr)
+{
+	/*
+	 * NOTE: the explicit pointer to integer type cast is necessary, as CBMC
+	 * check fails without it.
+	 */
+	if (GRANULE_ALIGNED(addr) && (uint64_t)granules_buffer <= addr &&
+		addr < (uint64_t)granules_buffer + sizeof(granules_buffer)) {
+		/*
+		 * Keep these asserts as a sanity check; there were situations
+		 * where they failed, possibly because CBMC dislikes type
+		 * conversions between integers and pointers.
+		 */
+		ASSERT(GRANULE_ALIGNED(addr), "internal: `_valid_pa`, addr in alignment");
+		ASSERT(addr >= (uint64_t)granules_buffer,
+			"internal: `_valid_pa`, addr in lower range");
+		ASSERT(addr < (uint64_t)granules_buffer + sizeof(granules_buffer),
+			"internal: `_valid_pa`, addr in upper range");
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Map a granule PA inside `granules_buffer` to its metadata entry in
+ * `granules`. `addr` is expected to have been validated with `valid_pa()`.
+ */
+struct granule *pa_to_granule_metadata_ptr(uint64_t addr)
+{
+	uint64_t idx = (addr - (uint64_t)granules_buffer)/GRANULE_SIZE;
+
+	/*
+	 * `idx` is unsigned, so `idx >= 0` would be a tautology and would
+	 * not catch an `addr` below the buffer (the subtraction above wraps).
+	 * Check the lower bound on `addr` directly instead.
+	 */
+	__ASSERT(addr >= (uint64_t)granules_buffer,
+		"internal: `_pa_to_granule_metadata_ptr`, addr is in lower range");
+	__ASSERT(idx < RMM_MAX_GRANULES,
+		"internal: `_pa_to_granule_metadata_ptr`, addr is in upper range");
+
+	return &granules[idx];
+}
+
+/* Return true if `p` points into the `granules` metadata array. */
+bool valid_granule_metadata_ptr(struct granule *p)
+{
+	return p >= granules
+		&& p < granules + RMM_MAX_GRANULES;
+}
+
+/*
+ * Return the index of an unused entry, valid for both the `granules` and
+ * `granules_buffer` arrays, or (size_t)-1 when none is available.
+ * Under CBMC the `__ASSUME` constrains the nondeterministic `index` to an
+ * unused slot, making the failure path unreachable in analysed traces.
+ */
+size_t next_index(void)
+{
+	size_t index = nondet_size_t();
+
+	__ASSUME(unused_index(index));
+	/* Defensive fallback; note -1 wraps to SIZE_MAX on a size_t return. */
+	if (!unused_index(index)) {
+		return -1;
+	}
+
+	REACHABLE;
+
+	return index;
+}
+
+/* Return true if `index` is in range and its granule slot is still free. */
+bool unused_index(size_t index)
+{
+	if (index < HOST_NR_GRANULES && !used_granules_buffer[index]) {
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Copy `size` bytes of `content` into the next unused granule of
+ * `granules_buffer` and mark that granule slot as used.
+ */
+void init_pa_page(const void *content, size_t size)
+{
+	size_t index = next_index();
+
+	/*
+	 * `next_index()` returns (size_t)-1 when no free slot is left; bail
+	 * out rather than computing a wrapped offset and writing out of
+	 * bounds of `used_granules_buffer`.
+	 */
+	if (!unused_index(index)) {
+		return;
+	}
+
+	size_t offset = index * GRANULE_SIZE;
+
+	(void)memcpy(granules_buffer + offset, content, size);
+	used_granules_buffer[index] = true;
+}
+
+/*
+ * Place granule metadata at `granules[index]` and copy `src_size` bytes of
+ * page content into the matching slot of `granules_buffer`, marking the
+ * slot used. Returns a pointer to the injected metadata entry. The caller
+ * is responsible for `index` being in range and unused.
+ */
+struct granule *inject_granule_at(const struct granule *granule_metadata,
+				  const void *src_page,
+				  size_t src_size,
+				  size_t index)
+{
+	size_t offset = index * GRANULE_SIZE;
+
+	granules[index] = *granule_metadata;
+	(void)memcpy(granules_buffer + offset, src_page, src_size);
+	used_granules_buffer[index] = true;
+	return &granules[index];
+}
+
+/*
+ * Inject a granule (metadata + page content) at the next unused index.
+ * For NS granules the GPT NS state is chosen nondeterministically, so CBMC
+ * explores both possibilities; any other state is recorded as not-NS.
+ * NOTE(review): `index` is used unchecked; this relies on the `__ASSUME`
+ * in `next_index()` pruning the failure path under CBMC.
+ */
+struct granule *inject_granule(const struct granule *granule_metadata,
+			       const void *src_page,
+			       size_t src_size)
+{
+	size_t index = next_index();
+
+	if (granule_metadata->state == GRANULE_STATE_NS) {
+		granule_gpt_ns_array[index] = nondet_bool();
+	} else{
+		granule_gpt_ns_array[index] = false;
+	}
+	return inject_granule_at(granule_metadata, src_page, src_size, index);
+}
+
+/*
+ * Return whether the granule at `addr` is marked NS in the emulated GPT.
+ * `addr` is assumed to be a valid granule PA (see `valid_pa()`); no bounds
+ * check is performed here.
+ */
+bool is_granule_gpt_ns(uint64_t addr)
+{
+	uint64_t idx = (addr - (uint64_t)granules_buffer)/GRANULE_SIZE;
+
+	return granule_gpt_ns_array[idx];
+}
+
+/*
+ * Record the emulated GPT NS state of the granule at `addr`, modelling a
+ * Granule Transition Service PAS change. `addr` is assumed to be a valid
+ * granule PA (see `valid_pa()`); no bounds check is performed here.
+ */
+void set_granule_gpt_ns(uint64_t addr, bool gpt_ns)
+{
+	uint64_t idx = (addr - (uint64_t)granules_buffer)/GRANULE_SIZE;
+
+	granule_gpt_ns_array[idx] = gpt_ns;
+}