feat: enable `-Wsign-compare`

This enables the `-Wsign-compare` warning everywhere except inside the
`ASSERT_EQ` macro; the warnings that macro triggers will be addressed in
later commits. Signed error constants compared against unsigned values
(`SMCCC_ERROR_UNKNOWN`, `PSCI_ERROR_ALREADY_ON`, `FFA_ABORTED`) gain
explicit casts, loop indices compared against unsigned counts become
`size_t`, pointer differences compared against `entry_size` are cast to
`size_t`, and the bare `-1` sentinel for `gp_register_num` is replaced
with the named constant `DEFAULT_BOOT_GP_REGISTER`.
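
Most of these fixes only make an implicit signed-to-unsigned conversion
explicit. A minimal standalone sketch of the cast pattern (compile with
`-Wsign-compare` to see the warning; the names here are illustrative,
not Hafnium's):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for SMCCC_ERROR_UNKNOWN: a negative error
     * code defined as a plain (signed) int. */
    #define ERROR_UNKNOWN (-1)

    static void check(uint64_t func)
    {
            /* `func == ERROR_UNKNOWN` compares a uint64_t with an int
             * and triggers -Wsign-compare. The int was already being
             * converted to 0xffffffffffffffff by the usual arithmetic
             * conversions; the cast makes that explicit and silences
             * the warning without changing behaviour. */
            if (func == (uint64_t)ERROR_UNKNOWN) {
                    puts("unknown function");
            }
    }

    int main(void)
    {
            check(UINT64_C(0xffffffffffffffff)); /* "unknown function" */
            return 0;
    }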

Change-Id: I485a0e909f23264c662702d996f792e9411ecd2c
Signed-off-by: Karl Meakin <karl.meakin@arm.com>
diff --git a/src/arch/aarch64/hypervisor/ffa.c b/src/arch/aarch64/hypervisor/ffa.c
index 07187e1..9c75a8f 100644
--- a/src/arch/aarch64/hypervisor/ffa.c
+++ b/src/arch/aarch64/hypervisor/ffa.c
@@ -44,7 +44,7 @@
 
 	if (ret.func == FFA_SUCCESS_32) {
 		spmc_id = ret.arg2;
-	} else if (ret.func == SMCCC_ERROR_UNKNOWN ||
+	} else if (ret.func == (uint64_t)SMCCC_ERROR_UNKNOWN ||
 		   (ret.func == FFA_ERROR_32 &&
 		    ffa_error_code(ret) == FFA_NOT_SUPPORTED)) {
 		spmc_id = HF_SPMC_VM_ID;
diff --git a/src/arch/aarch64/hypervisor/psci_handler.c b/src/arch/aarch64/hypervisor/psci_handler.c
index b1e1843..3532998 100644
--- a/src/arch/aarch64/hypervisor/psci_handler.c
+++ b/src/arch/aarch64/hypervisor/psci_handler.c
@@ -183,7 +183,7 @@
 					(uintreg_t)&cpu_entry, (uintreg_t)c, 0,
 					0, 0, SMCCC_CALLER_HYPERVISOR);
 			*ret = smc_res.func;
-		} while (*ret == PSCI_ERROR_ALREADY_ON);
+		} while (*ret == (uintreg_t)PSCI_ERROR_ALREADY_ON);
 
 		if (*ret != PSCI_RETURN_SUCCESS) {
 			cpu_off(c);
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index 3c7db40..3808923 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -786,7 +786,7 @@
 		(struct ffa_value){.func = FFA_RXTX_UNMAP_32,
 				   .arg1 = id << FFA_RXTX_ALLOCATOR_SHIFT});
 	func = ret.func & ~SMCCC_CONVENTION_MASK;
-	if (ret.func == SMCCC_ERROR_UNKNOWN) {
+	if (ret.func == (uint64_t)SMCCC_ERROR_UNKNOWN) {
 		panic("Unknown error forwarding RXTX_UNMAP.\n");
 	} else if (func == FFA_ERROR_32) {
 		panic("Error %d forwarding RX/TX buffers.\n", ret.arg2);
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index f2a7b7b..b5a76d2 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -2893,7 +2893,7 @@
 	current_locked = vcpu_lock(current);
 	rt_model = current_locked.vcpu->rt_model;
 
-	if (error_code == FFA_ABORTED && rt_model == RTM_SP_INIT) {
+	if (error_code == (uint32_t)FFA_ABORTED && rt_model == RTM_SP_INIT) {
 		dlog_error("Aborting SP %#x from vCPU %u\n", current->vm->id,
 			   vcpu_index(current));
 
diff --git a/src/arch/aarch64/smc.h b/src/arch/aarch64/smc.h
index 14bb9c7..ab243c9 100644
--- a/src/arch/aarch64/smc.h
+++ b/src/arch/aarch64/smc.h
@@ -46,7 +46,7 @@
  * TODO: Trusted OS call: 0x32000000 - 0x3f000000
  */
 
-#define SMCCC_ERROR_UNKNOWN  (-1)
+#define SMCCC_ERROR_UNKNOWN (-1)
 
 #define SMCCC_VERSION_FUNC_ID	0x80000000
 #define SMCCC_VERSION_1_2	0x10002
diff --git a/src/ffa_memory.c b/src/ffa_memory.c
index aa15bb9..d188f3d 100644
--- a/src/ffa_memory.c
+++ b/src/ffa_memory.c
@@ -614,7 +614,7 @@
 		}
 	}
 
-	for (int i = 0; i < memory_region->receiver_count; i++) {
+	for (size_t i = 0; i < memory_region->receiver_count; i++) {
 		uint32_t composite_offset;
 
 		if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
diff --git a/src/manifest.c b/src/manifest.c
index 92afa7b..b317fc8 100644
--- a/src/manifest.c
+++ b/src/manifest.c
@@ -1122,7 +1122,7 @@
 	}
 
 	/* GP register is restricted to one of x0 - x3. */
-	if (vm->partition.gp_register_num != -1 &&
+	if (vm->partition.gp_register_num != DEFAULT_BOOT_GP_REGISTER &&
 	    vm->partition.gp_register_num > 3) {
 		dlog_error("GP register number %s: %u\n", error_string,
 			   vm->partition.gp_register_num);
diff --git a/src/mpool.c b/src/mpool.c
index 0ee7cb8..ed40e6e 100644
--- a/src/mpool.c
+++ b/src/mpool.c
@@ -159,7 +159,8 @@
 	new_end = (void *)align_down((char *)begin + size, p->entry_size);
 
 	/* Nothing to do if there isn't enough room for an entry. */
-	if (new_begin >= new_end || new_end - new_begin < p->entry_size) {
+	if (new_begin >= new_end ||
+	    (size_t)(new_end - new_begin) < p->entry_size) {
 		return false;
 	}
 
@@ -302,7 +303,7 @@
 			 * Add back the space consumed by the alignment
 			 * requirement, if it's big enough to fit an entry.
 			 */
-			if (start - (char *)chunk >= p->entry_size) {
+			if ((size_t)(start - (char *)chunk) >= p->entry_size) {
 				chunk->next_chunk = *prev;
 				*prev = chunk;
 				chunk->limit = (struct mpool_chunk *)start;