Hold on to your braces!

This introduces the use of clang-tidy for static analysis and linting.
We start by requiring braces around all control-flow statement bodies,
even single statements, to reduce the risk of programmer error from
misreading which statements a conditional or loop actually governs.

Change-Id: I8aba449e6ef8405192d04aff5ed827f97e458d3d
diff --git a/src/mm.c b/src/mm.c
index 8bfd6f4..8942ec5 100644
--- a/src/mm.c
+++ b/src/mm.c
@@ -58,8 +58,9 @@
 	size_t inc;
 
 	/* Just return pointer to table if it's already populated. */
-	if (arch_mm_pte_is_table(v))
+	if (arch_mm_pte_is_table(v)) {
 		return arch_mm_pte_to_table(v);
+	}
 
 	/* Allocate a new table. */
 	ntable = (sync_alloc ? halloc_aligned : halloc_aligned_nosync)(
@@ -75,10 +76,11 @@
 		new_pte = arch_mm_absent_pte();
 	} else {
 		inc = mm_entry_size(level - 1);
-		if (level == 1)
+		if (level == 1) {
 			new_pte = arch_mm_block_to_page_pte(v);
-		else
+		} else {
 			new_pte = v;
+		}
 	}
 
 	/* Initialise entries in the new table. */
@@ -128,14 +130,16 @@
 	bool sync = flags & MAP_FLAG_SYNC;
 
 	/* Cap va_end so that we don't go over the current level max. */
-	if (va_end > va_level_end)
+	if (va_end > va_level_end) {
 		va_end = va_level_end;
+	}
 
 	/* Fill each entry in the table. */
 	while (va < va_end) {
 		if (level == 0) {
-			if (commit)
+			if (commit) {
 				table[i] = arch_mm_pa_to_page_pte(pa, attrs);
+			}
 		} else if ((va_end - va) >= entry_size &&
 			   arch_mm_is_block_allowed(level) &&
 			   (va & (entry_size - 1)) == 0) {
@@ -149,12 +153,14 @@
 		} else {
 			pte_t *nt =
 				mm_populate_table_pte(table + i, level, sync);
-			if (!nt)
+			if (!nt) {
 				return false;
+			}
 
 			if (!mm_map_level(va, va_end, pa, attrs, nt, level - 1,
-					  flags))
+					  flags)) {
 				return false;
+			}
 		}
 
 		va = (va + entry_size) & ~(entry_size - 1);
@@ -170,10 +176,11 @@
  */
 static void mm_invalidate_tlb(vaddr_t begin, vaddr_t end, bool stage1)
 {
-	if (stage1)
+	if (stage1) {
 		arch_mm_invalidate_stage1_range(begin, end);
-	else
+	} else {
 		arch_mm_invalidate_stage2_range(begin, end);
+	}
 }
 
 /**
@@ -197,8 +204,9 @@
 	 * state. In such a two-step implementation, the table may be left with
 	 * extra internal tables, but no different mapping on failure.
 	 */
-	if (!mm_map_level(begin, end, paddr, attrs, t->table, level, flags))
+	if (!mm_map_level(begin, end, paddr, attrs, t->table, level, flags)) {
 		return false;
+	}
 
 	mm_map_level(begin, end, paddr, attrs, t->table, level,
 		     flags | MAP_FLAG_COMMIT);
@@ -222,8 +230,9 @@
 	end = arch_mm_clear_va(end + PAGE_SIZE - 1);
 
 	/* Also do updates in two steps, similarly to mm_ptable_map. */
-	if (!mm_map_level(begin, end, begin, 0, t->table, level, flags))
+	if (!mm_map_level(begin, end, begin, 0, t->table, level, flags)) {
 		return false;
+	}
 
 	mm_map_level(begin, end, begin, 0, t->table, level,
 		     flags | MAP_FLAG_COMMIT);
@@ -250,8 +259,9 @@
 
 	for (i = arch_mm_max_level(&t->arch); i > 0; i--) {
 		table = mm_populate_table_pte(table + mm_index(va, i), i, sync);
-		if (!table)
+		if (!table) {
 			return false;
+		}
 	}
 
 	i = mm_index(va, 0);
@@ -267,12 +277,14 @@
 {
 	uint64_t i;
 	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
-		if (!arch_mm_pte_is_present(table[i]))
+		if (!arch_mm_pte_is_present(table[i])) {
 			continue;
+		}
 
 		dlog("%*s%x: %x\n", 4 * (max_level - level), "", i, table[i]);
-		if (!level)
+		if (!level) {
 			continue;
+		}
 
 		if (arch_mm_pte_is_table(table[i])) {
 			mm_dump_table_recursive(arch_mm_pte_to_table(table[i]),
@@ -307,16 +319,19 @@
 	size_t i;
 	pte_t *table;
 
-	if (mode & MM_MODE_NOSYNC)
+	if (mode & MM_MODE_NOSYNC) {
 		table = halloc_aligned_nosync(PAGE_SIZE, PAGE_SIZE);
-	else
+	} else {
 		table = halloc_aligned(PAGE_SIZE, PAGE_SIZE);
+	}
 
-	if (!table)
+	if (!table) {
 		return false;
+	}
 
-	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++)
+	for (i = 0; i < PAGE_SIZE / sizeof(pte_t); i++) {
 		table[i] = arch_mm_absent_pte();
+	}
 
 	t->table = table;
 	arch_mm_ptable_init(&t->arch);