refactor(lib/xlat): split xlat_arch_setup_mmu_cfg

Decouple calculating from writing MMU registers. A new structure is
added that can hold the calculated values for MMU registers.
xlat_arch_setup_mmu_cfg only initialises this structure, while the
new function xlat_arch_write_mmu_cfg writes the values to the
registers.

Change-Id: Iea4be3c4594f10057bf35b4e2b2c8030cca2b764
Signed-off-by: Mate Toth-Pal <mate.toth-pal@arm.com>
diff --git a/lib/xlat/include/xlat_tables.h b/lib/xlat/include/xlat_tables.h
index 6d6df7c..ab9506b 100644
--- a/lib/xlat/include/xlat_tables.h
+++ b/lib/xlat/include/xlat_tables.h
@@ -224,6 +224,15 @@
 	int level;		/* Table level of the current entry. */
 };
 
+/* Structure holding the values of MMU registers. */
+struct xlat_mmu_cfg {
+	xlat_addr_region_id_t region;
+	unsigned long mair;
+	unsigned long tcr;
+	uint64_t txsz;
+	unsigned long ttbrx;
+};
+
 /******************************************************************************
  * Generic translation table APIs.
  *****************************************************************************/
@@ -312,18 +321,24 @@
 			   const uintptr_t va);
 
 /*
- * Set up the MMU configuration registers for the specified platform parameters.
+ * Setup the MMU config for the specified xlat_ctx.
  *
- * This function must be called for each context as it configures the
- * appropriate TTBRx register depending on it.
+ * This function must be called for each context as it sets up the MMU config
+ * appropriately.
  *
- * This function also assumes that the contexts for high and low VA halfs share
- * the same virtual address space as well as the same physical address space,
- * so it is safe to call it for each context initialization.
+ * Note that MMU needs to be configured for both Low and High VA.
  *
- * Returns 0 on success or a negative error code otherwise.
+ * Returns 0 on success or one of the following error codes:
+ *  -EINVAL if there is an error in input arguments.
+ *  -EPERM if the hardware config detected does not match expectation.
  */
-int xlat_arch_setup_mmu_cfg(struct xlat_ctx * const ctx);
+int xlat_arch_setup_mmu_cfg(struct xlat_ctx * const ctx, struct xlat_mmu_cfg *mmu_config);
+
+/*
+ * This function will write the MMU config to the MMU registers based
+ * on whether Low VA or High VA region is being configured.
+ */
+void xlat_arch_write_mmu_cfg(struct xlat_mmu_cfg *mmu_cfg);
 
 /* MMU control */
 void xlat_enable_mmu_el2(void);
diff --git a/lib/xlat/src/xlat_contexts.c b/lib/xlat/src/xlat_contexts.c
index 1f014b5..a282524 100644
--- a/lib/xlat/src/xlat_contexts.c
+++ b/lib/xlat/src/xlat_contexts.c
@@ -226,8 +226,6 @@
 	size_t max_va_size = (is_feat_lpa2_4k_present() == true) ?
 		MAX_VIRT_ADDR_SPACE_SIZE_LPA2 : MAX_VIRT_ADDR_SPACE_SIZE;
 
-	assert(!is_mmu_enabled());
-
 	if (cfg == NULL) {
 		return -EINVAL;
 	}
@@ -266,8 +264,10 @@
 	cfg->region = region;
 	cfg->initialized = true;
 
-	inv_dcache_range((uintptr_t)cfg, sizeof(struct xlat_ctx_cfg));
-	inv_dcache_range((uintptr_t)mm, sizeof(struct xlat_mmap_region));
+	if (!is_mmu_enabled()) {
+		inv_dcache_range((uintptr_t)cfg, sizeof(struct xlat_ctx_cfg));
+		inv_dcache_range((uintptr_t)mm, sizeof(struct xlat_mmap_region));
+	}
 
 	return 0;
 }
@@ -278,8 +278,6 @@
 		  uint64_t *tables_ptr,
 		  unsigned int ntables)
 {
-	assert(!is_mmu_enabled());
-
 	if ((ctx == NULL) || (tbls_ctx == NULL) || (cfg == NULL)) {
 		return -EINVAL;
 	}
@@ -309,9 +307,10 @@
 	/* Add the tables to the context */
 	ctx->tbls = tbls_ctx;
 
-	inv_dcache_range((uintptr_t)ctx, sizeof(struct xlat_ctx));
-	inv_dcache_range((uintptr_t)tbls_ctx, sizeof(struct xlat_ctx_tbls));
-	inv_dcache_range((uintptr_t)cfg, sizeof(struct xlat_ctx_cfg));
-
+	if (!is_mmu_enabled()) {
+		inv_dcache_range((uintptr_t)ctx, sizeof(struct xlat_ctx));
+		inv_dcache_range((uintptr_t)tbls_ctx, sizeof(struct xlat_ctx_tbls));
+		inv_dcache_range((uintptr_t)cfg, sizeof(struct xlat_ctx_cfg));
+	}
 	return xlat_init_tables_ctx(ctx);
 }
diff --git a/lib/xlat/src/xlat_high_va.c b/lib/xlat/src/xlat_high_va.c
index 3b68a93..693e792 100644
--- a/lib/xlat/src/xlat_high_va.c
+++ b/lib/xlat/src/xlat_high_va.c
@@ -107,6 +107,7 @@
 	static uint64_t high_va_tts[XLAT_TABLE_ENTRIES * MAX_CPUS] __aligned(XLAT_TABLES_ALIGNMENT);
 
 	unsigned int cpuid = my_cpuid();
+	struct xlat_mmu_cfg mmu_config;
 	int ret;
 
 	/* Set handler stack PA for this CPU */
@@ -140,5 +141,11 @@
 	}
 
 	/* Configure MMU registers */
-	return xlat_arch_setup_mmu_cfg(&high_va_xlat_ctx[cpuid]);
+	ret = xlat_arch_setup_mmu_cfg(&high_va_xlat_ctx[cpuid], &mmu_config);
+
+	if (ret == 0) {
+		xlat_arch_write_mmu_cfg(&mmu_config);
+	}
+
+	return ret;
 }
diff --git a/lib/xlat/src/xlat_tables_arch.c b/lib/xlat/src/xlat_tables_arch.c
index 61567ca..d7f5d83 100644
--- a/lib/xlat/src/xlat_tables_arch.c
+++ b/lib/xlat/src/xlat_tables_arch.c
@@ -81,22 +81,68 @@
 	return TCR_PS_BITS_4GB;
 }
 
+void xlat_arch_write_mmu_cfg(struct xlat_mmu_cfg *mmu_cfg)
+{
+	uint64_t tcr;
+	uint64_t t0sz, t1sz;
+
+	/* MMU cannot be enabled at this point */
+	assert(!is_mmu_enabled());
+
+	/*
+	 * Read TCR_EL2 in order to extract t0sz and t1sz, so we can update the right
+	 * field depending on which context we are configuring and leave the other one
+	 * untouched.
+	 * It will not be a problem if TCR_EL2 was previously configured, as the new
+	 * value of it will be the same with the only difference of the txsz field we
+	 * want to update.
+	 */
+	tcr = read_tcr_el2();
+	if (mmu_cfg->region == VA_LOW_REGION) {
+		t0sz = mmu_cfg->txsz;
+		t1sz = EXTRACT(TCR_EL2_T1SZ, tcr);
+	} else {
+		t0sz = EXTRACT(TCR_EL2_T0SZ, tcr);
+		t1sz = mmu_cfg->txsz;
+	}
+
+	tcr = mmu_cfg->tcr;
+	/*
+	 * Update the TCR_EL2 value with the memory region's sizes.
+	 * It is not necessary to clear t?sz fields in tcr as they are cleared
+	 * by xlat_arch_setup_mmu_cfg and are expected to be the same across
+	 * calls to xlat_arch_write_mmu_cfg.
+	 */
+	tcr |= (t0sz << TCR_EL2_T0SZ_SHIFT);
+	tcr |= (t1sz << TCR_EL2_T1SZ_SHIFT);
+
+	write_mair_el2(mmu_cfg->mair);
+	write_tcr_el2(tcr);
+
+	if (mmu_cfg->region == VA_LOW_REGION) {
+		write_ttbr0_el2(mmu_cfg->ttbrx);
+	} else {
+		write_ttbr1_el2(mmu_cfg->ttbrx);
+	}
+}
+
 /*
  * Configure MMU registers. This function assumes that all the contexts use the
  * same limits for VA and PA spaces.
  */
-int xlat_arch_setup_mmu_cfg(struct xlat_ctx * const ctx)
+int xlat_arch_setup_mmu_cfg(struct xlat_ctx * const ctx, struct xlat_mmu_cfg *mmu_config)
 {
 	uint64_t mair;
-	uint64_t tcr;
+	uint64_t tcr = 0;
 	uint64_t ttbrx;
 	uintptr_t va_space_size;
 	struct xlat_ctx_cfg *ctx_cfg;
 	struct xlat_ctx_tbls *ctx_tbls;
-	uint64_t t0sz, t1sz, txsz;
+	uint64_t txsz;
 	uint64_t pa_size_bits;
 
 	assert(ctx != NULL);
+	assert(mmu_config != NULL);
 
 	ctx_cfg = ctx->cfg;
 	ctx_tbls = ctx->tbls;
@@ -113,9 +159,6 @@
 		return -EINVAL;
 	}
 
-	/* MMU cannot be enabled at this point */
-	assert(!is_mmu_enabled());
-
 	/* Set attributes in the right indices of the MAIR. */
 	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
 	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
@@ -129,27 +172,6 @@
 	txsz = (uint64_t)(64 - __builtin_ctzll(va_space_size));
 
 	/*
-	 * Read TCR_EL2 in order to extract t0sz and t1sz. So we can update the right
-	 * field depending on which context we are configuring and leave the other one
-	 * untouched.
-	 * It will not be a problem if TCR_EL2 was previoulsy configured, as the new
-	 * value of it will be the same with the only difference of the txsz field we
-	 * want to update.
-	 */
-	tcr = read_tcr_el2();
-	if (ctx_cfg->region == VA_LOW_REGION) {
-		t0sz = txsz;
-		t1sz = EXTRACT(TCR_EL2_T1SZ, tcr);
-	} else {
-		t0sz = EXTRACT(TCR_EL2_T0SZ, tcr);
-		t1sz = txsz;
-	}
-
-	/* Recompute the value for TCR_EL2 */
-	tcr = t0sz << TCR_EL2_T0SZ_SHIFT;
-	tcr |= t1sz << TCR_EL2_T1SZ_SHIFT;
-
-	/*
 	 * Set the cacheability and shareability attributes for memory
 	 * associated with translation table walks.
 	 */
@@ -182,13 +204,10 @@
 	pa_size_bits = tcr_physical_addr_size_bits(
 					xlat_arch_get_max_supported_pa());
 	if (pa_size_bits == ~(0ULL)) {
-		return -ENOMEM;
+		return -EPERM;
 	}
 	tcr |= pa_size_bits;
 
-	write_mair_el2(mair);
-	write_tcr_el2(tcr);
-
 	/*
 	 * Set TTBR bits as well and enable CnP bit so as to share page
 	 * tables with all PEs.
@@ -205,11 +224,11 @@
 		ttbrx |= TTBR_CNP_BIT;
 	}
 
-	if (ctx_cfg->region == VA_LOW_REGION) {
-		write_ttbr0_el2(ttbrx);
-	} else {
-		write_ttbr1_el2(ttbrx);
-	}
+	mmu_config->region = ctx_cfg->region;
+	mmu_config->mair = mair;
+	mmu_config->tcr = tcr;
+	mmu_config->txsz = txsz;
+	mmu_config->ttbrx = ttbrx;
 
 	return 0;
 }
diff --git a/lib/xlat/src/xlat_tables_core.c b/lib/xlat/src/xlat_tables_core.c
index 8fb8518..e4f6238 100644
--- a/lib/xlat/src/xlat_tables_core.c
+++ b/lib/xlat/src/xlat_tables_core.c
@@ -474,8 +474,6 @@
 	struct xlat_ctx_cfg *ctx_cfg;
 	struct xlat_ctx_tbls *ctx_tbls;
 
-	assert(!is_mmu_enabled());
-
 	ctx_cfg = ctx->cfg;
 	ctx_tbls = ctx->tbls;
 
@@ -518,15 +516,17 @@
 	}
 
 	/* Inv the cache as a good measure */
-	inv_dcache_range((uintptr_t)(void *)ctx_tbls->tables,
-			 sizeof(uint64_t) * (unsigned long)ctx_tbls->tables_num
-						* XLAT_TABLE_ENTRIES);
-
+	if (!is_mmu_enabled()) {
+		inv_dcache_range((uintptr_t)(void *)ctx_tbls->tables,
+				 sizeof(uint64_t) * (unsigned long)ctx_tbls->tables_num
+							* XLAT_TABLE_ENTRIES);
+	}
 	ctx_tbls->initialized = true;
 
-	inv_dcache_range((uintptr_t)(void *)ctx_tbls,
-			   sizeof(struct xlat_ctx_tbls));
-
+	if (!is_mmu_enabled()) {
+		inv_dcache_range((uintptr_t)(void *)ctx_tbls,
+				   sizeof(struct xlat_ctx_tbls));
+	}
 	xlat_tables_print(ctx);
 
 	return 0;
diff --git a/lib/xlat/tests/xlat_tests_base.h b/lib/xlat/tests/xlat_tests_base.h
index 7152020..ba20616 100644
--- a/lib/xlat/tests/xlat_tests_base.h
+++ b/lib/xlat/tests/xlat_tests_base.h
@@ -24,14 +24,12 @@
 void xlat_ctx_cfg_init_tc11(void);
 void xlat_ctx_cfg_init_tc12(void);
 void xlat_ctx_cfg_init_tc13(void);
-void xlat_ctx_cfg_init_tc14(void);
 
 void xlat_ctx_init_tc1(void);
 void xlat_ctx_init_tc2(void);
 void xlat_ctx_init_tc3(void);
 void xlat_ctx_init_tc4(void);
 void xlat_ctx_init_tc5(void);
-void xlat_ctx_init_tc6(void);
 
 void xlat_get_llt_from_va_tc1(void);
 void xlat_get_llt_from_va_tc2(void);
@@ -62,6 +60,7 @@
 void xlat_arch_setup_mmu_cfg_tc4(void);
 void xlat_arch_setup_mmu_cfg_tc5(void);
 void xlat_arch_setup_mmu_cfg_tc6(void);
+void xlat_arch_setup_mmu_cfg_tc7(void);
 
 void xlat_get_oa_from_tte_tc1(void);
 
diff --git a/lib/xlat/tests/xlat_tests_base_g1.cpp b/lib/xlat/tests/xlat_tests_base_g1.cpp
index 8a4f62c..8c2fa81 100644
--- a/lib/xlat/tests/xlat_tests_base_g1.cpp
+++ b/lib/xlat/tests/xlat_tests_base_g1.cpp
@@ -600,7 +600,7 @@
 	 *	- 'base_pa' < maximum supported PA && 'base_pa' + 'size'
 	 *	  > maximum supported PA
 	 *	- PAs on different memory regions overlap.
-	 *	- Some memory ragions have misaligned PAs.
+	 *	- Some memory regions have misaligned PAs.
 	 ***************************************************************/
 
 	index = ARRAY_SIZE(pa_range_bits_arr);
@@ -1004,42 +1004,6 @@
 	}
 }
 
-void xlat_ctx_cfg_init_tc14(void)
-{
-	struct xlat_ctx_cfg cfg;
-	uintptr_t start_va, end_va;
-	struct xlat_mmap_region init_mmap;
-	uint64_t max_va_size = XLAT_TEST_MAX_VA_SIZE();
-
-	/***************************************************************
-	 * TEST CASE 14:
-	 *
-	 * Try to initialize the xlat_ctx_cfg structure with the MMU
-	 * enabled.
-	 *
-	 ***************************************************************/
-
-	/* Emulate the MMU enabled */
-	write_sctlr_el2(SCTLR_ELx_WXN_BIT | SCTLR_ELx_M_BIT);
-
-	/* Clean the data structure */
-	memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
-
-	/* VA space boundaries */
-	start_va = xlat_test_helpers_get_start_va(VA_LOW_REGION, max_va_size);
-	end_va = start_va + max_va_size - 1UL;
-
-	xlat_test_helpers_rand_mmap_array(&init_mmap, 1U, start_va, end_va);
-
-	test_helpers_expect_assert_fail(true);
-
-	/* Initialize the test structure */
-	(void)xlat_ctx_cfg_init(&cfg, VA_LOW_REGION, &init_mmap, 1U,
-				max_va_size);
-
-	test_helpers_fail_if_no_assert_failed();
-}
-
 void xlat_ctx_init_tc1(void)
 {
 	struct xlat_ctx ctx;
@@ -1300,53 +1264,3 @@
 		test_helpers_fail_if_no_assert_failed();
 	}
 }
-
-void xlat_ctx_init_tc5(void)
-{
-	struct xlat_ctx ctx;
-	struct xlat_ctx_tbls tbls;
-	struct xlat_ctx_cfg cfg;
-	uintptr_t start_va, end_va;
-	int retval;
-	xlat_addr_region_id_t va_region;
-	uint64_t max_va_size = XLAT_TEST_MAX_VA_SIZE();
-	struct xlat_mmap_region init_mmap;
-
-	/***************************************************************
-	 * TEST CASE 5:
-	 *
-	 * Try to initialize a context with a valid random memory map
-	 * and the MMU enabled.
-	 *
-	 ***************************************************************/
-	va_region = (xlat_addr_region_id_t)test_helpers_get_rand_in_range(0UL,
-							VA_REGIONS - 1U);
-
-	/* Clean the data structures */
-	memset((void *)&ctx, 0, sizeof(struct xlat_ctx));
-	memset((void *)&cfg, 0, sizeof(struct xlat_ctx_cfg));
-	memset((void *)&tbls, 0, sizeof(struct xlat_ctx_tbls));
-
-	/* VA space boundaries */
-	start_va = xlat_test_helpers_get_start_va(va_region, max_va_size);
-	end_va = start_va + max_va_size - 1UL;
-
-	xlat_test_helpers_rand_mmap_array(&init_mmap, 1U, start_va, end_va);
-
-	/* Initialize the test structure */
-	retval = xlat_ctx_cfg_init(&cfg, va_region, &init_mmap, 1U, max_va_size);
-
-	/* Verify that the context cfg is properly created */
-	CHECK_TRUE(retval == 0);
-
-	/* Force the MMU enablement */
-	xlat_enable_mmu_el2();
-
-	test_helpers_expect_assert_fail(true);
-
-	/* Test xlat_ctx_init() with MMU Enabled */
-	(void)xlat_ctx_init(&ctx, &cfg, &tbls, xlat_test_helpers_tbls(),
-			    XLAT_TESTS_MAX_TABLES);
-
-	test_helpers_fail_if_no_assert_failed();
-}
diff --git a/lib/xlat/tests/xlat_tests_base_g2.cpp b/lib/xlat/tests/xlat_tests_base_g2.cpp
index 1360403..50dd999 100644
--- a/lib/xlat/tests/xlat_tests_base_g2.cpp
+++ b/lib/xlat/tests/xlat_tests_base_g2.cpp
@@ -273,7 +273,7 @@
 	}
 }
 
-void xlat_ctx_init_tc6(void)
+void xlat_ctx_init_tc5(void)
 {
 	struct xlat_ctx ctx;
 	struct xlat_ctx_cfg cfg;
@@ -288,7 +288,7 @@
 	int base_lvl, end_lvl;
 
 	/**********************************************************************
-	 * TEST CASE 6:
+	 * TEST CASE 5:
 	 *
 	 * For each possible base level, create a set of mmap regions
 	 * ranging from level 1 or 0 (lowest level at which a valid walk can
@@ -2149,6 +2149,7 @@
 		PARANGE_0110_WIDTH
 	};
 	uint64_t max_va_size = XLAT_TEST_MAX_VA_SIZE();
+	struct xlat_mmu_cfg mmu_config;
 
 	/***************************************************************
 	 * TEST CASE 1:
@@ -2195,11 +2196,14 @@
 		CHECK_TRUE(retval == 0);
 
 		/* Initialize MMU for the given context */
-		retval = xlat_arch_setup_mmu_cfg(&ctx[i]);
+		retval = xlat_arch_setup_mmu_cfg(&ctx[i], &mmu_config);
 
 		/* Verify that the MMU has been configured */
 		CHECK_TRUE(retval == 0);
 
+		/* Write the MMU config for the given context */
+		xlat_arch_write_mmu_cfg(&mmu_config);
+
 		/* Validate TTBR_EL2 for each context */
 		validate_ttbrx_el2(&ctx[i]);
 	}
@@ -2217,6 +2221,7 @@
 	int retval;
 	struct xlat_mmap_region init_mmap;
 	uint64_t max_va_size =	XLAT_TEST_MAX_VA_SIZE();
+	struct xlat_mmu_cfg mmu_config;
 
 	/***************************************************************
 	 * TEST CASE 2:
@@ -2259,7 +2264,7 @@
 	ctx.cfg->initialized = false;
 
 	/* Try to initialize MMU for the given context */
-	retval = xlat_arch_setup_mmu_cfg(&ctx);
+	retval = xlat_arch_setup_mmu_cfg(&ctx, &mmu_config);
 
 	/* Verify that the MMU has failed to be initialized */
 	CHECK_TRUE(retval == -EINVAL);
@@ -2274,7 +2279,7 @@
 				ID_AA64MMFR0_EL1_TGRAN4_NOT_SUPPORTED));
 
 	/* Try to initialize MMU for the given context */
-	retval = xlat_arch_setup_mmu_cfg(&ctx);
+	retval = xlat_arch_setup_mmu_cfg(&ctx, &mmu_config);
 
 	/* Verify that the MMU has failed to be initialized */
 	CHECK_TRUE(retval == -EPERM);
@@ -2291,10 +2296,10 @@
 				ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED));
 
 	/* Try to initialize MMU for the given context */
-	retval = xlat_arch_setup_mmu_cfg(&ctx);
+	retval = xlat_arch_setup_mmu_cfg(&ctx, &mmu_config);
 
 	/* Verify that the MMU has failed to be initialized */
-	CHECK_TRUE(retval == -ENOMEM);
+	CHECK_TRUE(retval == -EPERM);
 
 }
 
@@ -2306,8 +2311,10 @@
 	 * Test xlat_arch_setup_mmu_cfg() with a NULL context.
 	 ***************************************************************/
 
+	struct xlat_mmu_cfg mmu_config;
+
 	test_helpers_expect_assert_fail(true);
-	(void)xlat_arch_setup_mmu_cfg(NULL);
+	(void)xlat_arch_setup_mmu_cfg(NULL, &mmu_config);
 	test_helpers_fail_if_no_assert_failed();
 }
 
@@ -2328,13 +2335,15 @@
 	 * in order to generate an initial valid context.
 	 ***************************************************************/
 
+	struct xlat_mmu_cfg mmu_config;
+
 	xlat_get_llt_from_va_prepare_assertion(&ctx, &cfg, &tbls, &init_mmap);
 
 	/* Force the context configuration to NULL */
 	ctx.cfg = NULL;
 
 	test_helpers_expect_assert_fail(true);
-	(void)xlat_arch_setup_mmu_cfg(&ctx);
+	(void)xlat_arch_setup_mmu_cfg(&ctx, &mmu_config);
 	test_helpers_fail_if_no_assert_failed();
 }
 
@@ -2355,13 +2364,15 @@
 	 * in order to generate an initial valid context.
 	 ***************************************************************/
 
+	struct xlat_mmu_cfg mmu_config;
+
 	xlat_get_llt_from_va_prepare_assertion(&ctx, &cfg, &tbls, &init_mmap);
 
 	/* Force the context tables structure to NULL */
 	ctx.tbls = NULL;
 
 	test_helpers_expect_assert_fail(true);
-	(void)xlat_arch_setup_mmu_cfg(&ctx);
+	(void)xlat_arch_setup_mmu_cfg(&ctx, &mmu_config);
 	test_helpers_fail_if_no_assert_failed();
 }
 
@@ -2374,6 +2385,7 @@
 	int retval;
 	struct xlat_mmap_region init_mmap;
 	uint64_t max_va_size =	XLAT_TEST_MAX_VA_SIZE();
+	struct xlat_mmu_cfg mmu_config;
 
 	/***************************************************************
 	 * TEST CASE 6:
@@ -2408,9 +2420,28 @@
 
 	test_helpers_expect_assert_fail(true);
 
-	/* Try to initialize MMU for the given context */
-	retval = xlat_arch_setup_mmu_cfg(&ctx);
+	/* Initialize MMU config for the given context */
+	retval = xlat_arch_setup_mmu_cfg(&ctx, &mmu_config);
+	CHECK_TRUE(retval == 0);
 
+	/* Try to write the MMU config for the given context */
+	xlat_arch_write_mmu_cfg(&mmu_config);
+
+	test_helpers_fail_if_no_assert_failed();
+}
+
+void xlat_arch_setup_mmu_cfg_tc7(void)
+{
+	/***************************************************************
+	 * TEST CASE 7:
+	 *
+	 * Test xlat_arch_setup_mmu_cfg() with a NULL config.
+	 ***************************************************************/
+
+	struct xlat_ctx ctx;
+
+	test_helpers_expect_assert_fail(true);
+	(void)xlat_arch_setup_mmu_cfg(&ctx, NULL);
 	test_helpers_fail_if_no_assert_failed();
 }
 
diff --git a/lib/xlat/tests/xlat_tests_lpa2.cpp b/lib/xlat/tests/xlat_tests_lpa2.cpp
index 8646bfc..1a63d5d 100644
--- a/lib/xlat/tests/xlat_tests_lpa2.cpp
+++ b/lib/xlat/tests/xlat_tests_lpa2.cpp
@@ -102,11 +102,6 @@
 	xlat_ctx_cfg_init_tc12();
 }
 
-ASSERT_TEST(xlat_tests_LPA2, xlat_ctx_cfg_init_TC14)
-{
-	xlat_ctx_cfg_init_tc14();
-}
-
 TEST(xlat_tests_LPA2, xlat_ctx_init_TC1)
 {
 	xlat_ctx_init_tc1();
@@ -127,16 +122,11 @@
 	xlat_ctx_init_tc4();
 }
 
-ASSERT_TEST(xlat_tests_LPA2, xlat_ctx_init_TC5)
+TEST(xlat_tests_LPA2, xlat_ctx_init_TC5)
 {
 	xlat_ctx_init_tc5();
 }
 
-TEST(xlat_tests_LPA2, xlat_ctx_init_TC6)
-{
-	xlat_ctx_init_tc6();
-}
-
 TEST(xlat_tests_LPA2, xlat_get_llt_from_va_TC1)
 {
 	xlat_get_llt_from_va_tc1();
@@ -262,6 +252,11 @@
 	xlat_arch_setup_mmu_cfg_tc6();
 }
 
+ASSERT_TEST(xlat_tests_LPA2, xlat_arch_setup_mmu_cfg_TC7)
+{
+	xlat_arch_setup_mmu_cfg_tc7();
+}
+
 TEST(xlat_tests_LPA2, xlat_get_oa_from_tte_TC1)
 {
 	xlat_get_oa_from_tte_tc1();
diff --git a/lib/xlat/tests/xlat_tests_no_lpa2.cpp b/lib/xlat/tests/xlat_tests_no_lpa2.cpp
index 500c08b..548e01c 100644
--- a/lib/xlat/tests/xlat_tests_no_lpa2.cpp
+++ b/lib/xlat/tests/xlat_tests_no_lpa2.cpp
@@ -107,11 +107,6 @@
 	xlat_ctx_cfg_init_tc13();
 }
 
-ASSERT_TEST(xlat_tests_no_LPA2, xlat_ctx_cfg_init_TC14)
-{
-	xlat_ctx_cfg_init_tc14();
-}
-
 TEST(xlat_tests_no_LPA2, xlat_ctx_init_TC1)
 {
 	xlat_ctx_init_tc1();
@@ -132,16 +127,11 @@
 	xlat_ctx_init_tc4();
 }
 
-ASSERT_TEST(xlat_tests_no_LPA2, xlat_ctx_init_TC5)
+TEST(xlat_tests_no_LPA2, xlat_ctx_init_TC5)
 {
 	xlat_ctx_init_tc5();
 }
 
-TEST(xlat_tests_no_LPA2, xlat_ctx_init_TC6)
-{
-	xlat_ctx_init_tc6();
-}
-
 TEST(xlat_tests_no_LPA2, xlat_get_llt_from_va_TC1)
 {
 	xlat_get_llt_from_va_tc1();
@@ -267,6 +257,11 @@
 	xlat_arch_setup_mmu_cfg_tc6();
 }
 
+ASSERT_TEST(xlat_tests_no_LPA2, xlat_arch_setup_mmu_cfg_TC7)
+{
+	xlat_arch_setup_mmu_cfg_tc7();
+}
+
 TEST(xlat_tests_no_LPA2, xlat_get_oa_from_tte_TC1)
 {
 	xlat_get_oa_from_tte_tc1();