Add support for accessing EL1 debug registers

For now, the primary vm can access all EL1 debug registers, whereas
secondary vms cannot.  Debug event exceptions are disabled for secondary vms,
so breakpoints or watchpoints set up by a malicious primary cannot fire while
a secondary vm runs.

This code allows us, in the future, to add debug support to secondary vms,
to have fine-grained control over which registers the primary or a secondary
may access, and to change the behavior of such accesses.
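
As a rough illustration of the intended policy, the access check in the C
handler could eventually look something like the sketch below.  Every name in
the sketch is a placeholder for this description only; the change itself adds
just the assembly trap path and the mdscr_el1 context switching.

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in types for the sketch; the real vm/vcpu structures differ. */
    struct vm { uint16_t id; };
    struct vcpu { struct vm *vm; };

    /* Assumption for the sketch: the primary vm has id 0. */
    #define SKETCH_PRIMARY_VM_ID 0

    /*
     * Per-access policy: for now, only the primary vm may touch EL1 debug
     * registers.  A finer-grained version could also decode which register
     * is being accessed from ESR_EL2 and consult a per-vm allow-list.
     */
    static bool debug_el1_access_allowed(const struct vcpu *vcpu)
    {
            return vcpu->vm->id == SKETCH_PRIMARY_VM_ID;
    }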

Bug: 132422368
Change-Id: I616454cc12bea6b8dfebbbdb566ac64c0a6625c2
diff --git a/src/arch/aarch64/hypervisor/exceptions.S b/src/arch/aarch64/hypervisor/exceptions.S
index 6a8309f..1c4d642 100644
--- a/src/arch/aarch64/hypervisor/exceptions.S
+++ b/src/arch/aarch64/hypervisor/exceptions.S
@@ -55,6 +55,54 @@
 .endm
 
 /**
+ * Save all general purpose registers into the current vcpu's register buffer.
+ */
+.macro save_registers_to_vcpu
+	save_volatile_to_vcpu also_save_x18
+	stp x19, x20, [x18, #VCPU_REGS + 8 * 19]
+	stp x21, x22, [x18, #VCPU_REGS + 8 * 21]
+	stp x23, x24, [x18, #VCPU_REGS + 8 * 23]
+	stp x25, x26, [x18, #VCPU_REGS + 8 * 25]
+	stp x27, x28, [x18, #VCPU_REGS + 8 * 27]
+.endm
+
+/**
+ * Restore the volatile registers from the register buffer of the current vcpu.
+ */
+.macro restore_volatile_from_vcpu vcpu_ptr:req
+	ldp x4, x5, [\vcpu_ptr, #VCPU_REGS + 8 * 4]
+	ldp x6, x7, [\vcpu_ptr, #VCPU_REGS + 8 * 6]
+	ldp x8, x9, [\vcpu_ptr, #VCPU_REGS + 8 * 8]
+	ldp x10, x11, [\vcpu_ptr, #VCPU_REGS + 8 * 10]
+	ldp x12, x13, [\vcpu_ptr, #VCPU_REGS + 8 * 12]
+	ldp x14, x15, [\vcpu_ptr, #VCPU_REGS + 8 * 14]
+	ldp x16, x17, [\vcpu_ptr, #VCPU_REGS + 8 * 16]
+	ldr x18, [\vcpu_ptr, #VCPU_REGS + 8 * 18]
+	ldp x29, x30, [\vcpu_ptr, #VCPU_REGS + 8 * 29]
+
+	/* Restore return address & mode. */
+	ldp x1, x2, [\vcpu_ptr, #VCPU_REGS + 8 * 31]
+	msr elr_el2, x1
+	msr spsr_el2, x2
+
+	/* Restore x0..x3, which we have used as scratch before. */
+	ldp x2, x3, [\vcpu_ptr, #VCPU_REGS + 8 * 2]
+	ldp x0, x1, [\vcpu_ptr, #VCPU_REGS + 8 * 0]
+.endm
+
+/**
+ * Restore all general purpose registers from the current vcpu's buffer.
+ */
+.macro restore_registers_from_vcpu vcpu_ptr:req
+	ldp x19, x20, [\vcpu_ptr, #VCPU_REGS + 8 * 19]
+	ldp x21, x22, [\vcpu_ptr, #VCPU_REGS + 8 * 21]
+	ldp x23, x24, [\vcpu_ptr, #VCPU_REGS + 8 * 23]
+	ldp x25, x26, [\vcpu_ptr, #VCPU_REGS + 8 * 25]
+	ldp x27, x28, [\vcpu_ptr, #VCPU_REGS + 8 * 27]
+	restore_volatile_from_vcpu \vcpu_ptr
+.endm
+
+/**
  * This is a generic handler for exceptions taken at a lower EL. It saves the
  * volatile registers to the current vcpu and calls the C handler, which can
  * select one of two paths: (a) restore volatile registers and return, or
@@ -91,8 +139,8 @@
 	lsr x18, x18, #26
 
 	/* Take the slow path if exception is not due to an HVC instruction. */
-	sub x18, x18, #0x16
-	cbnz x18, slow_sync_lower
+	cmp x18, #0x16
+	b.ne slow_sync_lower
 
 	/*
 	 * Save x29 and x30, which are not saved by the callee, then jump to
@@ -193,6 +241,10 @@
 
 .balign 0x40
 slow_sync_lower:
+	/* Take the system register path for EC 0x18 (trapped msr/mrs). */
+	cmp x18, #0x18
+	b.eq handle_system_register_access_s
+
 	/* The caller must have saved x18, so we don't save it here. */
 	save_volatile_to_vcpu
 
@@ -316,6 +368,9 @@
 	mrs x5, mdcr_el2
 	stp x4, x5, [x28], #16
 
+	mrs x6, mdscr_el1
+	str x6, [x28], #16
+
 	/* Save GIC registers. */
 #if GIC_VERSION == 3 || GIC_VERSION == 4
 	/* Offset is too large, so start from a new base. */
@@ -472,6 +527,9 @@
 	msr vttbr_el2, x4
 	msr mdcr_el2, x5
 
+	ldr x6, [x28], #16
+	msr mdscr_el1, x6
+
 	/* Restore GIC registers. */
 #if GIC_VERSION == 3 || GIC_VERSION == 4
 	/* Offset is too large, so start from a new base. */
@@ -506,30 +564,35 @@
  * x0 is a pointer to the target vcpu.
  */
 vcpu_restore_volatile_and_run:
-	ldp x4, x5, [x0, #VCPU_REGS + 8 * 4]
-	ldp x6, x7, [x0, #VCPU_REGS + 8 * 6]
-	ldp x8, x9, [x0, #VCPU_REGS + 8 * 8]
-	ldp x10, x11, [x0, #VCPU_REGS + 8 * 10]
-	ldp x12, x13, [x0, #VCPU_REGS + 8 * 12]
-	ldp x14, x15, [x0, #VCPU_REGS + 8 * 14]
-	ldp x16, x17, [x0, #VCPU_REGS + 8 * 16]
-	ldr x18, [x0, #VCPU_REGS + 8 * 18]
-	ldp x29, x30, [x0, #VCPU_REGS + 8 * 29]
-
-	/* Restore return address & mode. */
-	ldp x1, x2, [x0, #VCPU_REGS + 8 * 31]
-	msr elr_el2, x1
-	msr spsr_el2, x2
-
-	/* Restore x0..x3, which we have used as scratch before. */
-	ldp x2, x3, [x0, #VCPU_REGS + 8 * 2]
-	ldp x0, x1, [x0, #VCPU_REGS + 8 * 0]
+	restore_volatile_from_vcpu x0
 	eret
 
 .balign 0x40
 /**
- * Restores volatile registers from stack and returns.
+ * Restore volatile registers from the stack and return to the original caller.
  */
 restore_from_stack_and_return:
 	restore_volatile_from_stack el2
 	eret
+
+.balign 0x40
+/**
+ * Handle system register accesses (EC 0x18) and return to the original caller.
+ */
+handle_system_register_access_s:
+	/*
+	 * All registers are (conservatively) saved because the handler can
+	 * clobber non-volatile registers that are used by the msr/mrs, which
+	 * results in the wrong value being read or written.
+	 */
+	save_registers_to_vcpu
+
+	/* Read syndrome register and call C handler. */
+	mrs x0, esr_el2
+	bl handle_system_register_access
+	cbnz x0, vcpu_switch
+
+	/* vcpu is not changing. */
+	mrs x0, tpidr_el2
+	restore_registers_from_vcpu x0
+	eret
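
For reference, the new handle_system_register_access_s path passes ESR_EL2 to
the C handler in x0 and resumes the current vcpu when the handler returns
zero (cbnz x0, vcpu_switch otherwise).  The sketch below is a hypothetical
shape for such a handler: the ISS bit positions follow the architectural
encoding for EC 0x18, but the vcpu layout and every helper named here are
assumptions, not part of this change.  It also illustrates why having all
general purpose registers in the vcpu buffer matters: the trapped
instruction's Rt may be any register, including a non-volatile one, and the
handler reads or writes it through the saved state.

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t uintreg_t;

    /*
     * Stand-in for the register file filled in by save_registers_to_vcpu;
     * the real struct vcpu is larger and defined elsewhere.
     */
    struct vcpu {
            uintreg_t r[31]; /* x0..x30 */
    };

    /* ISS fields for EC 0x18 (trapped msr/mrs/system instruction). */
    #define ISS_IS_READ(esr) ((esr) & 1)           /* 1 = mrs, 0 = msr */
    #define ISS_RT(esr)      (((esr) >> 5) & 0x1f) /* general purpose reg */

    /* Hypothetical helpers, not part of this change. */
    extern struct vcpu *current_vcpu(void);        /* e.g. via tpidr_el2 */
    extern uintreg_t debug_el1_read(uintreg_t esr);
    extern void debug_el1_write(uintreg_t esr, uintreg_t value);
    extern void vcpu_advance_pc(struct vcpu *vcpu);

    /*
     * Called with ESR_EL2 as the only argument.  Returning NULL resumes the
     * current vcpu; returning a vcpu pointer takes the vcpu_switch path.
     */
    struct vcpu *handle_system_register_access(uintreg_t esr)
    {
            struct vcpu *vcpu = current_vcpu();
            uintreg_t rt = ISS_RT(esr);

            if (ISS_IS_READ(esr)) {
                    /* mrs xN, <reg>: write the value into the saved xN. */
                    if (rt != 31) { /* Rt == 31 encodes xzr; discard. */
                            vcpu->r[rt] = debug_el1_read(esr);
                    }
            } else {
                    /* msr <reg>, xN: take the value from the saved xN. */
                    debug_el1_write(esr, rt == 31 ? 0 : vcpu->r[rt]);
            }

            /* Step past the emulated instruction before returning. */
            vcpu_advance_pc(vcpu);
            return NULL;
    }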