[REFACTOR] Hafnium style fixes and other minor fixes.

Change-Id: I8f10a1d82f0de9efc43894a3a7cdd09bbbcfc6ec
diff --git a/src/api.c b/src/api.c
index a5d6ef8..7aa573c 100644
--- a/src/api.c
+++ b/src/api.c
@@ -59,7 +59,7 @@
 }
 
 /**
- * Switches the physical CPU back to the corresponding vcpu of the primary VM.
+ * Switches the physical CPU back to the corresponding vCPU of the primary VM.
  *
- * This triggers the scheduling logic to run. Run in the context of secondary VM
- * to cause SPCI_RUN to return and the primary VM to regain control of the CPU.
+ * This triggers the scheduling logic to run. Run in the context of a secondary
+ * VM so that SPCI_RUN returns and the primary VM regains control of the CPU.
@@ -110,7 +110,7 @@
 	/* Set the return value for the primary VM's call to HF_VCPU_RUN. */
 	arch_regs_set_retval(&next->regs, primary_ret);
 
-	/* Mark the current vcpu as waiting. */
+	/* Mark the current vCPU as waiting. */
 	sl_lock(&current->lock);
 	current->state = secondary_state;
 	sl_unlock(&current->lock);
@@ -119,7 +119,7 @@
 }
 
 /**
- * Returns to the primary vm and signals that the vcpu still has work to do so.
+ * Returns to the primary VM and signals that the vCPU still has work to do.
  */
 struct vcpu *api_preempt(struct vcpu *current)
 {
@@ -132,7 +132,7 @@
 }
 
 /**
- * Puts the current vcpu in wait for interrupt mode, and returns to the primary
- * vm.
+ * Puts the current vCPU in wait-for-interrupt mode, and returns to the primary
+ * VM.
  */
 struct vcpu *api_wait_for_interrupt(struct vcpu *current)
@@ -166,8 +166,8 @@
 }
 
 /**
- * Returns to the primary vm to allow this cpu to be used for other tasks as the
- * vcpu does not have work to do at this moment. The current vcpu is marked as
+ * Returns to the primary VM to allow this CPU to be used for other tasks as the
+ * vCPU does not have work to do at this moment. The current vCPU is marked as
  * ready to be scheduled again.
  */
 void api_yield(struct vcpu *current, struct vcpu **next)
@@ -178,7 +178,7 @@
 	};
 
 	if (current->vm->id == HF_PRIMARY_VM_ID) {
-		/* Noop on the primary as it makes the scheduling decisions. */
+		/* No-op on the primary as it makes the scheduling decisions. */
 		return;
 	}
 
@@ -249,7 +249,7 @@
 {
 	struct vm *vm;
 
-	/* Only the primary VM needs to know about vcpus for scheduling. */
+	/* Only the primary VM needs to know about vCPUs for scheduling. */
 	if (current->vm->id != HF_PRIMARY_VM_ID) {
 		return 0;
 	}
@@ -264,8 +264,8 @@
 
 /**
  * This function is called by the architecture-specific context switching
- * function to indicate that register state for the given vcpu has been saved
- * and can therefore be used by other pcpus.
+ * function to indicate that register state for the given vCPU has been saved
+ * and can therefore be used by other pCPUs.
  */
 void api_regs_state_saved(struct vcpu *vcpu)
 {
@@ -375,7 +375,7 @@
 }
 
 /**
- * Prepares the vcpu to run by updating its state and fetching whether a return
+ * Prepares the vCPU to run by updating its state and fetching whether a return
  * value needs to be forced onto the vCPU.
  */
 static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
@@ -522,13 +522,13 @@
 	struct vcpu *vcpu;
 	struct spci_value ret = spci_error(SPCI_INVALID_PARAMETERS);
 
-	/* Only the primary VM can switch vcpus. */
+	/* Only the primary VM can switch vCPUs. */
 	if (current->vm->id != HF_PRIMARY_VM_ID) {
 		ret.arg2 = SPCI_DENIED;
 		goto out;
 	}
 
-	/* Only secondary VM vcpus can be run. */
+	/* Only secondary VM vCPUs can be run. */
 	if (vm_id == HF_PRIMARY_VM_ID) {
 		goto out;
 	}
@@ -1088,7 +1088,7 @@
 
 	/*
 	 * The primary VM will receive messages as a status code from running
-	 * vcpus and must not call this function.
+	 * vCPUs and must not call this function.
 	 */
 	if (vm->id == HF_PRIMARY_VM_ID) {
 		return spci_error(SPCI_NOT_SUPPORTED);
@@ -1377,7 +1377,7 @@
 	}
 
 	if (target_vcpu_idx >= target_vm->vcpu_count) {
-		/* The requested vcpu must exist. */
+		/* The requested vCPU must exist. */
 		return -1;
 	}
 
@@ -1387,7 +1387,7 @@
 
 	target_vcpu = vm_get_vcpu(target_vm, target_vcpu_idx);
 
-	dlog("Injecting IRQ %d for VM %d VCPU %d from VM %d VCPU %d\n", intid,
+	dlog("Injecting IRQ %d for VM %d vCPU %d from VM %d vCPU %d\n", intid,
 	     target_vm_id, target_vcpu_idx, current->vm->id, current->cpu->id);
 	return internal_interrupt_inject(target_vcpu, intid, current, next);
 }
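
Note on the api.c hunks above: api_switch_to_primary follows a fixed pattern,
staging the primary's HF_VCPU_RUN return value and then moving the current
vCPU to its new state under its own spinlock before the physical CPU is handed
back. A minimal C sketch of that pattern, assuming the names used in the hunks
(arch_regs_set_retval, sl_lock/sl_unlock); get_primary_vcpu_for_this_cpu() is
a hypothetical accessor, not Hafnium's real lookup:

static struct vcpu *switch_to_primary_sketch(struct vcpu *current,
					     struct spci_value primary_ret,
					     enum vcpu_state secondary_state)
{
	/* Hypothetical accessor; the real code resolves this differently. */
	struct vcpu *next = get_primary_vcpu_for_this_cpu();

	/* Set the return value for the primary VM's call to HF_VCPU_RUN. */
	arch_regs_set_retval(&next->regs, primary_ret);

	/* Move the current vCPU to its new state under its lock. */
	sl_lock(&current->lock);
	current->state = secondary_state;
	sl_unlock(&current->lock);

	return next;
}
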
diff --git a/src/arch/aarch64/hypervisor/exceptions.S b/src/arch/aarch64/hypervisor/exceptions.S
index 2f47c00..ed218cb 100644
--- a/src/arch/aarch64/hypervisor/exceptions.S
+++ b/src/arch/aarch64/hypervisor/exceptions.S
@@ -18,7 +18,7 @@
 #include "exception_macros.S"
 
 /**
- * Saves the volatile registers into the register buffer of the current vcpu.
+ * Saves the volatile registers into the register buffer of the current vCPU.
  */
 .macro save_volatile_to_vcpu
 	/*
@@ -27,7 +27,7 @@
 	 */
 	str x18, [sp, #-16]!
 
-	/* Get the current vcpu. */
+	/* Get the current vCPU. */
 	mrs x18, tpidr_el2
 	stp x0, x1, [x18, #VCPU_REGS + 8 * 0]
 	stp x2, x3, [x18, #VCPU_REGS + 8 * 2]
@@ -40,7 +40,7 @@
 	stp x16, x17, [x18, #VCPU_REGS + 8 * 16]
 	stp x29, x30, [x18, #VCPU_REGS + 8 * 29]
 
-	/* x18 was saved on the stack, so we move it to vcpu regs buffer. */
+	/* x18 was saved on the stack, so we move it to the vCPU regs buffer. */
 	ldr x0, [sp], #16
 	str x0, [x18, #VCPU_REGS + 8 * 18]
 
@@ -52,11 +52,11 @@
 
 /**
  * This is a generic handler for exceptions taken at a lower EL. It saves the
- * volatile registers to the current vcpu and calls the C handler, which can
+ * volatile registers to the current vCPU and calls the C handler, which can
  * select one of two paths: (a) restore volatile registers and return, or
- * (b) switch to a different vcpu. In the latter case, the handler needs to save
+ * (b) switch to a different vCPU. In the latter case, the handler needs to save
  * all non-volatile registers (they haven't been saved yet), then restore all
- * registers from the new vcpu.
+ * registers from the new vCPU.
  */
 .macro lower_exception handler:req
 	save_volatile_to_vcpu
@@ -64,10 +64,10 @@
 	/* Call C handler. */
 	bl \handler
 
-	/* Switch vcpu if requested by handler. */
+	/* Switch vCPU if requested by handler. */
 	cbnz x0, vcpu_switch
 
-	/* vcpu is not changing. */
+	/* vCPU is not changing. */
 	mrs x0, tpidr_el2
 	b vcpu_restore_volatile_and_run
 .endm
@@ -90,10 +90,10 @@
 	mrs x0, esr_el2
 	bl sync_lower_exception
 
-	/* Switch vcpu if requested by handler. */
+	/* Switch vCPU if requested by handler. */
 	cbnz x0, vcpu_switch
 
-	/* vcpu is not changing. */
+	/* vCPU is not changing. */
 	mrs x0, tpidr_el2
 	b vcpu_restore_volatile_and_run
 .endm
@@ -180,7 +180,7 @@
 	 * can clobber non-volatile registers that are used by the msr/mrs,
 	 * which results in the wrong value being read or written.
 	 */
-	/* Get the current vcpu. */
+	/* Get the current vCPU. */
 	mrs x18, tpidr_el2
 	stp x19, x20, [x18, #VCPU_REGS + 8 * 19]
 	stp x21, x22, [x18, #VCPU_REGS + 8 * 21]
@@ -193,18 +193,18 @@
 	bl handle_system_register_access
 	cbnz x0, vcpu_switch
 
-	/* vcpu is not changing. */
+	/* vCPU is not changing. */
 	mrs x0, tpidr_el2
 	b vcpu_restore_nonvolatile_and_run
 
 /**
- * Switch to a new vcpu.
+ * Switch to a new vCPU.
  *
- * All volatile registers from the old vcpu have already been saved. We need
- * to save only non-volatile ones from the old vcpu, and restore all from the
+ * All volatile registers from the old vCPU have already been saved. We need
+ * to save only non-volatile ones from the old vCPU, and restore all from the
  * new one.
  *
- * x0 is a pointer to the new vcpu.
+ * x0 is a pointer to the new vCPU.
  */
 vcpu_switch:
 	/* Save non-volatile registers. */
@@ -320,7 +320,7 @@
 	mrs x4, fpcr
 	stp x3, x4, [x28], #32
 
-	/* Save new vcpu pointer in non-volatile register. */
+	/* Save new vCPU pointer in non-volatile register. */
 	mov x19, x0
 
 	/*
@@ -334,7 +334,7 @@
 	/* Intentional fallthrough. */
 .global vcpu_restore_all_and_run
 vcpu_restore_all_and_run:
-	/* Update pointer to current vcpu. */
+	/* Update pointer to current vCPU. */
 	msr tpidr_el2, x0
 
 	/* Restore peripheral registers. */
@@ -495,9 +495,9 @@
 
 	/* Intentional fallthrough. */
 /**
- * Restore volatile registers and run the given vcpu.
+ * Restore volatile registers and run the given vCPU.
  *
- * x0 is a pointer to the target vcpu.
+ * x0 is a pointer to the target vCPU.
  */
 vcpu_restore_volatile_and_run:
 	ldp x4, x5, [x0, #VCPU_REGS + 8 * 4]
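
A note on the contract the macros above depend on: the C handler returns NULL
when the current vCPU should keep running (the asm reloads it from tpidr_el2
and restores only volatile registers), or a vcpu pointer when the asm should
take the vcpu_switch path. A hedged C sketch of a handler honouring that
contract; current_vcpu(), can_handle_locally() and pick_next_vcpu() are
hypothetical names, not Hafnium's:

struct vcpu *example_lower_exception_handler(uintreg_t esr)
{
	struct vcpu *current = current_vcpu(); /* hypothetical accessor */

	if (can_handle_locally(current, esr)) { /* hypothetical */
		/* NULL: the asm restores volatile regs of the same vCPU. */
		return NULL;
	}

	/*
	 * Non-NULL: the asm saves the current vCPU's non-volatile
	 * registers, then restores all registers from the returned vCPU.
	 */
	return pick_next_vcpu(current); /* hypothetical */
}
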
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 21c6d94..53ae4d0 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -306,8 +306,8 @@
 			  args->arg4, args->arg5, args->arg6, args->arg7);
 
 	/*
-	 * Preserve the value passed by the caller, rather than the client_id we
-	 * generated. Note that this would also overwrite any return value that
+	 * Preserve the value passed by the caller, rather than the generated
+	 * client_id. Note that this would also overwrite any return value that
 	 * may be in x7, but the SMCs that we are forwarding are legacy calls
 	 * from before SMCCC 1.2 so won't have more than 4 return values anyway.
 	 */
@@ -731,7 +731,7 @@
 }
 
 /**
- * Handles EC = 011000, msr, mrs instruction traps.
+ * Handles EC = 0b011000, MSR, MRS instruction traps.
- * Returns non-null ONLY if the access failed and the vcpu is changing.
+ * Returns non-null ONLY if the access failed and the vCPU is changing.
  */
 struct vcpu *handle_system_register_access(uintreg_t esr_el2)
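
On the client_id comment in the first handler.c hunk: the SMC is forwarded
with the generated client_id in w7, and the caller's original x7 is then
written back over the returned arg7; this is safe because pre-SMCCC-1.2 calls
return at most four values. A sketch of that restore step, where smc_forward()
and struct smc_res are assumptions rather than Hafnium's exact API:

#include <stdint.h>

struct smc_res {
	uint64_t res0, res1, res2, res3, res4, res5, res6, res7;
};

static struct smc_res forward_legacy_smc(const uint64_t arg[8],
					 uint64_t client_id)
{
	/* Forward with the generated client_id in w7, per SMCCC. */
	struct smc_res ret = smc_forward(arg[0], arg[1], arg[2], arg[3],
					 arg[4], arg[5], arg[6], client_id);

	/*
	 * Legacy (pre-SMCCC 1.2) calls return at most four values, so
	 * res7 carries nothing useful; restore the caller's x7 rather
	 * than exposing the generated client_id.
	 */
	ret.res7 = arg[7];
	return ret;
}
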
diff --git a/src/arch/aarch64/hypervisor/vm.c b/src/arch/aarch64/hypervisor/vm.c
index 97c8746..619ee65 100644
--- a/src/arch/aarch64/hypervisor/vm.c
+++ b/src/arch/aarch64/hypervisor/vm.c
@@ -40,7 +40,7 @@
 		vm->arch.trapped_features |= HF_FEATURE_PERFMON;
 
 		/*
-		 * TODO(b/132395845):  Access to RAS registers is not trapped at
+		 * TODO(b/132395845): Access to RAS registers is not trapped at
 		 * the moment for the primary VM, only for the secondaries. RAS
 		 * register access isn't needed now, but it might be
 		 * required for debugging. When Hafnium introduces debug vs
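
For context on the TODO above: trapped_features is a per-VM bitmask
accumulated when the VM is set up, and RAS register traps currently apply to
secondaries only. A sketch of how such a bit is accumulated, mirroring the
HF_FEATURE_PERFMON line in the hunk; the HF_FEATURE_RAS flag and the
primary-VM test are assumptions based on the comment:

static void vm_features_sketch(struct vm *vm)
{
	/* Trap performance monitor registers, as in the hunk above. */
	vm->arch.trapped_features |= HF_FEATURE_PERFMON;

	/* Per the TODO: RAS traps currently apply to secondaries only. */
	if (vm->id != HF_PRIMARY_VM_ID) {
		vm->arch.trapped_features |= HF_FEATURE_RAS;
	}
}
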
diff --git a/src/cpu.c b/src/cpu.c
index 92d17fd..f8beed6 100644
--- a/src/cpu.c
+++ b/src/cpu.c
@@ -46,7 +46,7 @@
  * TOCTOU issues while Hafnium performs actions on information that would
  * otherwise be re-writable by the VM.
  *
- * Each buffer is owned by a single cpu. The buffer can only be used for
+ * Each buffer is owned by a single CPU. The buffer can only be used for
- * spci_msg_send. The information stored in the buffer is only valid during the
- * spci_msg_send request is performed.
+ * spci_msg_send. The information stored in the buffer is only valid while the
+ * spci_msg_send request is being performed.
  */
@@ -158,7 +158,7 @@
 }
 
 /**
- * Searches for a CPU based on its id.
+ * Searches for a CPU based on its ID.
  */
 struct cpu *cpu_find(cpu_id_t id)
 {
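
On the per-CPU buffer comment above: giving each CPU its own buffer lets
Hafnium snapshot the message out of VM-writable memory, so validation and the
spci_msg_send operation act on data the VM can no longer rewrite, which is
what closes the TOCTOU window. A minimal sketch of that pattern; the buffer
layout and validate_and_send() are illustrative, while memcpy_s() and
cpu_index() follow Hafnium's helpers:

static uint8_t cpu_message_buffer[MAX_CPUS][HF_MAILBOX_SIZE];

static bool msg_send_sketch(struct cpu *c, const volatile void *vm_shared,
			    size_t size)
{
	/* Buffer owned by this CPU alone, used only for spci_msg_send. */
	uint8_t *copy = cpu_message_buffer[cpu_index(c)];

	if (size > HF_MAILBOX_SIZE) {
		return false;
	}

	/* Snapshot first; later writes by the VM no longer matter. */
	memcpy_s(copy, HF_MAILBOX_SIZE, (const void *)vm_shared, size);

	/* Validate and act using only the snapshot. */
	return validate_and_send(copy, size); /* hypothetical */
}
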
diff --git a/src/load.c b/src/load.c
index 9f311a9..d69b36d 100644
--- a/src/load.c
+++ b/src/load.c
@@ -234,7 +234,7 @@
 		goto out;
 	}
 
-	dlog("Loaded with %u vcpus, entry at %#x.\n",
+	dlog("Loaded with %u vCPUs, entry at %#x.\n",
 	     manifest_vm->secondary.vcpu_count, pa_addr(mem_begin));
 
 	vcpu = vm_get_vcpu(vm, 0);
diff --git a/src/vm.c b/src/vm.c
index 3d71c51..856509f 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -64,7 +64,7 @@
 		list_init(&vm->wait_entries[i].ready_links);
 	}
 
-	/* Do basic initialization of vcpus. */
+	/* Do basic initialization of vCPUs. */
 	for (i = 0; i < vcpu_count; i++) {
 		vcpu_init(vm_get_vcpu(vm, i), vm);
 	}