#include "cpu.h"

#include "arch_cpu.h"
#include "dlog.h"
#include "std.h"
#include "timer.h"
#include "vm.h"

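/*
 * Pair of vcpus returned by cpu_next_vcpu(): the vcpu that was running on
 * this CPU (old) and the vcpu that should run next (new).
 */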
struct new_old_vcpu {
        struct vcpu *new;
        struct vcpu *old;
};

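/*
 * Initializes the given CPU structure. Interrupts start off disabled:
 * irq_disable_count is set to 1, so cpu_irq_enable() must be called before
 * IRQs are actually enabled.
 */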
void cpu_init(struct cpu *c)
{
        /* TODO: Assumes that c is zeroed out already. */
        sl_init(&c->lock);
        list_init(&c->ready_queue);
        c->irq_disable_count = 1;
}

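/*
 * Decrements the IRQ disable count, enabling interrupts once every
 * cpu_irq_disable() call has been balanced by a cpu_irq_enable().
 */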
void cpu_irq_enable(struct cpu *c)
{
        c->irq_disable_count--;
        if (!c->irq_disable_count)
                arch_irq_enable();
}

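/*
 * Disables interrupts if they aren't disabled already, and increments the
 * disable count so that nested disables are balanced correctly.
 */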
void cpu_irq_disable(struct cpu *c)
{
        if (!c->irq_disable_count)
                arch_irq_disable();
        c->irq_disable_count++;
}

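/*
 * Increments the count of reasons for the CPU to be on, powering it on via
 * arch_cpu_on() when the count goes from zero to one.
 */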
void cpu_on(struct cpu *c)
{
        sl_lock(&c->lock);
        if (!c->cpu_on_count) {
                /* The CPU is currently off, we need to turn it on. */
                arch_cpu_on(c->id, c);
        }
        c->cpu_on_count++;
        sl_unlock(&c->lock);
}

/*
 * Decrements the count of reasons for the CPU to be on, and powers the CPU
 * off when it reaches zero. This must only be called from the CPU that is
 * being turned off.
 */
void cpu_off(struct cpu *c)
{
        bool on;

        sl_lock(&c->lock);
        c->cpu_on_count--;
        on = c->cpu_on_count > 0;
        sl_unlock(&c->lock);

        if (!on)
                arch_cpu_off();
}

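/*
 * Marks the given vcpu as runnable and appends it to the ready queue of the
 * CPU it is assigned to, unless it is already queued.
 */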
void vcpu_ready(struct vcpu *v)
{
        struct cpu *c = v->cpu;

        sl_lock(&c->lock);
        if (!v->is_runnable) {
                v->is_runnable = true;
                list_append(&c->ready_queue, &v->links);
                /* TODO: Send IPI to cpu if needed. */
        }
        sl_unlock(&c->lock);
}

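/*
 * Marks the given vcpu as not runnable and removes it from its CPU's ready
 * queue if it is currently queued.
 */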
void vcpu_unready(struct vcpu *v)
{
        struct cpu *c = v->cpu;

        sl_lock(&c->lock);
        if (v->is_runnable) {
                v->is_runnable = false;
                list_remove(&v->links);
        }
        sl_unlock(&c->lock);
}

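/*
 * Timer callback that requests a reschedule; disabled along with the
 * timer_set() call at the end of cpu_next_vcpu().
 */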
#if 0
static bool cpu_schedule_next(void *ctx)
{
        /* Indicate that a new vcpu should be chosen. */
        return true;
}
#endif

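/*
 * Chooses the next vcpu to run on the current CPU, walking the CPU's ready
 * queue in round-robin order and busy-waiting while the queue is empty.
 * Returns both the vcpu that was running (old) and the one chosen to run
 * next (new).
 */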
struct new_old_vcpu cpu_next_vcpu(void)
{
        struct cpu *c = cpu();
        struct new_old_vcpu ret;
        struct vcpu *next;
        bool switch_mm;

        /* TODO: Check if too soon. */

        sl_lock(&c->lock);

        ret.old = c->current;
        if (list_empty(&c->ready_queue)) {
                bool first = true;
                c->current = NULL;
                do {
                        sl_unlock(&c->lock);
                        /* TODO: Implement this. Enable irqs. */
                        if (first) {
                                dlog("CPU%d waiting for work...\n", c->id);
                                first = false;
                        }
                        sl_lock(&c->lock);
                } while (list_empty(&c->ready_queue));
                dlog("CPU%d found work!\n", c->id);
        }

        next = LIST_ELEM(c->ready_queue.next, struct vcpu, links);
        if (next->links.next != &c->ready_queue) {
                /* Move new vcpu to the end of ready queue. */
                list_remove(&next->links);
                list_append(&c->ready_queue, &next->links);
        }

        c->current = next;

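        /*
         * Propagate any pending interrupt into the vcpu's saved register
         * state, otherwise make sure no interrupt is flagged there.
         */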
        if (next->interrupt) {
                arch_regs_set_irq(&next->regs);
                next->interrupt = false;
        } else {
                arch_regs_clear_irq(&next->regs);
        }

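        /*
         * The VM's page table only needs to be switched if the next vcpu
         * belongs to a different VM than the previous one, or if nothing
         * was running before.
         */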
        switch_mm = !ret.old || ret.old->vm != next->vm;

        sl_unlock(&c->lock);

        ret.new = next;

        if (switch_mm)
                arch_set_vm_mm(&next->vm->page_table);

        /* TODO: Only set this when there is a next thing to run. */
        /* Set timer again. */
        //timer_set(5 * 1000000, cpu_schedule_next, NULL);

        return ret;
}

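/*
 * Initializes the given vcpu, associating it with the CPU it will run on and
 * the VM it belongs to.
 */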
void vcpu_init(struct vcpu *vcpu, struct cpu *cpu, struct vm *vm)
{
        memset(vcpu, 0, sizeof(*vcpu));
        vcpu->cpu = cpu;
        vcpu->vm = vm;
        /* TODO: Initialize vmid register. */
}