/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>

#include "../mm/mmu_decl.h"
#include "booke.h"
#include "e500.h"

struct id {
        unsigned long val;
        struct id **pentry;
};

#define NUM_TIDS 256

/*
 * This table provides mappings from:
 * (guestAS,guestTID,guestPR) --> ID of physical cpu
 * guestAS [0..1]
 * guestTID [0..255]
 * guestPR [0..1]
 * ID [1..255]
 * Each vcpu keeps one vcpu_id_table.
 */
struct vcpu_id_table {
        struct id id[2][NUM_TIDS][2];
};

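/*
 * For example, a guest running with AS=0 and PID=5 in supervisor mode
 * (PR=0) is tracked in idt->id[0][5][0]: val caches the shadow ID most
 * recently handed out for that combination on some host core, and pentry
 * points back into that core's pcpu_sids slot so a later lookup can verify
 * that both sides still agree on the pairing.
 */
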
/*
 * This table provides the reverse mapping of vcpu_id_table:
 * ID --> address of vcpu_id_table item.
 * Each physical core has one pcpu_id_table.
 */
struct pcpu_id_table {
        struct id *entry[NUM_TIDS];
};

static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);

/* This variable keeps the last used shadow ID on the local core.
 * The valid range of shadow IDs is [1..255]. */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);

/*
 * Allocate a free shadow id and set up a valid sid mapping in the given entry.
 * A mapping is only valid when the vcpu_id_table and pcpu_id_table match.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_setup_one(struct id *entry)
{
        unsigned long sid;
        int ret = -1;

        sid = __this_cpu_inc_return(pcpu_last_used_sid);
        if (sid < NUM_TIDS) {
                __this_cpu_write(pcpu_sids.entry[sid], entry);
                entry->val = sid;
                entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
                ret = sid;
        }

        /*
         * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
         * the caller will invalidate everything and start over.
         *
         * sid > NUM_TIDS indicates a race, which we disable preemption to
         * avoid.
         */
        WARN_ON(sid > NUM_TIDS);

        return ret;
}

/*
 * Check if the given entry contains a valid shadow id mapping.
 * An ID mapping is considered valid only if
 * both the vcpu and the pcpu know this mapping.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_lookup(struct id *entry)
{
        if (entry && entry->val != 0 &&
            __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&
            entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))
                return entry->val;
        return -1;
}

/* Invalidate all id mappings on the local core -- call with preempt disabled */
static inline void local_sid_destroy_all(void)
{
        __this_cpu_write(pcpu_last_used_sid, 0);
        memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));
}

static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
        return vcpu_e500->idt;
}

static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        kfree(vcpu_e500->idt);
        vcpu_e500->idt = NULL;
}

/* Map guest pid to shadow.
 * We use PID to keep the shadow of the current guest's non-zero PID,
 * and use PID1 to keep the shadow of the guest's zero PID,
 * so that a guest tlbe with TID=0 can be accessed at any time. */
static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        preempt_disable();
        vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
                        get_cur_as(&vcpu_e500->vcpu),
                        get_cur_pid(&vcpu_e500->vcpu),
                        get_cur_pr(&vcpu_e500->vcpu), 1);
        vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
                        get_cur_as(&vcpu_e500->vcpu), 0,
                        get_cur_pr(&vcpu_e500->vcpu), 1);
        preempt_enable();
}

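/*
 * Concretely: if the guest is executing with PID=5, shadow_pid receives the
 * sid for (AS, 5, PR) and shadow_pid1 the sid for (AS, 0, PR).  Per the
 * comment above, shadow_pid is what the guest-entry path loads into the
 * hardware PID register and shadow_pid1 into PID1, so shadow TLB entries
 * tagged with either sid match and global (TID=0) guest translations stay
 * usable without switching PIDs.
 */
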
/* Invalidate all mappings on the vcpu */
static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));

        /* Update shadow pid when mappings are changed */
        kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/* Invalidate one ID mapping on the vcpu */
static inline void kvmppc_e500_id_table_reset_one(
                        struct kvmppc_vcpu_e500 *vcpu_e500,
                        int as, int pid, int pr)
{
        struct vcpu_id_table *idt = vcpu_e500->idt;

        BUG_ON(as >= 2);
        BUG_ON(pid >= NUM_TIDS);
        BUG_ON(pr >= 2);

        idt->id[as][pid][pr].val = 0;
        idt->id[as][pid][pr].pentry = NULL;

        /* Update shadow pid when mappings are changed */
        kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/*
 * Map guest (vcpu,AS,ID,PR) to a physical core shadow id.
 * This function first looks up whether a valid mapping exists and,
 * if not, creates a new one.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
                                 unsigned int as, unsigned int gid,
                                 unsigned int pr, int avoid_recursion)
{
        struct vcpu_id_table *idt = vcpu_e500->idt;
        int sid;

        BUG_ON(as >= 2);
        BUG_ON(gid >= NUM_TIDS);
        BUG_ON(pr >= 2);

        sid = local_sid_lookup(&idt->id[as][gid][pr]);

        while (sid <= 0) {
                /* No mapping yet */
                sid = local_sid_setup_one(&idt->id[as][gid][pr]);
                if (sid <= 0) {
                        _tlbil_all();
                        local_sid_destroy_all();
                }

                /* Update shadow pid when mappings are changed */
                if (!avoid_recursion)
                        kvmppc_e500_recalc_shadow_pid(vcpu_e500);
        }

        return sid;
}

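/*
 * The retry loop above converges quickly: when local_sid_setup_one() fails
 * because all 255 shadow IDs on this core are in use, the local TLB is
 * flushed and every local sid mapping is discarded, so the next iteration
 * allocates sid 1 and succeeds.  avoid_recursion is set when this is called
 * from kvmppc_e500_recalc_shadow_pid() itself.
 */
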
unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
                                      struct kvm_book3e_206_tlb_entry *gtlbe)
{
        return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe),
                                   get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0);
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        if (vcpu->arch.pid != pid) {
                vcpu_e500->pid[0] = vcpu->arch.pid = pid;
                kvmppc_e500_recalc_shadow_pid(vcpu_e500);
        }
}

/* gtlbe must not be mapped by more than one host tlbe */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
                           struct kvm_book3e_206_tlb_entry *gtlbe)
{
        struct vcpu_id_table *idt = vcpu_e500->idt;
        unsigned int pr, tid, ts;
        int pid;
        u32 val, eaddr;
        unsigned long flags;

        ts = get_tlb_ts(gtlbe);
        tid = get_tlb_tid(gtlbe);

        preempt_disable();

        /* One guest ID may be mapped to two shadow IDs */
        for (pr = 0; pr < 2; pr++) {
                /*
                 * The shadow PID can have a valid mapping on at most one
                 * host CPU.  In the common case, it will be valid on this
                 * CPU, in which case we do a local invalidation of the
                 * specific address.
                 *
                 * If the shadow PID is not valid on the current host CPU,
                 * we invalidate the entire shadow PID.
                 */
                pid = local_sid_lookup(&idt->id[ts][tid][pr]);
                if (pid <= 0) {
                        kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
                        continue;
                }

                /*
                 * The guest is invalidating a 4K entry which is in a PID
                 * that has a valid shadow mapping on this host CPU.  We
                 * search the host TLB to invalidate its shadow TLB entry,
                 * similar to __tlbil_va except that we need to look in AS1.
                 */
                val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
                eaddr = get_tlb_eaddr(gtlbe);

                local_irq_save(flags);

                mtspr(SPRN_MAS6, val);
                asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
                val = mfspr(SPRN_MAS1);
                if (val & MAS1_VALID) {
                        mtspr(SPRN_MAS1, val & ~MAS1_VALID);
                        asm volatile("tlbwe");
                }

                local_irq_restore(flags);
        }

        preempt_enable();
}

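/*
 * The invalidation sequence above works as follows: MAS6 is loaded with the
 * shadow PID (SPID) and SAS=1, since guest mappings live in host address
 * space 1; tlbsx then searches the host TLB for the guest effective address,
 * and if MAS1 reports a valid hit the entry is written back with its valid
 * bit cleared via tlbwe.  Interrupts are disabled around the sequence so
 * nothing else can clobber the MAS registers in between.
 */
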
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        kvmppc_e500_id_table_reset_all(vcpu_e500);
}

void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
        /* Recalc shadow pid since the MSR changed */
        kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
{
        kvmppc_booke_vcpu_load(vcpu, cpu);

        /* The shadow PID may have expired on the local core */
        kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_SPE
        if (vcpu->arch.shadow_msr & MSR_SPE)
                kvmppc_vcpu_disable_spe(vcpu);
#endif

        kvmppc_booke_vcpu_put(vcpu);
}

int kvmppc_core_check_processor_compat(void)
{
        int r;

        if (strcmp(cur_cpu_spec->cpu_name, "e500v2") == 0)
                r = 0;
        else
                r = -ENOTSUPP;

        return r;
}

static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        struct kvm_book3e_206_tlb_entry *tlbe;

        /* Insert large initial mapping for guest. */
        tlbe = get_entry(vcpu_e500, 1, 0);
        tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
        tlbe->mas2 = 0;
        tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;

        /* 4K map for serial output. Used by kernel wrapper. */
        tlbe = get_entry(vcpu_e500, 1, 1);
        tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
        tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
        tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
}

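/*
 * Both entries above land in the guest's TLB1 as identity mappings (effective
 * address equal to real address): a 256M mapping at address 0 with full
 * supervisor permissions so the guest kernel can run before it builds its own
 * TLB entries, and a 4K cache-inhibited, guarded page covering 0xe0004000,
 * which the boot wrapper uses for early serial output.
 */
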
int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        kvmppc_e500_tlb_setup(vcpu_e500);

        /* Registers init */
        vcpu->arch.pvr = mfspr(SPRN_PVR);
        vcpu_e500->svr = mfspr(SPRN_SVR);

        vcpu->arch.cpu_type = KVM_CPU_E500V2;

        return 0;
}

static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu,
                                      struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_SPE |
                               KVM_SREGS_E_PM;
        sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;

        sregs->u.e.impl.fsl.features = 0;
        sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
        sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
        sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;

        sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
        sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
        sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
        sregs->u.e.ivor_high[3] =
                vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];

        kvmppc_get_sregs_ivor(vcpu, sregs);
        kvmppc_get_sregs_e500_tlb(vcpu, sregs);
        return 0;
}

static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu,
                                      struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int ret;

        if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
                vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
                vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
                vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
        }

        ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
        if (ret < 0)
                return ret;

        if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
                return 0;

        if (sregs->u.e.features & KVM_SREGS_E_SPE) {
                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] =
                        sregs->u.e.ivor_high[0];
                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] =
                        sregs->u.e.ivor_high[1];
                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] =
                        sregs->u.e.ivor_high[2];
        }

        if (sregs->u.e.features & KVM_SREGS_E_PM) {
                vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
                        sregs->u.e.ivor_high[3];
        }

        return kvmppc_set_sregs_ivor(vcpu, sregs);
}

static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
                                   union kvmppc_one_reg *val)
{
        int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
        return r;
}

static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
                                   union kvmppc_one_reg *val)
{
        int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
        return r;
}

static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
                                                     unsigned int id)
{
        struct kvmppc_vcpu_e500 *vcpu_e500;
        struct kvm_vcpu *vcpu;
        int err;

        vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu_e500) {
                err = -ENOMEM;
                goto out;
        }

        vcpu = &vcpu_e500->vcpu;
        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_vcpu;

        if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) {
                err = -ENOMEM;
                goto uninit_vcpu;
        }

        err = kvmppc_e500_tlb_init(vcpu_e500);
        if (err)
                goto uninit_id;

        vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        if (!vcpu->arch.shared) {
                err = -ENOMEM;
                goto uninit_tlb;
        }

        return vcpu;

uninit_tlb:
        kvmppc_e500_tlb_uninit(vcpu_e500);
uninit_id:
        kvmppc_e500_id_table_free(vcpu_e500);
uninit_vcpu:
        kvm_vcpu_uninit(vcpu);
free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
out:
        return ERR_PTR(err);
}

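/*
 * The error unwinding above releases resources in reverse order of
 * allocation, and kvmppc_core_vcpu_free_e500() below performs the same
 * teardown for a fully constructed vcpu: shared page, TLB state, ID table,
 * generic vcpu state, and finally the vcpu structure itself.
 */
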
static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        free_page((unsigned long)vcpu->arch.shared);
        kvmppc_e500_tlb_uninit(vcpu_e500);
        kvmppc_e500_id_table_free(vcpu_e500);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}

static int kvmppc_core_init_vm_e500(struct kvm *kvm)
{
        return 0;
}

static void kvmppc_core_destroy_vm_e500(struct kvm *kvm)
{
}

static struct kvmppc_ops kvm_ops_e500 = {
        .get_sregs = kvmppc_core_get_sregs_e500,
        .set_sregs = kvmppc_core_set_sregs_e500,
        .get_one_reg = kvmppc_get_one_reg_e500,
        .set_one_reg = kvmppc_set_one_reg_e500,
        .vcpu_load = kvmppc_core_vcpu_load_e500,
        .vcpu_put = kvmppc_core_vcpu_put_e500,
        .vcpu_create = kvmppc_core_vcpu_create_e500,
        .vcpu_free = kvmppc_core_vcpu_free_e500,
        .mmu_destroy = kvmppc_mmu_destroy_e500,
        .init_vm = kvmppc_core_init_vm_e500,
        .destroy_vm = kvmppc_core_destroy_vm_e500,
        .emulate_op = kvmppc_core_emulate_op_e500,
        .emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
        .emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
};

static int __init kvmppc_e500_init(void)
{
        int r, i;
        unsigned long ivor[3];
        /* Process remaining handlers above the generic first 16 */
        unsigned long *handler = &kvmppc_booke_handler_addr[16];
        unsigned long handler_len;
        unsigned long max_ivor = 0;

        r = kvmppc_core_check_processor_compat();
        if (r)
                goto err_out;

        r = kvmppc_booke_init();
        if (r)
                goto err_out;

        /* copy extra E500 exception handlers */
        ivor[0] = mfspr(SPRN_IVOR32);
        ivor[1] = mfspr(SPRN_IVOR33);
        ivor[2] = mfspr(SPRN_IVOR34);
        for (i = 0; i < 3; i++) {
                if (ivor[i] > ivor[max_ivor])
                        max_ivor = i;

                handler_len = handler[i + 1] - handler[i];
                memcpy((void *)kvmppc_booke_handlers + ivor[i],
                       (void *)handler[i], handler_len);
        }
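
        /*
         * The three handlers copied above correspond to IVOR32-34, the
         * SPE/embedded-FP unavailable, data, and round exceptions that
         * e500v2 defines beyond the 16 generic BookE vectors.  The icache
         * flush below only needs to extend to the end of the handler that
         * was placed at the highest IVOR offset.
         */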
        handler_len = handler[max_ivor + 1] - handler[max_ivor];
        flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
                           ivor[max_ivor] + handler_len);

        r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
        if (r)
                goto err_out;
        kvm_ops_e500.owner = THIS_MODULE;
        kvmppc_pr_ops = &kvm_ops_e500;

err_out:
        return r;
}

static void __exit kvmppc_e500_exit(void)
{
        kvmppc_pr_ops = NULL;
        kvmppc_booke_exit();
}

module_init(kvmppc_e500_init);
module_exit(kvmppc_e500_exit);
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");