/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2016,2017 IBM Corporation.
 */
#ifndef __XIVE_INTERNAL_H
#define __XIVE_INTERNAL_H

/* Each CPU carries one of these with various per-CPU state */
struct xive_cpu {
#ifdef CONFIG_SMP
	/* HW irq number and data of IPI */
	u32 hw_ipi;
	struct xive_irq_data ipi_data;
#endif /* CONFIG_SMP */

	int chip_id;

	/* Queue data. Only one is populated */
#define XIVE_MAX_QUEUES	8
	struct xive_q queue[XIVE_MAX_QUEUES];

	/*
	 * Pending mask. Each bit corresponds to a priority that
	 * potentially has pending interrupts.
	 */
	u8 pending_prio;

	/* Cache of HW CPPR */
	u8 cppr;
};
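
/*
 * Illustrative sketch (not from this header): pending_prio is used as
 * a simple bitmask keyed by priority. A backend's update_pending hook
 * would typically set the bit for a priority whose event queue has
 * entries, and the core clears it once that queue has been drained:
 *
 *	xc->pending_prio |= 1 << prio;	  // events seen at this priority
 *	...
 *	xc->pending_prio &= ~(1 << prio); // queue scanned and empty
 */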

/* Backend ops */
struct xive_ops {
	/* Source configuration: populate HW data, set/query targeting */
	int	(*populate_irq_data)(u32 hw_irq, struct xive_irq_data *data);
	int	(*configure_irq)(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
	int	(*get_irq_config)(u32 hw_irq, u32 *target, u8 *prio,
				  u32 *sw_irq);
	/* Per-CPU event queue setup/teardown for a given priority */
	int	(*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void	(*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	/* Per-CPU init/teardown and platform probe/shutdown */
	void	(*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
	void	(*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
	bool	(*match)(struct device_node *np);
	void	(*shutdown)(void);

	/* Interrupt flow: scan for pending events, EOI, source sync,
	 * and indirect ESB MMIO accesses */
	void	(*update_pending)(struct xive_cpu *xc);
	void	(*eoi)(u32 hw_irq);
	void	(*sync_source)(u32 hw_irq);
	u64	(*esb_rw)(u32 hw_irq, u32 offset, u64 data, bool write);
#ifdef CONFIG_SMP
	/* Allocate/free the per-CPU IPI */
	int	(*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
	void	(*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
#endif
	const char *name;
};
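
/*
 * Illustrative sketch (hypothetical backend, names are made up): a
 * platform backend fills an xive_ops table with its hooks and hands
 * it to xive_core_init() below, along with its mapping of the thread
 * interrupt management area:
 *
 *	static const struct xive_ops my_xive_ops = {
 *		.populate_irq_data	= my_populate_irq_data,
 *		.configure_irq		= my_configure_irq,
 *		.setup_queue		= my_setup_queue,
 *		.cleanup_queue		= my_cleanup_queue,
 *		.match			= my_match,
 *		.name			= "my-backend",
 *	};
 *
 *	if (!xive_core_init(&my_xive_ops, tima_mmio, tima_offset, max_prio))
 *		return false;
 */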

bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
		    u8 max_prio);
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);

static inline u32 xive_alloc_order(u32 queue_shift)
{
	return (queue_shift > PAGE_SHIFT) ? (queue_shift - PAGE_SHIFT) : 0;
}

extern bool xive_cmdline_disabled;

#endif /* __XIVE_INTERNAL_H */