/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * pm_domain.h - Definitions and headers related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */

#ifndef _LINUX_PM_DOMAIN_H
#define _LINUX_PM_DOMAIN_H

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>

/*
 * Flags to control the behaviour of a genpd.
 *
 * These flags may be set in the struct generic_pm_domain's flags field by a
 * genpd backend driver. The flags must be set before the driver calls
 * pm_genpd_init(), which initializes the genpd. See the usage sketch below
 * the flag definitions.
 *
 * GENPD_FLAG_PM_CLK:		Instructs genpd to use the PM clk framework
 *				while powering on/off attached devices.
 *
 * GENPD_FLAG_IRQ_SAFE:		Informs genpd that its backend callbacks,
 *				->power_on|off(), do not sleep. Hence, they
 *				can be invoked from atomic context, which
 *				enables genpd to power on/off the PM domain
 *				even when pm_runtime_is_irq_safe() returns
 *				true for any of its attached devices. Note
 *				that a genpd having this flag set requires
 *				its master domains to also have it set.
 *
 * GENPD_FLAG_ALWAYS_ON:	Instructs genpd to always keep the PM domain
 *				powered on.
 *
 * GENPD_FLAG_ACTIVE_WAKEUP:	Instructs genpd to keep the PM domain powered
 *				on in case any of its attached devices is used
 *				in the wakeup path to serve system wakeups.
 *
 * GENPD_FLAG_CPU_DOMAIN:	Instructs genpd that it should expect to get
 *				devices attached, which may belong to CPUs or
 *				possibly have subdomains with CPUs attached.
 *				This flag enables the genpd backend driver to
 *				deploy idle power management support for CPUs
 *				and groups of CPUs. Note that the backend
 *				driver must then comply with the so-called
 *				last-man-standing algorithm for the CPUs in
 *				the PM domain.
 *
 * GENPD_FLAG_RPM_ALWAYS_ON:	Instructs genpd to always keep the PM domain
 *				powered on except for system suspend.
 */
#define GENPD_FLAG_PM_CLK	 (1U << 0)
#define GENPD_FLAG_IRQ_SAFE	 (1U << 1)
#define GENPD_FLAG_ALWAYS_ON	 (1U << 2)
#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3)
#define GENPD_FLAG_CPU_DOMAIN	 (1U << 4)
#define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5)
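
/*
 * Usage sketch (editor's example, not part of the mainline header): a
 * hypothetical backend driver, here called "foo", fills in a struct
 * generic_pm_domain, sets any GENPD_FLAG_* bits and only then calls
 * pm_genpd_init(). All names below are illustrative assumptions.
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *pd)
 *	{
 *		return 0;	// enable the power island in hardware
 *	}
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *pd)
 *	{
 *		return 0;	// disable the power island in hardware
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name		= "foo",
 *		.power_on	= foo_pd_power_on,
 *		.power_off	= foo_pd_power_off,
 *		.flags		= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP,
 *	};
 *
 *	// In the driver's probe path, register the domain initially "on":
 *	ret = pm_genpd_init(&foo_pd, NULL, false);
 */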

enum gpd_status {
	GPD_STATE_ACTIVE = 0,	/* PM domain is active */
	GPD_STATE_POWER_OFF,	/* PM domain is off */
};

struct dev_power_governor {
	bool (*power_down_ok)(struct dev_pm_domain *domain);
	bool (*suspend_ok)(struct device *dev);
};

struct gpd_dev_ops {
	int (*start)(struct device *dev);
	int (*stop)(struct device *dev);
};

struct genpd_power_state {
	s64 power_off_latency_ns;
	s64 power_on_latency_ns;
	s64 residency_ns;
	struct fwnode_handle *fwnode;
	ktime_t idle_time;
	void *data;
};

struct genpd_lock_ops;
struct dev_pm_opp;
struct opp_table;

struct generic_pm_domain {
	struct device dev;
	struct dev_pm_domain domain;	/* PM domain operations */
	struct list_head gpd_list_node;	/* Node in the global PM domains list */
	struct list_head master_links;	/* Links with PM domain as a master */
	struct list_head slave_links;	/* Links with PM domain as a slave */
	struct list_head dev_list;	/* List of devices */
	struct dev_power_governor *gov;
	struct work_struct power_off_work;
	struct fwnode_handle *provider;	/* Identity of the domain provider */
	bool has_provider;
	const char *name;
	atomic_t sd_count;	/* Number of subdomains with power "on" */
	enum gpd_status status;	/* Current state of the domain */
	unsigned int device_count;	/* Number of devices */
	unsigned int suspended_count;	/* System suspend device counter */
	unsigned int prepared_count;	/* Suspend counter of prepared devices */
	unsigned int performance_state;	/* Aggregated max performance state */
	cpumask_var_t cpus;		/* A cpumask of the attached CPUs */
	int (*power_off)(struct generic_pm_domain *domain);
	int (*power_on)(struct generic_pm_domain *domain);
	struct opp_table *opp_table;	/* OPP table of the genpd */
	unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd,
						 struct dev_pm_opp *opp);
	int (*set_performance_state)(struct generic_pm_domain *genpd,
				     unsigned int state);
	struct gpd_dev_ops dev_ops;
	s64 max_off_time_ns;	/* Maximum allowed "suspended" time. */
	bool max_off_time_changed;
	bool cached_power_down_ok;
	bool cached_power_down_state_idx;
	int (*attach_dev)(struct generic_pm_domain *domain,
			  struct device *dev);
	void (*detach_dev)(struct generic_pm_domain *domain,
			   struct device *dev);
	unsigned int flags;		/* Bit field of configs for genpd */
	struct genpd_power_state *states;
	void (*free_states)(struct genpd_power_state *states,
			    unsigned int state_count);
	unsigned int state_count;	/* number of states */
	unsigned int state_idx;	/* state that genpd will go to when off */
	ktime_t on_time;
	ktime_t accounting_time;
	const struct genpd_lock_ops *lock_ops;
	union {
		struct mutex mlock;
		struct {
			spinlock_t slock;
			unsigned long lock_flags;
		};
	};
};

static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
{
	return container_of(pd, struct generic_pm_domain, domain);
}

struct gpd_link {
	struct generic_pm_domain *master;
	struct list_head master_node;
	struct generic_pm_domain *slave;
	struct list_head slave_node;

	/* Sub-domain's per-master domain performance state */
	unsigned int performance_state;
	unsigned int prev_performance_state;
};

struct gpd_timing_data {
	s64 suspend_latency_ns;
	s64 resume_latency_ns;
	s64 effective_constraint_ns;
	bool constraint_changed;
	bool cached_suspend_ok;
};

struct pm_domain_data {
	struct list_head list_node;
	struct device *dev;
};

struct generic_pm_domain_data {
	struct pm_domain_data base;
	struct gpd_timing_data td;
	struct notifier_block nb;
	int cpu;
	unsigned int performance_state;
	void *data;
};

#ifdef CONFIG_PM_GENERIC_DOMAINS
static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd)
{
	return container_of(pdd, struct generic_pm_domain_data, base);
}

static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
{
	return to_gpd_data(dev->power.subsys_data->domain_data);
}

int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev);
int pm_genpd_remove_device(struct device *dev);
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain);
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain);
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off);
int pm_genpd_remove(struct generic_pm_domain *genpd);
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);

extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
#ifdef CONFIG_CPU_IDLE
extern struct dev_power_governor pm_domain_cpu_gov;
#endif
#else

static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
{
	return ERR_PTR(-ENOSYS);
}
static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
				      struct device *dev)
{
	return -ENOSYS;
}
static inline int pm_genpd_remove_device(struct device *dev)
{
	return -ENOSYS;
}
static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *subdomain)
{
	return -ENOSYS;
}
static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
					    struct generic_pm_domain *subdomain)
{
	return -ENOSYS;
}
static inline int pm_genpd_init(struct generic_pm_domain *genpd,
				struct dev_power_governor *gov, bool is_off)
{
	return -ENOSYS;
}
static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	return -ENOTSUPP;
}

static inline int dev_pm_genpd_set_performance_state(struct device *dev,
						     unsigned int state)
{
	return -ENOTSUPP;
}

#define simple_qos_governor		(*(struct dev_power_governor *)(NULL))
#define pm_domain_always_on_gov		(*(struct dev_power_governor *)(NULL))
#endif

#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
void pm_genpd_syscore_poweroff(struct device *dev);
void pm_genpd_syscore_poweron(struct device *dev);
#else
static inline void pm_genpd_syscore_poweroff(struct device *dev) {}
static inline void pm_genpd_syscore_poweron(struct device *dev) {}
#endif

/* OF PM domain providers */
struct of_device_id;

typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
						   void *data);

struct genpd_onecell_data {
	struct generic_pm_domain **domains;
	unsigned int num_domains;
	genpd_xlate_t xlate;
};
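
/*
 * Example (editor's sketch): a platform driver for a hypothetical SoC "foo"
 * exposing several PM domains through one device_node could register a
 * onecell provider roughly like this; FOO_NR_DOMAINS and the error handling
 * are assumptions for illustration, not taken from mainline code:
 *
 *	static struct generic_pm_domain *foo_domains[FOO_NR_DOMAINS];
 *	static struct genpd_onecell_data foo_genpd_data = {
 *		.domains	= foo_domains,
 *		.num_domains	= ARRAY_SIZE(foo_domains),
 *	};
 *
 *	// After pm_genpd_init() has been called for each entry:
 *	ret = of_genpd_add_provider_onecell(pdev->dev.of_node, &foo_genpd_data);
 *
 * Consumers then reference the provider with a phandle plus one cell
 * (#power-domain-cells = <1>) selecting the index into foo_domains.
 */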

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd);
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data);
void of_genpd_del_provider(struct device_node *np);
int of_genpd_add_device(struct of_phandle_args *args, struct device *dev);
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec);
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int of_genpd_parse_idle_states(struct device_node *dn,
			       struct genpd_power_state **states, int *n);
unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
					       struct dev_pm_opp *opp);

int genpd_dev_pm_attach(struct device *dev);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
					 unsigned int index);
struct device *genpd_dev_pm_attach_by_name(struct device *dev,
					   const char *name);
#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
static inline int of_genpd_add_provider_simple(struct device_node *np,
					       struct generic_pm_domain *genpd)
{
	return -ENOTSUPP;
}

static inline int of_genpd_add_provider_onecell(struct device_node *np,
						struct genpd_onecell_data *data)
{
	return -ENOTSUPP;
}

static inline void of_genpd_del_provider(struct device_node *np) {}

static inline int of_genpd_add_device(struct of_phandle_args *args,
				      struct device *dev)
{
	return -ENODEV;
}

static inline int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
					 struct of_phandle_args *subdomain_spec)
{
	return -ENODEV;
}

static inline int of_genpd_parse_idle_states(struct device_node *dn,
					     struct genpd_power_state **states,
					     int *n)
{
	return -ENODEV;
}

static inline unsigned int
pm_genpd_opp_to_performance_state(struct device *genpd_dev,
				  struct dev_pm_opp *opp)
{
	return 0;
}

static inline int genpd_dev_pm_attach(struct device *dev)
{
	return 0;
}

static inline struct device *genpd_dev_pm_attach_by_id(struct device *dev,
						       unsigned int index)
{
	return NULL;
}

static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev,
							 const char *name)
{
	return NULL;
}

static inline
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	return ERR_PTR(-ENOTSUPP);
}
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */

#ifdef CONFIG_PM
int dev_pm_domain_attach(struct device *dev, bool power_on);
struct device *dev_pm_domain_attach_by_id(struct device *dev,
					  unsigned int index);
struct device *dev_pm_domain_attach_by_name(struct device *dev,
					    const char *name);
void dev_pm_domain_detach(struct device *dev, bool power_off);
void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
#else
static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
{
	return 0;
}
static inline struct device *dev_pm_domain_attach_by_id(struct device *dev,
							unsigned int index)
{
	return NULL;
}
static inline struct device *dev_pm_domain_attach_by_name(struct device *dev,
							   const char *name)
{
	return NULL;
}
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
static inline void dev_pm_domain_set(struct device *dev,
				     struct dev_pm_domain *pd) {}
#endif

#endif /* _LINUX_PM_DOMAIN_H */