// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas Clock Pulse Generator / Module Standby and Software Reset
 *
 * Copyright (C) 2015 Glider bvba
 *
 * Based on clk-mstp.c, clk-rcar-gen2.c, and clk-rcar-gen3.c
 *
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/psci.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "renesas-cpg-mssr.h"
#include "clk-div6.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif


/*
 * Module Standby and Software Reset register offsets.
 *
 * If the registers exist, these are valid for SH-Mobile, R-Mobile,
 * R-Car Gen2, R-Car Gen3, and RZ/G1.
 * These are NOT valid for R-Car Gen1 and RZ/A1!
 */

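/*
 * Module clock and reset specifiers in DT encode the register index and bit
 * as <register> * 100 + <bit> (e.g. 219 for SMSTPCR2 bit 19), or as
 * <register> * 10 + <bit> on parts using the 8-bit STBCR registers.
 * The MOD_CLK_PACK*() helpers in renesas-cpg-mssr.h map these onto the
 * linear clks[] index.
 */
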
/*
 * Module Stop Status Register offsets
 */

static const u16 mstpsr[] = {
	0x030, 0x038, 0x040, 0x048, 0x04C, 0x03C, 0x1C0, 0x1C4,
	0x9A0, 0x9A4, 0x9A8, 0x9AC,
};

#define MSTPSR(i)	mstpsr[i]


/*
 * System Module Stop Control Register offsets
 */

static const u16 smstpcr[] = {
	0x130, 0x134, 0x138, 0x13C, 0x140, 0x144, 0x148, 0x14C,
	0x990, 0x994, 0x998, 0x99C,
};

#define SMSTPCR(i)	smstpcr[i]

/*
 * Standby Control Register offsets (RZ/A)
 * Base address is FRQCR register
 */

static const u16 stbcr[] = {
	0xFFFF/*dummy*/, 0x010, 0x014, 0x410, 0x414, 0x418, 0x41C, 0x420,
	0x424, 0x428, 0x42C,
};

#define STBCR(i)	stbcr[i]

/*
 * Software Reset Register offsets
 */

static const u16 srcr[] = {
	0x0A0, 0x0A8, 0x0B0, 0x0B8, 0x0BC, 0x0C4, 0x1C8, 0x1CC,
	0x920, 0x924, 0x928, 0x92C,
};

#define SRCR(i)	srcr[i]


/* Realtime Module Stop Control Register offsets */
#define RMSTPCR(i)	(smstpcr[i] - 0x20)

/* Modem Module Stop Control Register offsets (r8a73a4) */
#define MMSTPCR(i)	(smstpcr[i] + 0x20)

/* Software Reset Clearing Register offsets */
#define SRSTCLR(i)	(0x940 + (i) * 4)


/**
 * struct cpg_mssr_priv - Clock Pulse Generator / Module Standby and Software Reset Private Data
 *
 * @rcdev: Optional reset controller entity
 * @dev: CPG/MSSR device
 * @base: CPG/MSSR register block base address
 * @rmw_lock: protects RMW register accesses
 * @np: Device node in DT for this CPG/MSSR module
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @stbyctrl: This device has Standby Control Registers
 * @notifiers: Notifier chain to save/restore clock state for system resume
 * @smstpcr_saved[].mask: Mask of SMSTPCR[] bits under our control
 * @smstpcr_saved[].val: Saved values of SMSTPCR[]
 * @clks: Array containing all Core and Module Clocks
 */
struct cpg_mssr_priv {
#ifdef CONFIG_RESET_CONTROLLER
	struct reset_controller_dev rcdev;
#endif
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;
	struct device_node *np;

	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int last_dt_core_clk;
	bool stbyctrl;

	struct raw_notifier_head notifiers;
	struct {
		u32 mask;
		u32 val;
	} smstpcr_saved[ARRAY_SIZE(smstpcr)];

	struct clk *clks[];
};

static struct cpg_mssr_priv *cpg_mssr_priv;

/**
 * struct mstp_clock - MSTP gating clock
 * @hw: handle between common and hardware-specific interfaces
 * @index: MSTP clock number
 * @priv: CPG/MSSR private data
 */
struct mstp_clock {
	struct clk_hw hw;
	u32 index;
	struct cpg_mssr_priv *priv;
};

#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)

static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	unsigned int reg = clock->index / 32;
	unsigned int bit = clock->index % 32;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(bit);
	unsigned long flags;
	unsigned int i;
	u32 value;

	dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
		enable ? "ON" : "OFF");
	spin_lock_irqsave(&priv->rmw_lock, flags);

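	/*
	 * Parts with Standby Control Registers (stbyctrl) use 8-bit STBCR
	 * accesses and provide no status register to poll; a dummy read is
	 * used to flush the write instead.
	 */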
	if (priv->stbyctrl) {
		value = readb(priv->base + STBCR(reg));
		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;
		writeb(value, priv->base + STBCR(reg));

		/* dummy read to ensure write has completed */
		readb(priv->base + STBCR(reg));
		barrier_data(priv->base + STBCR(reg));
	} else {
		value = readl(priv->base + SMSTPCR(reg));
		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;
		writel(value, priv->base + SMSTPCR(reg));
	}

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (!enable || priv->stbyctrl)
		return 0;

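	/* Wait until the hardware reports the module clock as running */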
	for (i = 1000; i > 0; --i) {
		if (!(readl(priv->base + MSTPSR(reg)) & bitmask))
			break;
		cpu_relax();
	}

	if (!i) {
		dev_err(dev, "Failed to enable SMSTP %p[%d]\n",
			priv->base + SMSTPCR(reg), bit);
		return -ETIMEDOUT;
	}

	return 0;
}

static int cpg_mstp_clock_enable(struct clk_hw *hw)
{
	return cpg_mstp_clock_endisable(hw, true);
}

static void cpg_mstp_clock_disable(struct clk_hw *hw)
{
	cpg_mstp_clock_endisable(hw, false);
}

static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	u32 value;

	if (priv->stbyctrl)
		value = readb(priv->base + STBCR(clock->index / 32));
	else
		value = readl(priv->base + MSTPSR(clock->index / 32));

	return !(value & BIT(clock->index % 32));
}

static const struct clk_ops cpg_mstp_clock_ops = {
	.enable = cpg_mstp_clock_enable,
	.disable = cpg_mstp_clock_disable,
	.is_enabled = cpg_mstp_clock_is_enabled,
};

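/*
 * Two-cell clock specifier: the first cell selects CPG_CORE or CPG_MOD,
 * the second the clock index, e.g. (illustrative DT snippet)
 *
 *	clocks = <&cpg CPG_MOD 219>;
 */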
static
struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
					 void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct cpg_mssr_priv *priv = data;
	struct device *dev = priv->dev;
	unsigned int idx;
	const char *type;
	struct clk *clk;
	int range_check;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (priv->stbyctrl) {
			idx = MOD_CLK_PACK_10(clkidx);
			range_check = 7 - (clkidx % 10);
		} else {
			idx = MOD_CLK_PACK(clkidx);
			range_check = 31 - (clkidx % 100);
		}
		if (range_check < 0 || idx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + idx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
					      const struct cpg_mssr_info *info,
					      struct cpg_mssr_priv *priv)
{
	struct clk *clk = ERR_PTR(-ENOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->np, core->name);
		break;

	case CLK_TYPE_FF:
	case CLK_TYPE_DIV6P1:
	case CLK_TYPE_DIV6_RO:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);

		if (core->type == CLK_TYPE_DIV6_RO)
			/* Multiply with the DIV6 register value */
			div *= (readl(priv->base + core->offset) & 0x3f) + 1;

		if (core->type == CLK_TYPE_DIV6P1) {
			clk = cpg_div6_register(core->name, 1, &parent_name,
						priv->base + core->offset,
						&priv->notifiers);
		} else {
			clk = clk_register_fixed_factor(NULL, core->name,
							parent_name, 0,
							core->mult, div);
		}
		break;

	case CLK_TYPE_FR:
		clk = clk_register_fixed_rate(NULL, core->name, NULL, 0,
					      core->mult);
		break;

	default:
		if (info->cpg_clk_register)
			clk = info->cpg_clk_register(dev, core, info,
						     priv->clks, priv->base,
						     &priv->notifiers);
		else
			dev_err(dev, "%s has unsupported core clock type %u\n",
				core->name, core->type);
		break;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
					     const struct cpg_mssr_info *info,
					     struct cpg_mssr_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &cpg_mstp_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "MSTP %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->index = id - priv->num_core_clks;
	clock->priv = priv;
	clock->hw.init = &init;

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
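	/* Record this bit as under our control for system suspend/resume */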
	priv->smstpcr_saved[clock->index / 32].mask |= BIT(clock->index % 32);
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
	kfree(clock);
}

struct cpg_mssr_clk_domain {
	struct generic_pm_domain genpd;
	unsigned int num_core_pm_clks;
	unsigned int core_pm_clks[];
};

static struct cpg_mssr_clk_domain *cpg_mssr_clk_domain;

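/*
 * All module clocks, plus the core clocks listed in core_pm_clks[], are
 * considered suitable for use in the CPG/MSSR clock domain.
 */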
static bool cpg_mssr_is_pm_clk(const struct of_phandle_args *clkspec,
			       struct cpg_mssr_clk_domain *pd)
{
	unsigned int i;

	if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
		return false;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		for (i = 0; i < pd->num_core_pm_clks; i++)
			if (clkspec->args[1] == pd->core_pm_clks[i])
				return true;
		return false;

	case CPG_MOD:
		return true;

	default:
		return false;
	}
}

int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	struct cpg_mssr_clk_domain *pd = cpg_mssr_clk_domain;
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	struct clk *clk;
	int i = 0;
	int error;

	if (!pd) {
		dev_dbg(dev, "CPG/MSSR clock domain not yet available\n");
		return -EPROBE_DEFER;
	}

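	/*
	 * Find the first clock in this device's "clocks" property that
	 * belongs to the CPG/MSSR clock domain and let the PM clock
	 * framework manage it.
	 */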
	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (cpg_mssr_is_pm_clk(&clkspec, pd))
			goto found;

		of_node_put(clkspec.np);
		i++;
	}

	return 0;

found:
	clk = of_clk_get_from_provider(&clkspec);
	of_node_put(clkspec.np);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	error = pm_clk_create(dev);
	if (error)
		goto fail_put;

	error = pm_clk_add_clk(dev, clk);
	if (error)
		goto fail_destroy;

	return 0;

fail_destroy:
	pm_clk_destroy(dev);
fail_put:
	clk_put(clk);
	return error;
}

void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static int __init cpg_mssr_add_clk_domain(struct device *dev,
					  const unsigned int *core_pm_clks,
					  unsigned int num_core_pm_clks)
{
	struct device_node *np = dev->of_node;
	struct generic_pm_domain *genpd;
	struct cpg_mssr_clk_domain *pd;
	size_t pm_size = num_core_pm_clks * sizeof(core_pm_clks[0]);

	pd = devm_kzalloc(dev, sizeof(*pd) + pm_size, GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->num_core_pm_clks = num_core_pm_clks;
	memcpy(pd->core_pm_clks, core_pm_clks, pm_size);

	genpd = &pd->genpd;
	genpd->name = np->name;
	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
		       GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = cpg_mssr_attach_dev;
	genpd->detach_dev = cpg_mssr_detach_dev;
	pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
	cpg_mssr_clk_domain = pd;

	of_genpd_add_provider_simple(np, genpd);
	return 0;
}

#ifdef CONFIG_RESET_CONTROLLER

#define rcdev_to_priv(x)	container_of(x, struct cpg_mssr_priv, rcdev)

static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
			  unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);

	/* Reset module */
	writel(bitmask, priv->base + SRCR(reg));

	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
	udelay(35);

	/* Release module from reset state */
	writel(bitmask, priv->base + SRSTCLR(reg));

	return 0;
}

static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);

	writel(bitmask, priv->base + SRCR(reg));
	return 0;
}

static int cpg_mssr_deassert(struct reset_controller_dev *rcdev,
			     unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "deassert %u%02u\n", reg, bit);

	writel(bitmask, priv->base + SRSTCLR(reg));
	return 0;
}

static int cpg_mssr_status(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	return !!(readl(priv->base + SRCR(reg)) & bitmask);
}

static const struct reset_control_ops cpg_mssr_reset_ops = {
	.reset = cpg_mssr_reset,
	.assert = cpg_mssr_assert,
	.deassert = cpg_mssr_deassert,
	.status = cpg_mssr_status,
};

static int cpg_mssr_reset_xlate(struct reset_controller_dev *rcdev,
				const struct of_phandle_args *reset_spec)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int unpacked = reset_spec->args[0];
	unsigned int idx = MOD_CLK_PACK(unpacked);

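	/*
	 * Reset specifiers use the same <register> * 100 + <bit> encoding
	 * as CPG_MOD clock indices.
	 */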
	if (unpacked % 100 > 31 || idx >= rcdev->nr_resets) {
		dev_err(priv->dev, "Invalid reset index %u\n", unpacked);
		return -EINVAL;
	}

	return idx;
}

static int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
	priv->rcdev.ops = &cpg_mssr_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = cpg_mssr_reset_xlate;
	priv->rcdev.nr_resets = priv->num_mod_clks;
	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

#else /* !CONFIG_RESET_CONTROLLER */
static inline int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
	return 0;
}
#endif /* !CONFIG_RESET_CONTROLLER */


static const struct of_device_id cpg_mssr_match[] = {
#ifdef CONFIG_CLK_R7S9210
	{
		.compatible = "renesas,r7s9210-cpg-mssr",
		.data = &r7s9210_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7743
	{
		.compatible = "renesas,r8a7743-cpg-mssr",
		.data = &r8a7743_cpg_mssr_info,
	},
	/* RZ/G1N is (almost) identical to RZ/G1M w.r.t. clocks. */
	{
		.compatible = "renesas,r8a7744-cpg-mssr",
		.data = &r8a7743_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7745
	{
		.compatible = "renesas,r8a7745-cpg-mssr",
		.data = &r8a7745_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77470
	{
		.compatible = "renesas,r8a77470-cpg-mssr",
		.data = &r8a77470_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774A1
	{
		.compatible = "renesas,r8a774a1-cpg-mssr",
		.data = &r8a774a1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774C0
	{
		.compatible = "renesas,r8a774c0-cpg-mssr",
		.data = &r8a774c0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7790
	{
		.compatible = "renesas,r8a7790-cpg-mssr",
		.data = &r8a7790_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7791
	{
		.compatible = "renesas,r8a7791-cpg-mssr",
		.data = &r8a7791_cpg_mssr_info,
	},
	/* R-Car M2-N is (almost) identical to R-Car M2-W w.r.t. clocks. */
	{
		.compatible = "renesas,r8a7793-cpg-mssr",
		.data = &r8a7791_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7792
	{
		.compatible = "renesas,r8a7792-cpg-mssr",
		.data = &r8a7792_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7794
	{
		.compatible = "renesas,r8a7794-cpg-mssr",
		.data = &r8a7794_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7795
	{
		.compatible = "renesas,r8a7795-cpg-mssr",
		.data = &r8a7795_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7796
	{
		.compatible = "renesas,r8a7796-cpg-mssr",
		.data = &r8a7796_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77965
	{
		.compatible = "renesas,r8a77965-cpg-mssr",
		.data = &r8a77965_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77970
	{
		.compatible = "renesas,r8a77970-cpg-mssr",
		.data = &r8a77970_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77980
	{
		.compatible = "renesas,r8a77980-cpg-mssr",
		.data = &r8a77980_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77990
	{
		.compatible = "renesas,r8a77990-cpg-mssr",
		.data = &r8a77990_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77995
	{
		.compatible = "renesas,r8a77995-cpg-mssr",
		.data = &r8a77995_cpg_mssr_info,
	},
#endif
	{ /* sentinel */ }
};

static void cpg_mssr_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM_PSCI_FW)
static int cpg_mssr_suspend_noirq(struct device *dev)
{
	struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
	unsigned int reg;

	/* This is the best we can do to check for the presence of PSCI */
	if (!psci_ops.cpu_suspend)
		return 0;

	/* Save module registers with bits under our control */
	for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
		if (priv->smstpcr_saved[reg].mask)
			priv->smstpcr_saved[reg].val =
				readl(priv->base + SMSTPCR(reg));
	}

	/* Save core clocks */
	raw_notifier_call_chain(&priv->notifiers, PM_EVENT_SUSPEND, NULL);

	return 0;
}

static int cpg_mssr_resume_noirq(struct device *dev)
{
	struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
	unsigned int reg, i;
	u32 mask, oldval, newval;

	/* This is the best we can do to check for the presence of PSCI */
	if (!psci_ops.cpu_suspend)
		return 0;

	/* Restore core clocks */
	raw_notifier_call_chain(&priv->notifiers, PM_EVENT_RESUME, NULL);

	/* Restore module clocks */
	for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
		mask = priv->smstpcr_saved[reg].mask;
		if (!mask)
			continue;

		if (priv->stbyctrl)
			oldval = readb(priv->base + STBCR(reg));
		else
			oldval = readl(priv->base + SMSTPCR(reg));
		newval = oldval & ~mask;
		newval |= priv->smstpcr_saved[reg].val & mask;
		if (newval == oldval)
			continue;

		if (priv->stbyctrl) {
			writeb(newval, priv->base + STBCR(reg));
			/* dummy read to ensure write has completed */
			readb(priv->base + STBCR(reg));
			barrier_data(priv->base + STBCR(reg));
			continue;
		} else
			writel(newval, priv->base + SMSTPCR(reg));

		/* Wait until enabled clocks are really enabled */
		mask &= ~priv->smstpcr_saved[reg].val;
		if (!mask)
			continue;

		for (i = 1000; i > 0; --i) {
			oldval = readl(priv->base + MSTPSR(reg));
			if (!(oldval & mask))
				break;
			cpu_relax();
		}

		if (!i)
			dev_warn(dev, "Failed to enable SMSTP %p[0x%x]\n",
				 priv->base + SMSTPCR(reg), oldval & mask);
	}

	return 0;
}

static const struct dev_pm_ops cpg_mssr_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cpg_mssr_suspend_noirq,
				      cpg_mssr_resume_noirq)
};
#define DEV_PM_OPS	&cpg_mssr_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */

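/*
 * Common initialization, shared by early boot (cpg_mssr_early_init(), called
 * with dev == NULL) and platform device probe.
 */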
static int __init cpg_mssr_common_init(struct device *dev,
				       struct device_node *np,
				       const struct cpg_mssr_info *info)
{
	struct cpg_mssr_priv *priv;
	unsigned int nclks, i;
	int error;

	if (info->init) {
		error = info->init(dev);
		if (error)
			return error;
	}

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	priv = kzalloc(struct_size(priv, clks, nclks), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->np = np;
	priv->dev = dev;
	spin_lock_init(&priv->rmw_lock);

	priv->base = of_iomap(np, 0);
	if (!priv->base) {
		error = -ENOMEM;
		goto out_err;
	}

	cpg_mssr_priv = priv;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->last_dt_core_clk = info->last_dt_core_clk;
	RAW_INIT_NOTIFIER_HEAD(&priv->notifiers);
	priv->stbyctrl = info->stbyctrl;

	for (i = 0; i < nclks; i++)
		priv->clks[i] = ERR_PTR(-ENOENT);

	error = of_clk_add_provider(np, cpg_mssr_clk_src_twocell_get, priv);
	if (error)
		goto out_err;

	return 0;

out_err:
	if (priv->base)
		iounmap(priv->base);
	kfree(priv);

	return error;
}

void __init cpg_mssr_early_init(struct device_node *np,
				const struct cpg_mssr_info *info)
{
	int error;
	int i;

	error = cpg_mssr_common_init(NULL, np, info);
	if (error)
		return;

	for (i = 0; i < info->num_early_core_clks; i++)
		cpg_mssr_register_core_clk(&info->early_core_clks[i], info,
					   cpg_mssr_priv);

	for (i = 0; i < info->num_early_mod_clks; i++)
		cpg_mssr_register_mod_clk(&info->early_mod_clks[i], info,
					  cpg_mssr_priv);
}

static int __init cpg_mssr_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct cpg_mssr_info *info;
	struct cpg_mssr_priv *priv;
	unsigned int i;
	int error;

	info = of_device_get_match_data(dev);

	if (!cpg_mssr_priv) {
		error = cpg_mssr_common_init(dev, dev->of_node, info);
		if (error)
			return error;
	}

	priv = cpg_mssr_priv;
	priv->dev = dev;
	dev_set_drvdata(dev, priv);

	for (i = 0; i < info->num_core_clks; i++)
		cpg_mssr_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		cpg_mssr_register_mod_clk(&info->mod_clks[i], info, priv);

	error = devm_add_action_or_reset(dev,
					 cpg_mssr_del_clk_provider,
					 np);
	if (error)
		return error;

	error = cpg_mssr_add_clk_domain(dev, info->core_pm_clks,
					info->num_core_pm_clks);
	if (error)
		return error;

	/* Reset Controller not supported for Standby Control SoCs */
	if (info->stbyctrl)
		return 0;

	error = cpg_mssr_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static struct platform_driver cpg_mssr_driver = {
	.driver		= {
		.name	= "renesas-cpg-mssr",
		.of_match_table = cpg_mssr_match,
		.pm = DEV_PM_OPS,
	},
};

static int __init cpg_mssr_init(void)
{
	return platform_driver_probe(&cpg_mssr_driver, cpg_mssr_probe);
}

subsys_initcall(cpg_mssr_init);

void __init cpg_core_nullify_range(struct cpg_core_clk *core_clks,
				   unsigned int num_core_clks,
				   unsigned int first_clk,
				   unsigned int last_clk)
{
	unsigned int i;

	for (i = 0; i < num_core_clks; i++)
		if (core_clks[i].id >= first_clk &&
		    core_clks[i].id <= last_clk)
			core_clks[i].name = NULL;
}

void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
			     unsigned int num_mod_clks,
			     const unsigned int *clks, unsigned int n)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
		if (mod_clks[i].id == clks[j]) {
			mod_clks[i].name = NULL;
			j++;
		}
}

void __init mssr_mod_reparent(struct mssr_mod_clk *mod_clks,
			      unsigned int num_mod_clks,
			      const struct mssr_mod_reparent *clks,
			      unsigned int n)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
		if (mod_clks[i].id == clks[j].clk) {
			mod_clks[i].parent = clks[j].parent;
			j++;
		}
}

MODULE_DESCRIPTION("Renesas CPG/MSSR Driver");
MODULE_LICENSE("GPL v2");