// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 *
 * Gated clock implementation
 */

#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>

/**
 * DOC: basic gateable clock which can gate and ungate its output
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable and clk_disable are functional & control gating
 * rate - inherits rate from parent. No clk_set_rate support
 * parent - fixed parent. No clk_set_parent support
 */
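
/*
 * Example (an illustrative sketch only, with hypothetical names and a
 * hypothetical register layout): a driver that has mapped its clock
 * controller at ctrl_base and guards register access with gate_lock
 * could expose bit 3 of the register at offset 0x10 as a gate clock:
 *
 *	static DEFINE_SPINLOCK(gate_lock);
 *
 *	hw = clk_hw_register_gate(dev, "uart_gate", "uart_parent", 0,
 *				  ctrl_base + 0x10, 3, 0, &gate_lock);
 *	if (IS_ERR(hw))
 *		return PTR_ERR(hw);
 *
 * clk_enable()/clk_disable() on the resulting clock then only toggle that
 * bit; rate and parent are inherited as described in the traits above.
 */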

static inline u32 clk_gate_readl(struct clk_gate *gate)
{
	if (gate->flags & CLK_GATE_BIG_ENDIAN)
		return ioread32be(gate->reg);

	return readl(gate->reg);
}

static inline void clk_gate_writel(struct clk_gate *gate, u32 val)
{
	if (gate->flags & CLK_GATE_BIG_ENDIAN)
		iowrite32be(val, gate->reg);
	else
		writel(val, gate->reg);
}

/*
 * It works on the following logic:
 *
 * For enabling clock, enable = 1
 *	set2dis = 1	-> clear bit	-> set = 0
 *	set2dis = 0	-> set bit	-> set = 1
 *
 * For disabling clock, enable = 0
 *	set2dis = 1	-> set bit	-> set = 1
 *	set2dis = 0	-> clear bit	-> set = 0
 *
 * So, the result is always: enable xor set2dis.
 */
static void clk_gate_endisable(struct clk_hw *hw, int enable)
{
	struct clk_gate *gate = to_clk_gate(hw);
	int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
	unsigned long flags;
	u32 reg;

	set ^= enable;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);
	else
		__acquire(gate->lock);

	if (gate->flags & CLK_GATE_HIWORD_MASK) {
		reg = BIT(gate->bit_idx + 16);
		if (set)
			reg |= BIT(gate->bit_idx);
	} else {
		reg = clk_gate_readl(gate);

		if (set)
			reg |= BIT(gate->bit_idx);
		else
			reg &= ~BIT(gate->bit_idx);
	}

	clk_gate_writel(gate, reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);
	else
		__release(gate->lock);
}
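
/*
 * Worked example of the CLK_GATE_HIWORD_MASK branch above (a sketch with a
 * hypothetical bit_idx of 5 and no CLK_GATE_SET_TO_DISABLE): such hardware
 * takes a write-enable mask in the upper halfword, so enabling the clock
 * writes BIT(21) | BIT(5) and disabling it writes just BIT(21). Only bit 5
 * of the low halfword can change, which is why no read-modify-write (and
 * hence no clk_gate_readl()) is needed on that path.
 */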

static int clk_gate_enable(struct clk_hw *hw)
{
	clk_gate_endisable(hw, 1);

	return 0;
}

static void clk_gate_disable(struct clk_hw *hw)
{
	clk_gate_endisable(hw, 0);
}

int clk_gate_is_enabled(struct clk_hw *hw)
{
	u32 reg;
	struct clk_gate *gate = to_clk_gate(hw);

	reg = clk_gate_readl(gate);

	/* if a set bit disables this clk, flip it before masking */
	if (gate->flags & CLK_GATE_SET_TO_DISABLE)
		reg ^= BIT(gate->bit_idx);

	reg &= BIT(gate->bit_idx);

	return reg ? 1 : 0;
}
EXPORT_SYMBOL_GPL(clk_gate_is_enabled);

const struct clk_ops clk_gate_ops = {
	.enable = clk_gate_enable,
	.disable = clk_gate_disable,
	.is_enabled = clk_gate_is_enabled,
};
EXPORT_SYMBOL_GPL(clk_gate_ops);
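
/*
 * clk_gate_is_enabled() is exported on its own so that a driver which
 * embeds a struct clk_gate but needs custom enable/disable handling can
 * still reuse the readback logic. A minimal sketch (foo_gate_enable() and
 * foo_gate_disable() are hypothetical driver callbacks):
 *
 *	static const struct clk_ops foo_gate_ops = {
 *		.enable		= foo_gate_enable,
 *		.disable	= foo_gate_disable,
 *		.is_enabled	= clk_gate_is_enabled,
 *	};
 */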

struct clk_hw *__clk_hw_register_gate(struct device *dev,
		struct device_node *np, const char *name,
		const char *parent_name, const struct clk_hw *parent_hw,
		const struct clk_parent_data *parent_data,
		unsigned long flags,
		void __iomem *reg, u8 bit_idx,
		u8 clk_gate_flags, spinlock_t *lock)
{
	struct clk_gate *gate;
	struct clk_hw *hw;
	struct clk_init_data init = {};
	int ret = -EINVAL;

	if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
		if (bit_idx > 15) {
			pr_err("gate bit exceeds LOWORD field\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* allocate the gate */
	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_gate_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.parent_hws = parent_hw ? &parent_hw : NULL;
	init.parent_data = parent_data;
	if (parent_name || parent_hw || parent_data)
		init.num_parents = 1;
	else
		init.num_parents = 0;

	/* struct clk_gate assignments */
	gate->reg = reg;
	gate->bit_idx = bit_idx;
	gate->flags = clk_gate_flags;
	gate->lock = lock;
	gate->hw.init = &init;

	hw = &gate->hw;
	if (dev || !np)
		ret = clk_hw_register(dev, hw);
	else if (np)
		ret = of_clk_hw_register(np, hw);
	if (ret) {
		kfree(gate);
		hw = ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__clk_hw_register_gate);
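
/*
 * The clk_hw_register_gate*() wrappers in <linux/clk-provider.h> pick
 * exactly one of the three ways of describing the single parent and pass
 * NULL for the other two. Roughly (a sketch of the expansion; see the
 * header for the authoritative macro definitions):
 *
 *	clk_hw_register_gate(dev, name, parent_name, ...)
 *		-> __clk_hw_register_gate(dev, NULL, name, parent_name,
 *					  NULL, NULL, ...)
 *	clk_hw_register_gate_parent_hw(dev, name, parent_hw, ...)
 *		-> __clk_hw_register_gate(dev, NULL, name, NULL, parent_hw,
 *					  NULL, ...)
 *
 * This is why init.num_parents above is 1 whenever any one of parent_name,
 * parent_hw or parent_data is provided.
 */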

struct clk *clk_register_gate(struct device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		void __iomem *reg, u8 bit_idx,
		u8 clk_gate_flags, spinlock_t *lock)
{
	struct clk_hw *hw;

	hw = clk_hw_register_gate(dev, name, parent_name, flags, reg,
				  bit_idx, clk_gate_flags, lock);
	if (IS_ERR(hw))
		return ERR_CAST(hw);
	return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_gate);

void clk_unregister_gate(struct clk *clk)
{
	struct clk_gate *gate;
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	gate = to_clk_gate(hw);

	clk_unregister(clk);
	kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_unregister_gate);

void clk_hw_unregister_gate(struct clk_hw *hw)
{
	struct clk_gate *gate;

	gate = to_clk_gate(hw);

	clk_hw_unregister(hw);
	kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_gate);