// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include "common.h"

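/* Clock protocol message IDs, as defined by the SCMI specification */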
enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
};

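/* Response payload for PROTOCOL_ATTRIBUTES */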
struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;
	u8 max_async_req;
	u8 reserved;
};

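/* Response payload for CLOCK_ATTRIBUTES */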
struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define	CLOCK_ENABLE	BIT(0)
	u8 name[SCMI_MAX_STR_SIZE];
};

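/* Request payload for CLOCK_CONFIG_SET */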
struct scmi_clock_set_config {
	__le32 id;
	__le32 attributes;
};

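/* Request payload for CLOCK_DESCRIBE_RATES */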
struct scmi_msg_clock_describe_rates {
	__le32 id;
	__le32 rate_index;
};

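/*
 * Response payload for CLOCK_DESCRIBE_RATES: the low 12 bits of
 * num_rates_flags carry the number of rates returned in this message,
 * bit 12 distinguishes a (min, max, step) range triplet from a discrete
 * rate list, and the upper 16 bits give the number of rates still to be
 * read. Each rate is a 64-bit value in Hz, split into two little-endian
 * 32-bit words that RATE_TO_U64() reassembles.
 */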
struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[0];
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};

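/* Request payload for CLOCK_RATE_SET */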
struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;
	__le32 value_low;
	__le32 value_high;
};

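/* Protocol-private data, reachable via handle->clk_priv */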
struct clock_info {
	int num_clocks;
	int max_async_req;
	atomic_t cur_async_req;
	struct scmi_clock_info *clk;
};

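/**
 * scmi_clock_protocol_attributes_get() - fetch protocol-level attributes
 * @handle: SCMI entity handle
 * @ci: clock protocol private data to populate
 *
 * Queries the platform for the number of exposed clocks and the maximum
 * number of concurrent asynchronous rate-change requests it supports.
 *
 * Return: 0 on success, a negative error code otherwise.
 */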
static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
					      struct clock_info *ci)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_protocol_attributes *attr;

	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
				 SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		ci->num_clocks = le16_to_cpu(attr->num_clocks);
		ci->max_async_req = attr->max_async_req;
	}

	scmi_xfer_put(handle, t);
	return ret;
}

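/**
 * scmi_clock_attributes_get() - fetch the attributes of a single clock
 * @handle: SCMI entity handle
 * @clk_id: identifier of the clock domain
 * @clk: clock descriptor whose name is filled in on success
 *
 * Return: 0 on success, a negative error code otherwise.
 */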
static int scmi_clock_attributes_get(const struct scmi_handle *handle,
				     u32 clk_id, struct scmi_clock_info *clk)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *attr;

	ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
				 sizeof(clk_id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);
	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
	else
		clk->name[0] = '\0';

	scmi_xfer_put(handle, t);
	return ret;
}

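/**
 * scmi_clock_describe_rates_get() - retrieve the rates supported by a clock
 * @handle: SCMI entity handle
 * @clk_id: identifier of the clock domain
 * @clk: clock descriptor to populate
 *
 * The platform either returns a discrete list of rates, possibly spread
 * over several messages, or a (min, max, step) triplet describing a
 * continuous range. Iterates until all discrete rates have been read.
 *
 * Return: 0 on success, a negative error code otherwise.
 */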
static int
scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	u64 *rate;
	int ret, cnt;
	bool rate_discrete = false;
	u32 tot_rate_cnt = 0, rates_flag;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_msg_clock_describe_rates *clk_desc;
	struct scmi_msg_resp_clock_describe_rates *rlist;

	ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES,
				 SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
	if (ret)
		return ret;

	clk_desc = t->tx.buf;
	rlist = t->rx.buf;

	do {
		clk_desc->id = cpu_to_le32(clk_id);
		/* Set the number of rates to be skipped/already read */
		clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);

		ret = scmi_do_xfer(handle, t);
		if (ret)
			goto err;

		rates_flag = le32_to_cpu(rlist->num_rates_flags);
		num_remaining = NUM_REMAINING(rates_flag);
		rate_discrete = RATE_DISCRETE(rates_flag);
		num_returned = NUM_RETURNED(rates_flag);

		if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
			dev_err(handle->dev, "No. of rates > MAX_NUM_RATES\n");
			break;
		}

		if (!rate_discrete) {
			clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
			clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
			clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
			dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n",
				clk->range.min_rate, clk->range.max_rate,
				clk->range.step_size);
			break;
		}

		rate = &clk->list.rates[tot_rate_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, rate++) {
			*rate = RATE_TO_U64(rlist->rate[cnt]);
			dev_dbg(handle->dev, "Rate %llu Hz\n", *rate);
		}

		tot_rate_cnt += num_returned;
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	if (rate_discrete)
		clk->list.num_rates = tot_rate_cnt;

	clk->rate_discrete = rate_discrete;

err:
	scmi_xfer_put(handle, t);
	return ret;
}

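/**
 * scmi_clock_rate_get() - read the current rate of a clock
 * @handle: SCMI entity handle
 * @clk_id: identifier of the clock domain
 * @value: location where the rate in Hz is stored on success
 *
 * Return: 0 on success, a negative error code otherwise.
 */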
static int
scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
				 sizeof(__le32), sizeof(u64), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		*value = get_unaligned_le64(t->rx.buf);

	scmi_xfer_put(handle, t);
	return ret;
}

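/**
 * scmi_clock_rate_set() - request a new rate for a clock
 * @handle: SCMI entity handle
 * @clk_id: identifier of the clock domain
 * @rate: requested rate in Hz
 *
 * Issues the request asynchronously when the platform supports it and
 * the number of in-flight asynchronous requests is below the advertised
 * limit; otherwise falls back to a synchronous transfer.
 *
 * Return: 0 on success, a negative error code otherwise.
 */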
static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
			       u64 rate)
{
	int ret;
	u32 flags = 0;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = handle->clk_priv;

	ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
				 sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC)
		ret = scmi_do_xfer_with_response(handle, t);
	else
		ret = scmi_do_xfer(handle, t);

	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	scmi_xfer_put(handle, t);
	return ret;
}

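/**
 * scmi_clock_config_set() - set the configuration attributes of a clock
 * @handle: SCMI entity handle
 * @clk_id: identifier of the clock domain
 * @config: attributes to apply, e.g. CLOCK_ENABLE to ungate the clock
 *
 * Return: 0 on success, a negative error code otherwise.
 */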
static int
scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_clock_set_config *cfg;

	ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
				 sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(config);

	ret = scmi_do_xfer(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id)
{
	return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE);
}

static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id)
{
	return scmi_clock_config_set(handle, clk_id, 0);
}

static int scmi_clock_count_get(const struct scmi_handle *handle)
{
	struct clock_info *ci = handle->clk_priv;

	return ci->num_clocks;
}

static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
{
	struct clock_info *ci = handle->clk_priv;
	struct scmi_clock_info *clk = ci->clk + clk_id;

	if (!clk->name[0])
		return NULL;

	return clk;
}

static struct scmi_clk_ops clk_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
};

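/**
 * scmi_clock_protocol_init() - probe and initialise the clock protocol
 * @handle: SCMI entity handle
 *
 * Reads the protocol version, allocates the private clock info, then
 * enumerates every clock domain, caching its attributes and supported
 * rates, before publishing the protocol operations on @handle.
 *
 * Return: 0 on success, a negative error code otherwise.
 */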
static int scmi_clock_protocol_init(struct scmi_handle *handle)
{
	u32 version;
	int clkid, ret;
	struct clock_info *cinfo;

	scmi_version_get(handle, SCMI_PROTOCOL_CLOCK, &version);

	dev_dbg(handle->dev, "Clock Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	scmi_clock_protocol_attributes_get(handle, cinfo);

	cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks,
				  sizeof(*cinfo->clk), GFP_KERNEL);
	if (!cinfo->clk)
		return -ENOMEM;

	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
		struct scmi_clock_info *clk = cinfo->clk + clkid;

		ret = scmi_clock_attributes_get(handle, clkid, clk);
		if (!ret)
			scmi_clock_describe_rates_get(handle, clkid, clk);
	}

	handle->clk_ops = &clk_ops;
	handle->clk_priv = cinfo;

	return 0;
}

static int __init scmi_clock_init(void)
{
	return scmi_protocol_register(SCMI_PROTOCOL_CLOCK,
				      &scmi_clock_protocol_init);
}
subsys_initcall(scmi_clock_init);
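
/*
 * Illustrative consumer-side usage, a sketch only: assumes an SCMI driver
 * that already holds a valid handle obtained from the SCMI core; error
 * handling and the handle's origin are elided for brevity:
 *
 *	const struct scmi_handle *handle = ...;
 *	u64 rate;
 *
 *	if (handle->clk_ops->count_get(handle) > 0 &&
 *	    !handle->clk_ops->rate_get(handle, 0, &rate))
 *		handle->clk_ops->enable(handle, 0);
 */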