// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/sort.h>

#include "common.h"

enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
};

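/*
 * The message payloads below mirror the little-endian wire format defined
 * by the SCMI specification; __le16/__le32 fields are converted with the
 * usual cpu_to_le*()/le*_to_cpu() helpers before use.
 */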
struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;
	u8 max_async_req;
	u8 reserved;
};

struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define	CLOCK_ENABLE	BIT(0)
	u8 name[SCMI_MAX_STR_SIZE];
};

struct scmi_clock_set_config {
	__le32 id;
	__le32 attributes;
};

struct scmi_msg_clock_describe_rates {
	__le32 id;
	__le32 rate_index;
};

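/*
 * CLOCK_DESCRIBE_RATES responses pack three fields into num_rates_flags:
 * bits [11:0] carry the number of rates returned in this reply, bit 12
 * selects between a discrete rate list and a {min, max, step} triplet,
 * and bits [31:16] give the number of rates still to be fetched.
 */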
struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[0];
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};

struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;
	__le32 value_low;
	__le32 value_high;
};

struct clock_info {
	u32 version;
	int num_clocks;
	int max_async_req;
	atomic_t cur_async_req;
	struct scmi_clock_info *clk;
};

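/*
 * Query how many clocks the platform exposes and how many asynchronous
 * rate-set requests it can queue, caching both in the protocol instance.
 */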
static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
					      struct clock_info *ci)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_protocol_attributes *attr;

	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
				 SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		ci->num_clocks = le16_to_cpu(attr->num_clocks);
		ci->max_async_req = attr->max_async_req;
	}

	scmi_xfer_put(handle, t);
	return ret;
}

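/* Retrieve the name and attribute flags of a single clock by its id. */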
static int scmi_clock_attributes_get(const struct scmi_handle *handle,
				     u32 clk_id, struct scmi_clock_info *clk)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *attr;

	ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
				 sizeof(clk_id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);
	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
	else
		clk->name[0] = '\0';

	scmi_xfer_put(handle, t);
	return ret;
}

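/* Ascending-order comparator used to sort() the discrete rate list. */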
static int rate_cmp_func(const void *_r1, const void *_r2)
{
	const u64 *r1 = _r1, *r2 = _r2;

	if (*r1 < *r2)
		return -1;
	else if (*r1 == *r2)
		return 0;
	else
		return 1;
}

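/*
 * Discrete rate lists can exceed a single transport buffer, so rates are
 * fetched in chunks: each CLOCK_DESCRIBE_RATES call passes the count of
 * rates already read as the starting index, and the loop continues while
 * the firmware reports both returned and remaining entries.
 */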
static int
scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	u64 *rate = NULL;
	int ret, cnt;
	bool rate_discrete = false;
	u32 tot_rate_cnt = 0, rates_flag;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_msg_clock_describe_rates *clk_desc;
	struct scmi_msg_resp_clock_describe_rates *rlist;

	ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES,
				 SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
	if (ret)
		return ret;

	clk_desc = t->tx.buf;
	rlist = t->rx.buf;

	do {
		clk_desc->id = cpu_to_le32(clk_id);
		/* Set the number of rates to be skipped/already read */
		clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);

		ret = scmi_do_xfer(handle, t);
		if (ret)
			goto err;

		rates_flag = le32_to_cpu(rlist->num_rates_flags);
		num_remaining = NUM_REMAINING(rates_flag);
		rate_discrete = RATE_DISCRETE(rates_flag);
		num_returned = NUM_RETURNED(rates_flag);

		if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
			dev_err(handle->dev, "No. of rates > MAX_NUM_RATES\n");
			break;
		}

		if (!rate_discrete) {
			clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
			clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
			clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
			dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n",
				clk->range.min_rate, clk->range.max_rate,
				clk->range.step_size);
			break;
		}

		rate = &clk->list.rates[tot_rate_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, rate++) {
			*rate = RATE_TO_U64(rlist->rate[cnt]);
			dev_dbg(handle->dev, "Rate %llu Hz\n", *rate);
		}

		tot_rate_cnt += num_returned;

		scmi_reset_rx_to_maxsz(handle, t);
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	if (rate_discrete && rate) {
		clk->list.num_rates = tot_rate_cnt;
		sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
		     rate_cmp_func, NULL);
	}

	clk->rate_discrete = rate_discrete;

err:
	scmi_xfer_put(handle, t);
	return ret;
}

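/* Read the current rate of a clock, returned as a 64-bit value in Hz. */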
static int
scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
				 sizeof(__le32), sizeof(u64), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		*value = get_unaligned_le64(t->rx.buf);

	scmi_xfer_put(handle, t);
	return ret;
}

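/*
 * Program a new clock rate. While the platform still has room for more
 * in-flight asynchronous requests, the command is issued with
 * CLOCK_SET_ASYNC and completion is awaited via a delayed response;
 * otherwise it falls back to a regular synchronous transfer.
 */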
static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
			       u64 rate)
{
	int ret;
	u32 flags = 0;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = handle->clk_priv;

	ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
				 sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC)
		ret = scmi_do_xfer_with_response(handle, t);
	else
		ret = scmi_do_xfer(handle, t);

	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	scmi_xfer_put(handle, t);
	return ret;
}

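/*
 * Update a clock's configuration attributes; setting or clearing
 * CLOCK_ENABLE here is what backs the enable/disable helpers below.
 */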
static int
scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_clock_set_config *cfg;

	ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
				 sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(config);

	ret = scmi_do_xfer(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id)
{
	return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE);
}

static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id)
{
	return scmi_clock_config_set(handle, clk_id, 0);
}

static int scmi_clock_count_get(const struct scmi_handle *handle)
{
	struct clock_info *ci = handle->clk_priv;

	return ci->num_clocks;
}

static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
{
	struct clock_info *ci = handle->clk_priv;
	struct scmi_clock_info *clk = ci->clk + clk_id;

	if (!clk->name[0])
		return NULL;

	return clk;
}

static const struct scmi_clk_ops clk_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
};

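/*
 * Consumers reach these operations through the SCMI handle. A minimal
 * sketch of a hypothetical caller (assuming a valid handle and a clock
 * with identifier 0):
 *
 *	u64 rate;
 *	int ret = handle->clk_ops->rate_get(handle, 0, &rate);
 *	if (!ret)
 *		ret = handle->clk_ops->rate_set(handle, 0, rate);
 */

/*
 * Probe-time setup: read the protocol version, query the clock count,
 * then fetch the attributes and supported rates of every clock before
 * publishing clk_ops on the handle.
 */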
static int scmi_clock_protocol_init(struct scmi_handle *handle)
{
	u32 version;
	int clkid, ret;
	struct clock_info *cinfo;

	scmi_version_get(handle, SCMI_PROTOCOL_CLOCK, &version);

	dev_dbg(handle->dev, "Clock Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	scmi_clock_protocol_attributes_get(handle, cinfo);

	cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks,
				  sizeof(*cinfo->clk), GFP_KERNEL);
	if (!cinfo->clk)
		return -ENOMEM;

	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
		struct scmi_clock_info *clk = cinfo->clk + clkid;

		ret = scmi_clock_attributes_get(handle, clkid, clk);
		if (!ret)
			scmi_clock_describe_rates_get(handle, clkid, clk);
	}

	cinfo->version = version;
	handle->clk_ops = &clk_ops;
	handle->clk_priv = cinfo;

	return 0;
}

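/* Hook the clock protocol's init/deinit into the SCMI core at module load. */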
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_CLOCK, clock)