// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - debugfs
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/list.h>

#include "internal.h"

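/*
 * A map registered before the debugfs root directory exists is queued on
 * regmap_debugfs_early_list as one of these nodes and picked up later by
 * regmap_debugfs_initcall().
 */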
struct regmap_debugfs_node {
	struct regmap *map;
	const char *name;
	struct list_head link;
};

static unsigned int dummy_index;
static struct dentry *regmap_debugfs_root;
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);

/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val)
{
	return snprintf(NULL, 0, "%x", max_val);
}

static ssize_t regmap_name_read_file(struct file *file,
				     char __user *user_buf, size_t count,
				     loff_t *ppos)
{
	struct regmap *map = file->private_data;
	const char *name = "nodev";
	int ret;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (map->dev && map->dev->driver)
		name = map->dev->driver->name;

	ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
	if (ret < 0) {
		kfree(buf);
		return ret;
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_name_fops = {
	.open = simple_open,
	.read = regmap_name_read_file,
	.llseek = default_llseek,
};

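/*
 * Free every block descriptor on the dump offset cache. Both callers hold
 * map->cache_lock while tearing the cache down.
 */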
static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
	struct regmap_debugfs_off_cache *c;

	while (!list_empty(&map->debugfs_off_cache)) {
		c = list_first_entry(&map->debugfs_off_cache,
				     struct regmap_debugfs_off_cache,
				     list);
		list_del(&c->list);
		kfree(c);
	}
}

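/*
 * A register is shown in the dump unless it is precious, and only if it is
 * either readable from the hardware or present in the register cache.
 */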
static bool regmap_printable(struct regmap *map, unsigned int reg)
{
	if (regmap_precious(map, reg))
		return false;

	if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
		return false;

	return true;
}

/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
						  unsigned int base,
						  loff_t from,
						  loff_t *pos)
{
	struct regmap_debugfs_off_cache *c = NULL;
	loff_t p = 0;
	unsigned int i, ret;
	unsigned int fpos_offset;
	unsigned int reg_offset;

	/* Suppress the cache if we're using a subrange */
	if (base)
		return base;

	/*
	 * If we don't have a cache build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_printable(map, i)) {
				if (c) {
					c->max = p - 1;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
				}

				continue;
			}

			/* No cache entry?  Start a new one */
			if (!c) {
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
				c->base_reg = i;
			}

			p += map->debugfs_tot_len;
		}
	}

	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}

	/*
	 * This should never happen; we return above if we fail to
	 * allocate and we should never be in this code if there are
	 * no registers at all.
	 */
	WARN_ON(list_empty(&map->debugfs_off_cache));
	ret = base;

	/* Find the relevant block:offset */
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		if (from >= c->min && from <= c->max) {
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + (reg_offset * map->reg_stride);
		}

		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}

static inline void regmap_calc_tot_len(struct regmap *map,
				       void *buf, size_t count)
{
	/* Calculate the length of a fixed format */
	if (!map->debugfs_tot_len) {
		map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
		map->debugfs_val_len = 2 * map->format.val_bytes;
		map->debugfs_tot_len = map->debugfs_reg_len +
			map->debugfs_val_len + 3;	/* : \n */
	}
}

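/*
 * Step to the next register that will appear in the dump: the adjacent
 * register if it is printable, otherwise the start of the next cached
 * block. Returns -EINVAL when there is nothing further to show.
 */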
static int regmap_next_readable_reg(struct regmap *map, int reg)
{
	struct regmap_debugfs_off_cache *c;
	int ret = -EINVAL;

	if (regmap_printable(map, reg + map->reg_stride)) {
		ret = reg + map->reg_stride;
	} else {
		mutex_lock(&map->cache_lock);
		list_for_each_entry(c, &map->debugfs_off_cache, list) {
			if (reg > c->max_reg)
				continue;
			if (reg < c->base_reg) {
				ret = c->base_reg;
				break;
			}
		}
		mutex_unlock(&map->cache_lock);
	}
	return ret;
}

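/*
 * Dump registers in the fixed "<reg>: <value>\n" format used by the
 * `registers' and range files, starting from the register that corresponds
 * to the requested file offset. Values that cannot be read are shown as Xs.
 */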
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
				   unsigned int to, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	size_t buf_pos = 0;
	loff_t p = *ppos;
	ssize_t ret;
	int i;
	char *buf;
	unsigned int val, start_reg;

	if (*ppos < 0 || !count)
		return -EINVAL;

	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	regmap_calc_tot_len(map, buf, count);

	/* Work out which register we're starting at */
	start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);

	for (i = start_reg; i >= 0 && i <= to;
	     i = regmap_next_readable_reg(map, i)) {

		/* If we're in the region the user is trying to read */
		if (p >= *ppos) {
			/* ...but not beyond it */
			if (buf_pos + map->debugfs_tot_len > count)
				break;

			/* Format the register */
			snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
				 map->debugfs_reg_len, i - from);
			buf_pos += map->debugfs_reg_len + 2;

			/* Format the value, write all X if we can't read */
			ret = regmap_read(map, i, &val);
			if (ret == 0)
				snprintf(buf + buf_pos, count - buf_pos,
					 "%.*x", map->debugfs_val_len, val);
			else
				memset(buf + buf_pos, 'X',
				       map->debugfs_val_len);
			buf_pos += 2 * map->format.val_bytes;

			buf[buf_pos++] = '\n';
		}
		p += map->debugfs_tot_len;
	}

	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out;
	}

	*ppos += buf_pos;

out:
	kfree(buf);
	return ret;
}

static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct regmap *map = file->private_data;

	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
				   count, ppos);
}

#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous, especially with clients such as PMICs, so no
 * real compile-time configuration option is provided for this feature;
 * anyone who wants to use it will need to modify the source directly.
 */
static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif

static const struct file_operations regmap_map_fops = {
	.open = simple_open,
	.read = regmap_map_read_file,
	.write = regmap_map_write_file,
	.llseek = default_llseek,
};

static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct regmap_range_node *range = file->private_data;
	struct regmap *map = range->map;

	return regmap_read_debugfs(map, range->range_min, range->range_max,
				   user_buf, count, ppos);
}

static const struct file_operations regmap_range_fops = {
	.open = simple_open,
	.read = regmap_range_read_file,
	.llseek = default_llseek,
};

static ssize_t regmap_reg_ranges_read_file(struct file *file,
					   char __user *user_buf, size_t count,
					   loff_t *ppos)
{
	struct regmap *map = file->private_data;
	struct regmap_debugfs_off_cache *c;
	loff_t p = 0;
	size_t buf_pos = 0;
	char *buf;
	char *entry;
	int ret;
	unsigned int entry_len;

	if (*ppos < 0 || !count)
		return -EINVAL;

	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!entry) {
		kfree(buf);
		return -ENOMEM;
	}

	/* While we are at it, build the register dump cache
	 * now so the read() operation on the `registers' file
	 * can benefit from using the cache.  We do not care
	 * about the file position information that is contained
	 * in the cache, just about the actual register blocks */
	regmap_calc_tot_len(map, buf, count);
	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

	/* Reset file pointer as the fixed-format of the `registers'
	 * file is not compatible with the `range' file */
	p = 0;
	mutex_lock(&map->cache_lock);
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
				     c->base_reg, c->max_reg);
		if (p >= *ppos) {
			if (buf_pos + entry_len > count)
				break;
			memcpy(buf + buf_pos, entry, entry_len);
			buf_pos += entry_len;
		}
		p += entry_len;
	}
	mutex_unlock(&map->cache_lock);

	kfree(entry);
	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out_buf;
	}

	*ppos += buf_pos;
out_buf:
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_reg_ranges_fops = {
	.open = simple_open,
	.read = regmap_reg_ranges_read_file,
	.llseek = default_llseek,
};

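/*
 * The `access' file: one line per register with y/n flags for readable,
 * writeable, volatile and precious, in that order.
 */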
static int regmap_access_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	int i, reg_len;

	reg_len = regmap_calc_reg_len(map->max_register);

	for (i = 0; i <= map->max_register; i += map->reg_stride) {
		/* Ignore registers which are neither readable nor writable */
		if (!regmap_readable(map, i) && !regmap_writeable(map, i))
			continue;

		/* Format the register */
		seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
			   regmap_readable(map, i) ? 'y' : 'n',
			   regmap_writeable(map, i) ? 'y' : 'n',
			   regmap_volatile(map, i) ? 'y' : 'n',
			   regmap_precious(map, i) ? 'y' : 'n');
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(regmap_access);

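/*
 * Writing to `cache_only' toggles the flag under the map lock. Forcing it
 * on taints the kernel; clearing it triggers a regcache_sync() so the
 * register cache is written back to the hardware.
 */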
static ssize_t regmap_cache_only_write_file(struct file *file,
					    const char __user *user_buf,
					    size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_only);
	bool new_val, require_sync = false;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
	if (err)
		return count;

	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
		require_sync = true;
	}
	map->cache_only = new_val;

	map->unlock(map->lock_arg);
	debugfs_file_put(file->f_path.dentry);

	if (require_sync) {
		err = regcache_sync(map);
		if (err)
			dev_err(map->dev, "Failed to sync cache %d\n", err);
	}

	return count;
}

static const struct file_operations regmap_cache_only_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_only_write_file,
};

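/*
 * Writing to `cache_bypass' toggles the flag under the map lock; forcing
 * it on taints the kernel since subsequent accesses go straight to the
 * hardware rather than through the cache.
 */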
static ssize_t regmap_cache_bypass_write_file(struct file *file,
					      const char __user *user_buf,
					      size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_bypass);
	bool new_val;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
	if (err)
		return count;

	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_bypass) {
		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_bypass) {
		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
	}
	map->cache_bypass = new_val;

	map->unlock(map->lock_arg);
	debugfs_file_put(file->f_path.dentry);

	return count;
}

static const struct file_operations regmap_cache_bypass_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_bypass_write_file,
};

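/*
 * Create the debugfs directory and files for a map, or queue the map on
 * the early list if the top-level `regmap' directory does not exist yet.
 */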
void regmap_debugfs_init(struct regmap *map, const char *name)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;
	const char *devname = "dummy";

	/*
	 * Userspace can initiate reads from the hardware over debugfs.
	 * Normally internal regmap structures and buffers are protected with
	 * a mutex or a spinlock, but if the regmap owner decided to disable
	 * all locking mechanisms, this is no longer the case. For safety:
	 * don't create the debugfs entries if locking is disabled.
	 */
	if (map->debugfs_disable) {
		dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
		return;
	}

	/* If we don't have the debugfs root yet, postpone init */
	if (!regmap_debugfs_root) {
		struct regmap_debugfs_node *node;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return;
		node->map = map;
		node->name = name;
		mutex_lock(&regmap_debugfs_early_lock);
		list_add(&node->link, &regmap_debugfs_early_list);
		mutex_unlock(&regmap_debugfs_early_lock);
		return;
	}

	INIT_LIST_HEAD(&map->debugfs_off_cache);
	mutex_init(&map->cache_lock);

	if (map->dev)
		devname = dev_name(map->dev);

	if (name) {
		if (!map->debugfs_name) {
			map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
						      devname, name);
			if (!map->debugfs_name)
				return;
		}
		name = map->debugfs_name;
	} else {
		name = devname;
	}

	if (!strcmp(name, "dummy")) {
		kfree(map->debugfs_name);
		map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
					      dummy_index);
		if (!map->debugfs_name)
			return;
		name = map->debugfs_name;
		dummy_index++;
	}

	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);

	debugfs_create_file("name", 0400, map->debugfs,
			    map, &regmap_name_fops);

	debugfs_create_file("range", 0400, map->debugfs,
			    map, &regmap_reg_ranges_fops);

	if (map->max_register || regmap_readable(map, 0)) {
		umode_t registers_mode;

#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
		registers_mode = 0600;
#else
		registers_mode = 0400;
#endif

		debugfs_create_file("registers", registers_mode, map->debugfs,
				    map, &regmap_map_fops);
		debugfs_create_file("access", 0400, map->debugfs,
				    map, &regmap_access_fops);
	}

	if (map->cache_type) {
		debugfs_create_file("cache_only", 0600, map->debugfs,
				    &map->cache_only, &regmap_cache_only_fops);
		debugfs_create_bool("cache_dirty", 0400, map->debugfs,
				    &map->cache_dirty);
		debugfs_create_file("cache_bypass", 0600, map->debugfs,
				    &map->cache_bypass,
				    &regmap_cache_bypass_fops);
	}

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);

		if (range_node->name)
			debugfs_create_file(range_node->name, 0400,
					    map->debugfs, range_node,
					    &regmap_range_fops);

		next = rb_next(&range_node->node);
	}

	if (map->cache_ops && map->cache_ops->debugfs_init)
		map->cache_ops->debugfs_init(map);
}

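/*
 * Tear down a map's debugfs state: remove its directory and offset cache,
 * or drop it from the early list if it was never instantiated.
 */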
void regmap_debugfs_exit(struct regmap *map)
{
	if (map->debugfs) {
		debugfs_remove_recursive(map->debugfs);
		mutex_lock(&map->cache_lock);
		regmap_debugfs_free_dump_cache(map);
		mutex_unlock(&map->cache_lock);
		kfree(map->debugfs_name);
		map->debugfs_name = NULL;
	} else {
		struct regmap_debugfs_node *node, *tmp;

		mutex_lock(&regmap_debugfs_early_lock);
		list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
					 link) {
			if (node->map == map) {
				list_del(&node->link);
				kfree(node);
			}
		}
		mutex_unlock(&regmap_debugfs_early_lock);
	}
}

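/*
 * Create the top-level `regmap' directory and instantiate any maps that
 * registered on the early list before it existed.
 */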
void regmap_debugfs_initcall(void)
{
	struct regmap_debugfs_node *node, *tmp;

	regmap_debugfs_root = debugfs_create_dir("regmap", NULL);

	mutex_lock(&regmap_debugfs_early_lock);
	list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
		regmap_debugfs_init(node->map, node->name);
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&regmap_debugfs_early_lock);
}