// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/property.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used. For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

#ifdef LOG_DEVICE
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif


static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);
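
/*
 * Example (illustrative sketch only, not part of the regmap core): a driver
 * can describe which registers are readable with a regmap_access_table
 * instead of a callback, and regmap_readable() will then go through
 * regmap_check_range_table(). The FOO_* register names below are
 * hypothetical.
 *
 *	static const struct regmap_range foo_readable_ranges[] = {
 *		regmap_reg_range(FOO_CHIP_ID, FOO_STATUS),
 *		regmap_reg_range(FOO_CTRL0, FOO_CTRL7),
 *	};
 *
 *	static const struct regmap_access_table foo_readable_table = {
 *		.yes_ranges = foo_readable_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_readable_ranges),
 *	};
 *
 * Setting .rd_table = &foo_readable_table in the regmap_config is all that
 * is needed; no_ranges can be left empty when nothing has to be excluded.
 */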

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->writeable_noinc_reg)
		return map->writeable_noinc_reg(map->dev, reg);

	if (map->wr_noinc_table)
		return regmap_check_range_table(map, reg, map->wr_noinc_table);

	return true;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_12_20_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[0] = reg >> 4;
	out[1] = (reg << 4) | (val >> 16);
	out[2] = val >> 8;
	out[3] = val;
}


static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le16(val << shift, buf);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u16 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be32(val << shift, buf);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le32(val << shift, buf);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u32 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be64((u64) val << shift, buf);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le64((u64) val << shift, buf);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u64 v = (u64) val << shift;

	memcpy(buf, &v, sizeof(v));
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	return get_unaligned_le16(buf);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	u16 v = get_unaligned_be16(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_16_le_inplace(void *buf)
{
	u16 v = get_unaligned_le16(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	u16 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	return get_unaligned_be32(buf);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	return get_unaligned_le32(buf);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	u32 v = get_unaligned_be32(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_32_le_inplace(void *buf)
{
	u32 v = get_unaligned_le32(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	u32 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	return get_unaligned_be64(buf);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	return get_unaligned_le64(buf);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	u64 v = get_unaligned_be64(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_64_le_inplace(void *buf)
{
	u64 v = get_unaligned_le64(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	u64 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
#endif

static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{

}

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
{
	if (config->name) {
		const char *name = kstrdup_const(config->name, GFP_KERNEL);

		if (!name)
			return -ENOMEM;

		kfree_const(map->name);
		map->name = name;
	}

	return 0;
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;
	int ret;

	map->dev = dev;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_exit(map);
	regmap_debugfs_init(map);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the firmware node exists, try to get the endianness from it */
	if (fwnode_property_read_bool(fwnode, "big-endian"))
		endian = REGMAP_ENDIAN_BIG;
	else if (fwnode_property_read_bool(fwnode, "little-endian"))
		endian = REGMAP_ENDIAN_LITTLE;
	else if (fwnode_property_read_bool(fwnode, "native-endian"))
		endian = REGMAP_ENDIAN_NATIVE;

	/* If the endianness was specified in fwnode, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
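
/*
 * Example (sketch only, not part of this file): the value endianness
 * resolved above can come either from the regmap_config or from firmware
 * properties. For a devicetree-described device the node may simply carry
 * one of the boolean properties checked in regmap_get_val_endian(); the
 * binding below is hypothetical:
 *
 *	codec@1a {
 *		compatible = "vendor,foo";
 *		reg = <0x1a>;
 *		little-endian;
 *	};
 *
 * Alternatively a driver can force it in code, bypassing the fwnode lookup:
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.val_format_endian = REGMAP_ENDIAN_LITTLE,
 *	};
 */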

struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	ret = regmap_set_name(map, config);
	if (ret)
		goto err_map;

	ret = -EINVAL;	/* Later error paths rely on this */

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		map->can_sleep = config->can_sleep;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
		map->can_sleep = config->can_sleep;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			map->can_sleep = true;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_read || !bus || !bus->read;
	map->use_single_write = config->use_single_write || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->wr_noinc_table = config->wr_noinc_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;
		map->reg_update_bits = bus->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 12:
		switch (config->val_bits) {
		case 20:
			map->format.format_write = regmap_format_12_20_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);
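
/*
 * Example (sketch only): drivers normally reach __devm_regmap_init() through
 * a bus-specific wrapper such as devm_regmap_init_i2c(). A minimal probe
 * path, with a hypothetical FOO_MAX_REGISTER, might look like:
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *		.max_register = FOO_MAX_REGISTER,
 *		.cache_type = REGCACHE_RBTREE,
 *	};
 *
 *	static int foo_i2c_probe(struct i2c_client *i2c)
 *	{
 *		struct regmap *map;
 *
 *		map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *		if (IS_ERR(map))
 *			return PTR_ERR(map);
 *		...
 *	}
 *
 * The devres release registered above calls regmap_exit() automatically on
 * driver detach, so the probe path needs no explicit teardown.
 */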

static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
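
/*
 * Example (sketch only): a reg_field describes one bitfield inside a
 * register, and the helpers in this file turn it into a regmap_field that
 * can be read and written without open-coded shifting and masking. The
 * register layout below is hypothetical.
 *
 *	#define FOO_PWR_REG	0x03
 *
 *	static const struct reg_field foo_pwr_mode = REG_FIELD(FOO_PWR_REG, 2, 4);
 *
 *	field = devm_regmap_field_alloc(dev, map, foo_pwr_mode);
 *	if (IS_ERR(field))
 *		return PTR_ERR(field);
 *	regmap_field_write(field, 0x5);		// touches bits [4:2] only
 */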

/**
 * regmap_field_bulk_alloc() - Allocate and initialise a bulk register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields should be freed by calling
 * regmap_field_bulk_free().
 */
int regmap_field_bulk_alloc(struct regmap *regmap,
			    struct regmap_field **rm_field,
			    struct reg_field *reg_field,
			    int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);

/**
 * devm_regmap_field_bulk_alloc() - Allocate and initialise bulk register
 * fields.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields will be automatically freed by the
 * device management code.
 */
int devm_regmap_field_bulk_alloc(struct device *dev,
				 struct regmap *regmap,
				 struct regmap_field **rm_field,
				 struct reg_field *reg_field,
				 int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);

/**
 * regmap_field_bulk_free() - Free register fields allocated using
 * regmap_field_bulk_alloc().
 *
 * @field: regmap fields which should be freed.
 */
void regmap_field_bulk_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_free);

/**
 * devm_regmap_field_bulk_free() - Free bulk register fields allocated using
 * devm_regmap_field_bulk_alloc().
 *
 * @dev: Device that will be interacted with
 * @field: regmap fields which should be freed.
 *
 * Free register fields allocated using devm_regmap_field_bulk_alloc().
 * Usually drivers need not call this function, as the memory allocated
 * via devm will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_bulk_free(struct device *dev,
				 struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 * devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once it's finished working with it using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 * regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration. Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache. This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	int ret;

	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_init(map);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	if (map->lock == regmap_lock_mutex)
		mutex_destroy(&map->mutex);
	kfree_const(map->name);
	kfree(map->patch);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return !strcmp((*r)->name, data);
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL. If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used. Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
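
/*
 * Example (sketch only): dev_get_regmap() lets a child or related device
 * borrow a regmap registered by another driver, typically an MFD parent.
 * A child driver might do (error handling and names are illustrative):
 *
 *	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 *
 * Passing NULL for @name picks the first (usually only) regmap attached to
 * that device via regmap_attach_dev().
 */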
1541
1542/**
1543 * regmap_get_device() - Obtain the device from a regmap
1544 *
1545 * @map: Register map to operate on.
1546 *
1547 * Returns the underlying device that the regmap has been created for.
1548 */
1549struct device *regmap_get_device(struct regmap *map)
1550{
1551 return map->dev;
1552}
1553EXPORT_SYMBOL_GPL(regmap_get_device);
1554
1555static int _regmap_select_page(struct regmap *map, unsigned int *reg,
1556 struct regmap_range_node *range,
1557 unsigned int val_num)
1558{
1559 void *orig_work_buf;
1560 unsigned int win_offset;
1561 unsigned int win_page;
1562 bool page_chg;
1563 int ret;
1564
1565 win_offset = (*reg - range->range_min) % range->window_len;
1566 win_page = (*reg - range->range_min) / range->window_len;
1567
1568 if (val_num > 1) {
1569 /* Bulk write shouldn't cross range boundary */
1570 if (*reg + val_num - 1 > range->range_max)
1571 return -EINVAL;
1572
1573 /* ... or single page boundary */
1574 if (val_num > range->window_len - win_offset)
1575 return -EINVAL;
1576 }
1577
1578 /* It is possible to have selector register inside data window.
1579 In that case, selector register is located on every page and
1580 it needs no page switching, when accessed alone. */
1581 if (val_num > 1 ||
1582 range->window_start + win_offset != range->selector_reg) {
1583 /* Use separate work_buf during page switching */
1584 orig_work_buf = map->work_buf;
1585 map->work_buf = map->selector_work_buf;
1586
1587 ret = _regmap_update_bits(map, range->selector_reg,
1588 range->selector_mask,
1589 win_page << range->selector_shift,
1590 &page_chg, false);
1591
1592 map->work_buf = orig_work_buf;
1593
1594 if (ret != 0)
1595 return ret;
1596 }
1597
1598 *reg = range->window_start + win_offset;
1599
1600 return 0;
1601}
1602
1603static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
1604 unsigned long mask)
1605{
1606 u8 *buf;
1607 int i;
1608
1609 if (!mask || !map->work_buf)
1610 return;
1611
1612 buf = map->work_buf;
1613
1614 for (i = 0; i < max_bytes; i++)
1615 buf[i] |= (mask >> (8 * i)) & 0xff;
1616}
1617
1618static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
Olivier Deprez0e641232021-09-23 10:07:05 +02001619 const void *val, size_t val_len, bool noinc)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001620{
1621 struct regmap_range_node *range;
1622 unsigned long flags;
1623 void *work_val = map->work_buf + map->format.reg_bytes +
1624 map->format.pad_bytes;
1625 void *buf;
1626 int ret = -ENOTSUPP;
1627 size_t len;
1628 int i;
1629
1630 WARN_ON(!map->bus);
1631
Olivier Deprez0e641232021-09-23 10:07:05 +02001632 /* Check for unwritable or noinc registers in range
1633 * before we start
1634 */
1635 if (!regmap_writeable_noinc(map, reg)) {
1636 for (i = 0; i < val_len / map->format.val_bytes; i++) {
1637 unsigned int element =
1638 reg + regmap_get_offset(map, i);
1639 if (!regmap_writeable(map, element) ||
1640 regmap_writeable_noinc(map, element))
1641 return -EINVAL;
1642 }
1643 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001644
1645 if (!map->cache_bypass && map->format.parse_val) {
1646 unsigned int ival;
1647 int val_bytes = map->format.val_bytes;
1648 for (i = 0; i < val_len / val_bytes; i++) {
1649 ival = map->format.parse_val(val + (i * val_bytes));
1650 ret = regcache_write(map,
1651 reg + regmap_get_offset(map, i),
1652 ival);
1653 if (ret) {
1654 dev_err(map->dev,
1655 "Error in caching of register: %x ret: %d\n",
Olivier Deprez0e641232021-09-23 10:07:05 +02001656 reg + regmap_get_offset(map, i), ret);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001657 return ret;
1658 }
1659 }
1660 if (map->cache_only) {
1661 map->cache_dirty = true;
1662 return 0;
1663 }
1664 }
1665
1666 range = _regmap_range_lookup(map, reg);
1667 if (range) {
1668 int val_num = val_len / map->format.val_bytes;
1669 int win_offset = (reg - range->range_min) % range->window_len;
1670 int win_residue = range->window_len - win_offset;
1671
1672 /* If the write goes beyond the end of the window split it */
1673 while (val_num > win_residue) {
1674 dev_dbg(map->dev, "Writing window %d/%zu\n",
1675 win_residue, val_len / map->format.val_bytes);
1676 ret = _regmap_raw_write_impl(map, reg, val,
1677 win_residue *
Olivier Deprez0e641232021-09-23 10:07:05 +02001678 map->format.val_bytes, noinc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001679 if (ret != 0)
1680 return ret;
1681
1682 reg += win_residue;
1683 val_num -= win_residue;
1684 val += win_residue * map->format.val_bytes;
1685 val_len -= win_residue * map->format.val_bytes;
1686
1687 win_offset = (reg - range->range_min) %
1688 range->window_len;
1689 win_residue = range->window_len - win_offset;
1690 }
1691
Olivier Deprez0e641232021-09-23 10:07:05 +02001692 ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001693 if (ret != 0)
1694 return ret;
1695 }
1696
1697 map->format.format_reg(map->work_buf, reg, map->reg_shift);
1698 regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
1699 map->write_flag_mask);
1700
1701 /*
1702 * Essentially all I/O mechanisms will be faster with a single
1703 * buffer to write. Since register syncs often generate raw
1704 * writes of single registers optimise that case.
1705 */
1706 if (val != work_val && val_len == map->format.val_bytes) {
1707 memcpy(work_val, val, map->format.val_bytes);
1708 val = work_val;
1709 }
1710
1711 if (map->async && map->bus->async_write) {
1712 struct regmap_async *async;
1713
1714 trace_regmap_async_write_start(map, reg, val_len);
1715
1716 spin_lock_irqsave(&map->async_lock, flags);
1717 async = list_first_entry_or_null(&map->async_free,
1718 struct regmap_async,
1719 list);
1720 if (async)
1721 list_del(&async->list);
1722 spin_unlock_irqrestore(&map->async_lock, flags);
1723
1724 if (!async) {
1725 async = map->bus->async_alloc();
1726 if (!async)
1727 return -ENOMEM;
1728
1729 async->work_buf = kzalloc(map->format.buf_size,
1730 GFP_KERNEL | GFP_DMA);
1731 if (!async->work_buf) {
1732 kfree(async);
1733 return -ENOMEM;
1734 }
1735 }
1736
1737 async->map = map;
1738
1739 /* If the caller supplied the value we can use it safely. */
1740 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
1741 map->format.reg_bytes + map->format.val_bytes);
1742
1743 spin_lock_irqsave(&map->async_lock, flags);
1744 list_add_tail(&async->list, &map->async_list);
1745 spin_unlock_irqrestore(&map->async_lock, flags);
1746
1747 if (val != work_val)
1748 ret = map->bus->async_write(map->bus_context,
1749 async->work_buf,
1750 map->format.reg_bytes +
1751 map->format.pad_bytes,
1752 val, val_len, async);
1753 else
1754 ret = map->bus->async_write(map->bus_context,
1755 async->work_buf,
1756 map->format.reg_bytes +
1757 map->format.pad_bytes +
1758 val_len, NULL, 0, async);
1759
1760 if (ret != 0) {
1761 dev_err(map->dev, "Failed to schedule write: %d\n",
1762 ret);
1763
1764 spin_lock_irqsave(&map->async_lock, flags);
1765 list_move(&async->list, &map->async_free);
1766 spin_unlock_irqrestore(&map->async_lock, flags);
1767 }
1768
1769 return ret;
1770 }
1771
1772 trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
1773
1774 /* If we're doing a single register write we can probably just
1775 * send the work_buf directly, otherwise try to do a gather
1776 * write.
1777 */
1778 if (val == work_val)
1779 ret = map->bus->write(map->bus_context, map->work_buf,
1780 map->format.reg_bytes +
1781 map->format.pad_bytes +
1782 val_len);
1783 else if (map->bus->gather_write)
1784 ret = map->bus->gather_write(map->bus_context, map->work_buf,
1785 map->format.reg_bytes +
1786 map->format.pad_bytes,
1787 val, val_len);
David Brazdil0f672f62019-12-10 10:32:29 +00001788 else
1789 ret = -ENOTSUPP;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001790
1791 /* If that didn't work fall back on linearising by hand. */
1792 if (ret == -ENOTSUPP) {
1793 len = map->format.reg_bytes + map->format.pad_bytes + val_len;
1794 buf = kzalloc(len, GFP_KERNEL);
1795 if (!buf)
1796 return -ENOMEM;
1797
1798 memcpy(buf, map->work_buf, map->format.reg_bytes);
1799 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
1800 val, val_len);
1801 ret = map->bus->write(map->bus_context, buf, len);
1802
1803 kfree(buf);
1804 } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
1805 /* regcache_drop_region() takes lock that we already have,
1806 * thus call map->cache_ops->drop() directly
1807 */
1808 if (map->cache_ops && map->cache_ops->drop)
1809 map->cache_ops->drop(map, reg, reg + 1);
1810 }
1811
1812 trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
1813
1814 return ret;
1815}
1816
1817/**
1818 * regmap_can_raw_write - Test if regmap_raw_write() is supported
1819 *
1820 * @map: Map to check.
1821 */
1822bool regmap_can_raw_write(struct regmap *map)
1823{
1824 return map->bus && map->bus->write && map->format.format_val &&
1825 map->format.format_reg;
1826}
1827EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1828
1829/**
1830 * regmap_get_raw_read_max - Get the maximum size we can read
1831 *
1832 * @map: Map to check.
1833 */
1834size_t regmap_get_raw_read_max(struct regmap *map)
1835{
1836 return map->max_raw_read;
1837}
1838EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1839
1840/**
1841 * regmap_get_raw_write_max - Get the maximum size we can read
1842 *
1843 * @map: Map to check.
1844 */
1845size_t regmap_get_raw_write_max(struct regmap *map)
1846{
1847 return map->max_raw_write;
1848}
1849EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1850
1851static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1852 unsigned int val)
1853{
1854 int ret;
1855 struct regmap_range_node *range;
1856 struct regmap *map = context;
1857
1858 WARN_ON(!map->bus || !map->format.format_write);
1859
1860 range = _regmap_range_lookup(map, reg);
1861 if (range) {
1862 ret = _regmap_select_page(map, &reg, range, 1);
1863 if (ret != 0)
1864 return ret;
1865 }
1866
1867 map->format.format_write(map, reg, val);
1868
1869 trace_regmap_hw_write_start(map, reg, 1);
1870
1871 ret = map->bus->write(map->bus_context, map->work_buf,
1872 map->format.buf_size);
1873
1874 trace_regmap_hw_write_done(map, reg, 1);
1875
1876 return ret;
1877}
1878
1879static int _regmap_bus_reg_write(void *context, unsigned int reg,
1880 unsigned int val)
1881{
1882 struct regmap *map = context;
1883
1884 return map->bus->reg_write(map->bus_context, reg, val);
1885}
1886
1887static int _regmap_bus_raw_write(void *context, unsigned int reg,
1888 unsigned int val)
1889{
1890 struct regmap *map = context;
1891
1892 WARN_ON(!map->bus || !map->format.format_val);
1893
1894 map->format.format_val(map->work_buf + map->format.reg_bytes
1895 + map->format.pad_bytes, val, 0);
1896 return _regmap_raw_write_impl(map, reg,
1897 map->work_buf +
1898 map->format.reg_bytes +
1899 map->format.pad_bytes,
Olivier Deprez0e641232021-09-23 10:07:05 +02001900 map->format.val_bytes,
1901 false);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001902}
1903
1904static inline void *_regmap_map_get_context(struct regmap *map)
1905{
1906 return (map->bus) ? map : map->bus_context;
1907}
1908
1909int _regmap_write(struct regmap *map, unsigned int reg,
1910 unsigned int val)
1911{
1912 int ret;
1913 void *context = _regmap_map_get_context(map);
1914
1915 if (!regmap_writeable(map, reg))
1916 return -EIO;
1917
1918 if (!map->cache_bypass && !map->defer_caching) {
1919 ret = regcache_write(map, reg, val);
1920 if (ret != 0)
1921 return ret;
1922 if (map->cache_only) {
1923 map->cache_dirty = true;
1924 return 0;
1925 }
1926 }
1927
David Brazdil0f672f62019-12-10 10:32:29 +00001928 if (regmap_should_log(map))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001929 dev_info(map->dev, "%x <= %x\n", reg, val);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001930
1931 trace_regmap_reg_write(map, reg, val);
1932
1933 return map->reg_write(context, reg, val);
1934}
1935
1936/**
1937 * regmap_write() - Write a value to a single register
1938 *
1939 * @map: Register map to write to
1940 * @reg: Register to write to
1941 * @val: Value to be written
1942 *
1943 * A value of zero will be returned on success, a negative errno will
1944 * be returned in error cases.
1945 */
1946int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1947{
1948 int ret;
1949
1950 if (!IS_ALIGNED(reg, map->reg_stride))
1951 return -EINVAL;
1952
1953 map->lock(map->lock_arg);
1954
1955 ret = _regmap_write(map, reg, val);
1956
1957 map->unlock(map->lock_arg);
1958
1959 return ret;
1960}
1961EXPORT_SYMBOL_GPL(regmap_write);
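/*
 * Illustrative usage (not part of this file): a driver would typically call
 * regmap_write() from its own code to set a register, e.g. a hypothetical
 * enable bit in a hypothetical control register:
 *
 *	#define FOO_REG_CTRL	0x01
 *	#define FOO_CTRL_EN	BIT(0)
 *
 *	ret = regmap_write(foo->regmap, FOO_REG_CTRL, FOO_CTRL_EN);
 *	if (ret)
 *		dev_err(dev, "failed to enable device: %d\n", ret);
 */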
1962
1963/**
1964 * regmap_write_async() - Write a value to a single register asynchronously
1965 *
1966 * @map: Register map to write to
1967 * @reg: Register to write to
1968 * @val: Value to be written
1969 *
1970 * A value of zero will be returned on success, a negative errno will
1971 * be returned in error cases.
1972 */
1973int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
1974{
1975 int ret;
1976
1977 if (!IS_ALIGNED(reg, map->reg_stride))
1978 return -EINVAL;
1979
1980 map->lock(map->lock_arg);
1981
1982 map->async = true;
1983
1984 ret = _regmap_write(map, reg, val);
1985
1986 map->async = false;
1987
1988 map->unlock(map->lock_arg);
1989
1990 return ret;
1991}
1992EXPORT_SYMBOL_GPL(regmap_write_async);
1993
1994int _regmap_raw_write(struct regmap *map, unsigned int reg,
Olivier Deprez0e641232021-09-23 10:07:05 +02001995 const void *val, size_t val_len, bool noinc)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001996{
1997 size_t val_bytes = map->format.val_bytes;
1998 size_t val_count = val_len / val_bytes;
1999 size_t chunk_count, chunk_bytes;
2000 size_t chunk_regs = val_count;
2001 int ret, i;
2002
2003 if (!val_count)
2004 return -EINVAL;
2005
2006 if (map->use_single_write)
2007 chunk_regs = 1;
2008 else if (map->max_raw_write && val_len > map->max_raw_write)
2009 chunk_regs = map->max_raw_write / val_bytes;
2010
2011 chunk_count = val_count / chunk_regs;
2012 chunk_bytes = chunk_regs * val_bytes;
2013
2014 /* Write as many bytes as possible with chunk_size */
2015 for (i = 0; i < chunk_count; i++) {
Olivier Deprez0e641232021-09-23 10:07:05 +02002016 ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002017 if (ret)
2018 return ret;
2019
2020 reg += regmap_get_offset(map, chunk_regs);
2021 val += chunk_bytes;
2022 val_len -= chunk_bytes;
2023 }
2024
2025 /* Write remaining bytes */
2026 if (val_len)
Olivier Deprez0e641232021-09-23 10:07:05 +02002027 ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002028
2029 return ret;
2030}
2031
2032/**
2033 * regmap_raw_write() - Write raw values to one or more registers
2034 *
2035 * @map: Register map to write to
2036 * @reg: Initial register to write to
2037 * @val: Block of data to be written, laid out for direct transmission to the
2038 * device
2039 * @val_len: Length of data pointed to by val.
2040 *
2041 * This function is intended to be used for things like firmware
2042 * download where a large block of data needs to be transferred to the
2043 * device. No formatting will be done on the data provided.
2044 *
2045 * A value of zero will be returned on success, a negative errno will
2046 * be returned in error cases.
2047 */
2048int regmap_raw_write(struct regmap *map, unsigned int reg,
2049 const void *val, size_t val_len)
2050{
2051 int ret;
2052
2053 if (!regmap_can_raw_write(map))
2054 return -EINVAL;
2055 if (val_len % map->format.val_bytes)
2056 return -EINVAL;
2057
2058 map->lock(map->lock_arg);
2059
Olivier Deprez0e641232021-09-23 10:07:05 +02002060 ret = _regmap_raw_write(map, reg, val, val_len, false);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002061
2062 map->unlock(map->lock_arg);
2063
2064 return ret;
2065}
2066EXPORT_SYMBOL_GPL(regmap_raw_write);
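/*
 * Illustrative usage (not part of this file): raw writes are typically used
 * for firmware or coefficient downloads where the buffer is already laid out
 * in device byte order. The FOO_* names below are hypothetical:
 *
 *	const struct firmware *fw;
 *
 *	ret = request_firmware(&fw, "foo-dsp.bin", dev);
 *	if (ret)
 *		return ret;
 *
 *	ret = regmap_raw_write(foo->regmap, FOO_REG_DSP_BASE, fw->data, fw->size);
 *	release_firmware(fw);
 */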
2067
2068/**
David Brazdil0f672f62019-12-10 10:32:29 +00002069 * regmap_noinc_write() - Write data to a register without incrementing the
2070 * register number
2071 *
2072 * @map: Register map to write to
2073 * @reg: Register to write to
2074 * @val: Pointer to data buffer
2075 * @val_len: Length of output buffer in bytes.
2076 *
2077 * The regmap API usually assumes that bulk bus write operations will write a
2078 * range of registers. Some devices have certain registers for which a write
2079 * operation can write to an internal FIFO.
2080 *
2081 * The target register must be volatile but registers after it can be
2082 * completely unrelated cacheable registers.
2083 *
2084 * This will attempt multiple writes as required to write val_len bytes.
2085 *
2086 * A value of zero will be returned on success, a negative errno will be
2087 * returned in error cases.
2088 */
2089int regmap_noinc_write(struct regmap *map, unsigned int reg,
2090 const void *val, size_t val_len)
2091{
2092 size_t write_len;
2093 int ret;
2094
2095 if (!map->bus)
2096 return -EINVAL;
2097 if (!map->bus->write)
2098 return -ENOTSUPP;
2099 if (val_len % map->format.val_bytes)
2100 return -EINVAL;
2101 if (!IS_ALIGNED(reg, map->reg_stride))
2102 return -EINVAL;
2103 if (val_len == 0)
2104 return -EINVAL;
2105
2106 map->lock(map->lock_arg);
2107
2108 if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
2109 ret = -EINVAL;
2110 goto out_unlock;
2111 }
2112
2113 while (val_len) {
2114 if (map->max_raw_write && map->max_raw_write < val_len)
2115 write_len = map->max_raw_write;
2116 else
2117 write_len = val_len;
Olivier Deprez0e641232021-09-23 10:07:05 +02002118 ret = _regmap_raw_write(map, reg, val, write_len, true);
David Brazdil0f672f62019-12-10 10:32:29 +00002119 if (ret)
2120 goto out_unlock;
2121 val = ((u8 *)val) + write_len;
2122 val_len -= write_len;
2123 }
2124
2125out_unlock:
2126 map->unlock(map->lock_arg);
2127 return ret;
2128}
2129EXPORT_SYMBOL_GPL(regmap_noinc_write);
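/*
 * Illustrative usage (not part of this file): writing a block of data into a
 * device FIFO exposed through a single register address. FOO_REG_FIFO is a
 * hypothetical register that must be volatile and marked as a no-increment
 * write register in the driver's regmap configuration:
 *
 *	ret = regmap_noinc_write(foo->regmap, FOO_REG_FIFO, buf, len);
 *	if (ret)
 *		dev_err(dev, "FIFO write failed: %d\n", ret);
 */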
2130
2131/**
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002132 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
2133 * register field.
2134 *
2135 * @field: Register field to write to
2136 * @mask: Bitmask to change
2137 * @val: Value to be written
2138 * @change: Boolean indicating if a write was done
 2139 * @async: Boolean indicating whether to write asynchronously
 2140 * @force: Boolean indicating whether to force the write
2141 *
2142 * Perform a read/modify/write cycle on the register field with change,
 2143 * async and force options.
2144 *
2145 * A value of zero will be returned on success, a negative errno will
2146 * be returned in error cases.
2147 */
2148int regmap_field_update_bits_base(struct regmap_field *field,
2149 unsigned int mask, unsigned int val,
2150 bool *change, bool async, bool force)
2151{
2152 mask = (mask << field->shift) & field->mask;
2153
2154 return regmap_update_bits_base(field->regmap, field->reg,
2155 mask, val << field->shift,
2156 change, async, force);
2157}
2158EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
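/*
 * Illustrative usage (not part of this file): drivers normally reach this
 * helper through the regmap_field_write()/regmap_field_update_bits() macros
 * after allocating a field. The register layout below is hypothetical:
 *
 *	static const struct reg_field foo_gain_field = REG_FIELD(0x03, 4, 7);
 *
 *	foo->gain = devm_regmap_field_alloc(dev, foo->regmap, foo_gain_field);
 *	if (IS_ERR(foo->gain))
 *		return PTR_ERR(foo->gain);
 *
 *	ret = regmap_field_write(foo->gain, 0xA);
 */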
2159
2160/**
 2161 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
2162 * register field with port ID
2163 *
2164 * @field: Register field to write to
2165 * @id: port ID
2166 * @mask: Bitmask to change
2167 * @val: Value to be written
2168 * @change: Boolean indicating if a write was done
 2169 * @async: Boolean indicating whether to write asynchronously
 2170 * @force: Boolean indicating whether to force the write
2171 *
2172 * A value of zero will be returned on success, a negative errno will
2173 * be returned in error cases.
2174 */
Olivier Deprez157378f2022-04-04 15:47:50 +02002175int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002176 unsigned int mask, unsigned int val,
2177 bool *change, bool async, bool force)
2178{
2179 if (id >= field->id_size)
2180 return -EINVAL;
2181
2182 mask = (mask << field->shift) & field->mask;
2183
2184 return regmap_update_bits_base(field->regmap,
2185 field->reg + (field->id_offset * id),
2186 mask, val << field->shift,
2187 change, async, force);
2188}
2189EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
2190
2191/**
2192 * regmap_bulk_write() - Write multiple registers to the device
2193 *
2194 * @map: Register map to write to
 2195 * @reg: First register to be written to
2196 * @val: Block of data to be written, in native register size for device
2197 * @val_count: Number of registers to write
2198 *
2199 * This function is intended to be used for writing a large block of
 2200 * data to the device either in a single transfer or multiple transfers.
2201 *
2202 * A value of zero will be returned on success, a negative errno will
2203 * be returned in error cases.
2204 */
2205int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
2206 size_t val_count)
2207{
2208 int ret = 0, i;
2209 size_t val_bytes = map->format.val_bytes;
2210
2211 if (!IS_ALIGNED(reg, map->reg_stride))
2212 return -EINVAL;
2213
2214 /*
 2215 * Some devices don't support bulk write, so for them we fall back on a series of
2216 * single write operations.
2217 */
2218 if (!map->bus || !map->format.parse_inplace) {
2219 map->lock(map->lock_arg);
2220 for (i = 0; i < val_count; i++) {
2221 unsigned int ival;
2222
2223 switch (val_bytes) {
2224 case 1:
2225 ival = *(u8 *)(val + (i * val_bytes));
2226 break;
2227 case 2:
2228 ival = *(u16 *)(val + (i * val_bytes));
2229 break;
2230 case 4:
2231 ival = *(u32 *)(val + (i * val_bytes));
2232 break;
2233#ifdef CONFIG_64BIT
2234 case 8:
2235 ival = *(u64 *)(val + (i * val_bytes));
2236 break;
2237#endif
2238 default:
2239 ret = -EINVAL;
2240 goto out;
2241 }
2242
2243 ret = _regmap_write(map,
2244 reg + regmap_get_offset(map, i),
2245 ival);
2246 if (ret != 0)
2247 goto out;
2248 }
2249out:
2250 map->unlock(map->lock_arg);
2251 } else {
2252 void *wval;
2253
2254 wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
2255 if (!wval)
2256 return -ENOMEM;
2257
2258 for (i = 0; i < val_count * val_bytes; i += val_bytes)
2259 map->format.parse_inplace(wval + i);
2260
2261 ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
2262
2263 kfree(wval);
2264 }
2265 return ret;
2266}
2267EXPORT_SYMBOL_GPL(regmap_bulk_write);
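/*
 * Illustrative usage (not part of this file): writing a small table of
 * native-sized register values, here assuming 16-bit registers and a
 * hypothetical FOO_REG_COEFF base address:
 *
 *	u16 coeffs[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };
 *
 *	ret = regmap_bulk_write(foo->regmap, FOO_REG_COEFF, coeffs,
 *				ARRAY_SIZE(coeffs));
 */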
2268
2269/*
2270 * _regmap_raw_multi_reg_write()
2271 *
2272 * the (register,newvalue) pairs in regs have not been formatted, but
2273 * they are all in the same page and have been changed to being page
2274 * relative. The page register has been written if that was necessary.
2275 */
2276static int _regmap_raw_multi_reg_write(struct regmap *map,
2277 const struct reg_sequence *regs,
2278 size_t num_regs)
2279{
2280 int ret;
2281 void *buf;
2282 int i;
2283 u8 *u8;
2284 size_t val_bytes = map->format.val_bytes;
2285 size_t reg_bytes = map->format.reg_bytes;
2286 size_t pad_bytes = map->format.pad_bytes;
2287 size_t pair_size = reg_bytes + pad_bytes + val_bytes;
2288 size_t len = pair_size * num_regs;
2289
2290 if (!len)
2291 return -EINVAL;
2292
2293 buf = kzalloc(len, GFP_KERNEL);
2294 if (!buf)
2295 return -ENOMEM;
2296
2297 /* We have to linearise by hand. */
2298
2299 u8 = buf;
2300
2301 for (i = 0; i < num_regs; i++) {
2302 unsigned int reg = regs[i].reg;
2303 unsigned int val = regs[i].def;
2304 trace_regmap_hw_write_start(map, reg, 1);
2305 map->format.format_reg(u8, reg, map->reg_shift);
2306 u8 += reg_bytes + pad_bytes;
2307 map->format.format_val(u8, val, 0);
2308 u8 += val_bytes;
2309 }
2310 u8 = buf;
2311 *u8 |= map->write_flag_mask;
2312
2313 ret = map->bus->write(map->bus_context, buf, len);
2314
2315 kfree(buf);
2316
2317 for (i = 0; i < num_regs; i++) {
2318 int reg = regs[i].reg;
2319 trace_regmap_hw_write_done(map, reg, 1);
2320 }
2321 return ret;
2322}
2323
2324static unsigned int _regmap_register_page(struct regmap *map,
2325 unsigned int reg,
2326 struct regmap_range_node *range)
2327{
2328 unsigned int win_page = (reg - range->range_min) / range->window_len;
2329
2330 return win_page;
2331}
2332
2333static int _regmap_range_multi_paged_reg_write(struct regmap *map,
2334 struct reg_sequence *regs,
2335 size_t num_regs)
2336{
2337 int ret;
2338 int i, n;
2339 struct reg_sequence *base;
2340 unsigned int this_page = 0;
2341 unsigned int page_change = 0;
2342 /*
 2343 * the set of registers is not necessarily in order, but
2344 * since the order of write must be preserved this algorithm
2345 * chops the set each time the page changes. This also applies
2346 * if there is a delay required at any point in the sequence.
2347 */
2348 base = regs;
2349 for (i = 0, n = 0; i < num_regs; i++, n++) {
2350 unsigned int reg = regs[i].reg;
2351 struct regmap_range_node *range;
2352
2353 range = _regmap_range_lookup(map, reg);
2354 if (range) {
2355 unsigned int win_page = _regmap_register_page(map, reg,
2356 range);
2357
2358 if (i == 0)
2359 this_page = win_page;
2360 if (win_page != this_page) {
2361 this_page = win_page;
2362 page_change = 1;
2363 }
2364 }
2365
2366 /* If we have both a page change and a delay make sure to
2367 * write the regs and apply the delay before we change the
2368 * page.
2369 */
2370
2371 if (page_change || regs[i].delay_us) {
2372
2373 /* For situations where the first write requires
2374 * a delay we need to make sure we don't call
2375 * raw_multi_reg_write with n=0
2376 * This can't occur with page breaks as we
2377 * never write on the first iteration
2378 */
2379 if (regs[i].delay_us && i == 0)
2380 n = 1;
2381
2382 ret = _regmap_raw_multi_reg_write(map, base, n);
2383 if (ret != 0)
2384 return ret;
2385
Olivier Deprez157378f2022-04-04 15:47:50 +02002386 if (regs[i].delay_us) {
2387 if (map->can_sleep)
2388 fsleep(regs[i].delay_us);
2389 else
2390 udelay(regs[i].delay_us);
2391 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002392
2393 base += n;
2394 n = 0;
2395
2396 if (page_change) {
2397 ret = _regmap_select_page(map,
2398 &base[n].reg,
2399 range, 1);
2400 if (ret != 0)
2401 return ret;
2402
2403 page_change = 0;
2404 }
2405
2406 }
2407
2408 }
2409 if (n > 0)
2410 return _regmap_raw_multi_reg_write(map, base, n);
2411 return 0;
2412}
2413
2414static int _regmap_multi_reg_write(struct regmap *map,
2415 const struct reg_sequence *regs,
2416 size_t num_regs)
2417{
2418 int i;
2419 int ret;
2420
2421 if (!map->can_multi_write) {
2422 for (i = 0; i < num_regs; i++) {
2423 ret = _regmap_write(map, regs[i].reg, regs[i].def);
2424 if (ret != 0)
2425 return ret;
2426
Olivier Deprez157378f2022-04-04 15:47:50 +02002427 if (regs[i].delay_us) {
2428 if (map->can_sleep)
2429 fsleep(regs[i].delay_us);
2430 else
2431 udelay(regs[i].delay_us);
2432 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002433 }
2434 return 0;
2435 }
2436
2437 if (!map->format.parse_inplace)
2438 return -EINVAL;
2439
2440 if (map->writeable_reg)
2441 for (i = 0; i < num_regs; i++) {
2442 int reg = regs[i].reg;
2443 if (!map->writeable_reg(map->dev, reg))
2444 return -EINVAL;
2445 if (!IS_ALIGNED(reg, map->reg_stride))
2446 return -EINVAL;
2447 }
2448
2449 if (!map->cache_bypass) {
2450 for (i = 0; i < num_regs; i++) {
2451 unsigned int val = regs[i].def;
2452 unsigned int reg = regs[i].reg;
2453 ret = regcache_write(map, reg, val);
2454 if (ret) {
2455 dev_err(map->dev,
2456 "Error in caching of register: %x ret: %d\n",
2457 reg, ret);
2458 return ret;
2459 }
2460 }
2461 if (map->cache_only) {
2462 map->cache_dirty = true;
2463 return 0;
2464 }
2465 }
2466
2467 WARN_ON(!map->bus);
2468
2469 for (i = 0; i < num_regs; i++) {
2470 unsigned int reg = regs[i].reg;
2471 struct regmap_range_node *range;
2472
2473 /* Coalesce all the writes between a page break or a delay
2474 * in a sequence
2475 */
2476 range = _regmap_range_lookup(map, reg);
2477 if (range || regs[i].delay_us) {
2478 size_t len = sizeof(struct reg_sequence)*num_regs;
2479 struct reg_sequence *base = kmemdup(regs, len,
2480 GFP_KERNEL);
2481 if (!base)
2482 return -ENOMEM;
2483 ret = _regmap_range_multi_paged_reg_write(map, base,
2484 num_regs);
2485 kfree(base);
2486
2487 return ret;
2488 }
2489 }
2490 return _regmap_raw_multi_reg_write(map, regs, num_regs);
2491}
2492
2493/**
2494 * regmap_multi_reg_write() - Write multiple registers to the device
2495 *
2496 * @map: Register map to write to
2497 * @regs: Array of structures containing register,value to be written
2498 * @num_regs: Number of registers to write
2499 *
2500 * Write multiple registers to the device where the set of register, value
2501 * pairs are supplied in any order, possibly not all in a single range.
2502 *
 2503 * The 'normal' block write mode will ultimately send data on the
2504 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
2505 * addressed. However, this alternative block multi write mode will send
2506 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
2507 * must of course support the mode.
2508 *
2509 * A value of zero will be returned on success, a negative errno will be
2510 * returned in error cases.
2511 */
2512int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
2513 int num_regs)
2514{
2515 int ret;
2516
2517 map->lock(map->lock_arg);
2518
2519 ret = _regmap_multi_reg_write(map, regs, num_regs);
2520
2521 map->unlock(map->lock_arg);
2522
2523 return ret;
2524}
2525EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
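/*
 * Illustrative usage (not part of this file): a reg_sequence lets the
 * register/value pairs be listed in any order, and an entry may carry a
 * per-write delay in microseconds (the third field). The FOO_* registers
 * and values are hypothetical:
 *
 *	static const struct reg_sequence foo_init_seq[] = {
 *		{ FOO_REG_PLL,  0x11 },
 *		{ FOO_REG_CLK,  0x22, 100 },
 *		{ FOO_REG_CTRL, 0x01 },
 *	};
 *
 *	ret = regmap_multi_reg_write(foo->regmap, foo_init_seq,
 *				     ARRAY_SIZE(foo_init_seq));
 */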
2526
2527/**
2528 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
2529 * device but not the cache
2530 *
2531 * @map: Register map to write to
2532 * @regs: Array of structures containing register,value to be written
2533 * @num_regs: Number of registers to write
2534 *
2535 * Write multiple registers to the device but not the cache where the set
 2536 * of registers is supplied in any order.
2537 *
2538 * This function is intended to be used for writing a large block of data
 2539 * atomically to the device in a single transfer for those I2C client devices
2540 * that implement this alternative block write mode.
2541 *
2542 * A value of zero will be returned on success, a negative errno will
2543 * be returned in error cases.
2544 */
2545int regmap_multi_reg_write_bypassed(struct regmap *map,
2546 const struct reg_sequence *regs,
2547 int num_regs)
2548{
2549 int ret;
2550 bool bypass;
2551
2552 map->lock(map->lock_arg);
2553
2554 bypass = map->cache_bypass;
2555 map->cache_bypass = true;
2556
2557 ret = _regmap_multi_reg_write(map, regs, num_regs);
2558
2559 map->cache_bypass = bypass;
2560
2561 map->unlock(map->lock_arg);
2562
2563 return ret;
2564}
2565EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
2566
2567/**
2568 * regmap_raw_write_async() - Write raw values to one or more registers
2569 * asynchronously
2570 *
2571 * @map: Register map to write to
2572 * @reg: Initial register to write to
2573 * @val: Block of data to be written, laid out for direct transmission to the
2574 * device. Must be valid until regmap_async_complete() is called.
2575 * @val_len: Length of data pointed to by val.
2576 *
2577 * This function is intended to be used for things like firmware
2578 * download where a large block of data needs to be transferred to the
2579 * device. No formatting will be done on the data provided.
2580 *
2581 * If supported by the underlying bus the write will be scheduled
2582 * asynchronously, helping maximise I/O speed on higher speed buses
2583 * like SPI. regmap_async_complete() can be called to ensure that all
 2584 * asynchronous writes have been completed.
2585 *
2586 * A value of zero will be returned on success, a negative errno will
2587 * be returned in error cases.
2588 */
2589int regmap_raw_write_async(struct regmap *map, unsigned int reg,
2590 const void *val, size_t val_len)
2591{
2592 int ret;
2593
2594 if (val_len % map->format.val_bytes)
2595 return -EINVAL;
2596 if (!IS_ALIGNED(reg, map->reg_stride))
2597 return -EINVAL;
2598
2599 map->lock(map->lock_arg);
2600
2601 map->async = true;
2602
Olivier Deprez0e641232021-09-23 10:07:05 +02002603 ret = _regmap_raw_write(map, reg, val, val_len, false);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002604
2605 map->async = false;
2606
2607 map->unlock(map->lock_arg);
2608
2609 return ret;
2610}
2611EXPORT_SYMBOL_GPL(regmap_raw_write_async);
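/*
 * Illustrative usage (not part of this file): scheduling a large download
 * asynchronously and then waiting for completion. The buffer must stay valid
 * until regmap_async_complete() returns; the FOO_* names are hypothetical:
 *
 *	ret = regmap_raw_write_async(foo->regmap, FOO_REG_DSP_BASE,
 *				     foo->fw_buf, foo->fw_len);
 *	if (ret)
 *		return ret;
 *
 *	return regmap_async_complete(foo->regmap);
 */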
2612
2613static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
Olivier Deprez0e641232021-09-23 10:07:05 +02002614 unsigned int val_len, bool noinc)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002615{
2616 struct regmap_range_node *range;
2617 int ret;
2618
2619 WARN_ON(!map->bus);
2620
2621 if (!map->bus || !map->bus->read)
2622 return -EINVAL;
2623
2624 range = _regmap_range_lookup(map, reg);
2625 if (range) {
2626 ret = _regmap_select_page(map, &reg, range,
Olivier Deprez0e641232021-09-23 10:07:05 +02002627 noinc ? 1 : val_len / map->format.val_bytes);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002628 if (ret != 0)
2629 return ret;
2630 }
2631
2632 map->format.format_reg(map->work_buf, reg, map->reg_shift);
2633 regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
2634 map->read_flag_mask);
2635 trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
2636
2637 ret = map->bus->read(map->bus_context, map->work_buf,
2638 map->format.reg_bytes + map->format.pad_bytes,
2639 val, val_len);
2640
2641 trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
2642
2643 return ret;
2644}
2645
2646static int _regmap_bus_reg_read(void *context, unsigned int reg,
2647 unsigned int *val)
2648{
2649 struct regmap *map = context;
2650
2651 return map->bus->reg_read(map->bus_context, reg, val);
2652}
2653
2654static int _regmap_bus_read(void *context, unsigned int reg,
2655 unsigned int *val)
2656{
2657 int ret;
2658 struct regmap *map = context;
2659 void *work_val = map->work_buf + map->format.reg_bytes +
2660 map->format.pad_bytes;
2661
2662 if (!map->format.parse_val)
2663 return -EINVAL;
2664
Olivier Deprez0e641232021-09-23 10:07:05 +02002665 ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002666 if (ret == 0)
2667 *val = map->format.parse_val(work_val);
2668
2669 return ret;
2670}
2671
2672static int _regmap_read(struct regmap *map, unsigned int reg,
2673 unsigned int *val)
2674{
2675 int ret;
2676 void *context = _regmap_map_get_context(map);
2677
2678 if (!map->cache_bypass) {
2679 ret = regcache_read(map, reg, val);
2680 if (ret == 0)
2681 return 0;
2682 }
2683
2684 if (map->cache_only)
2685 return -EBUSY;
2686
2687 if (!regmap_readable(map, reg))
2688 return -EIO;
2689
2690 ret = map->reg_read(context, reg, val);
2691 if (ret == 0) {
David Brazdil0f672f62019-12-10 10:32:29 +00002692 if (regmap_should_log(map))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002693 dev_info(map->dev, "%x => %x\n", reg, *val);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002694
2695 trace_regmap_reg_read(map, reg, *val);
2696
2697 if (!map->cache_bypass)
2698 regcache_write(map, reg, *val);
2699 }
2700
2701 return ret;
2702}
2703
2704/**
2705 * regmap_read() - Read a value from a single register
2706 *
2707 * @map: Register map to read from
2708 * @reg: Register to be read from
2709 * @val: Pointer to store read value
2710 *
2711 * A value of zero will be returned on success, a negative errno will
2712 * be returned in error cases.
2713 */
2714int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2715{
2716 int ret;
2717
2718 if (!IS_ALIGNED(reg, map->reg_stride))
2719 return -EINVAL;
2720
2721 map->lock(map->lock_arg);
2722
2723 ret = _regmap_read(map, reg, val);
2724
2725 map->unlock(map->lock_arg);
2726
2727 return ret;
2728}
2729EXPORT_SYMBOL_GPL(regmap_read);
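/*
 * Illustrative usage (not part of this file): reading back a single register,
 * here a hypothetical status register, and testing one of its bits:
 *
 *	unsigned int status;
 *
 *	ret = regmap_read(foo->regmap, FOO_REG_STATUS, &status);
 *	if (ret)
 *		return ret;
 *	if (status & FOO_STATUS_READY)
 *		complete(&foo->ready);
 */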
2730
2731/**
2732 * regmap_raw_read() - Read raw data from the device
2733 *
2734 * @map: Register map to read from
2735 * @reg: First register to be read from
2736 * @val: Pointer to store read value
2737 * @val_len: Size of data to read
2738 *
2739 * A value of zero will be returned on success, a negative errno will
2740 * be returned in error cases.
2741 */
2742int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2743 size_t val_len)
2744{
2745 size_t val_bytes = map->format.val_bytes;
2746 size_t val_count = val_len / val_bytes;
2747 unsigned int v;
2748 int ret, i;
2749
2750 if (!map->bus)
2751 return -EINVAL;
2752 if (val_len % map->format.val_bytes)
2753 return -EINVAL;
2754 if (!IS_ALIGNED(reg, map->reg_stride))
2755 return -EINVAL;
2756 if (val_count == 0)
2757 return -EINVAL;
2758
2759 map->lock(map->lock_arg);
2760
2761 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
2762 map->cache_type == REGCACHE_NONE) {
2763 size_t chunk_count, chunk_bytes;
2764 size_t chunk_regs = val_count;
2765
2766 if (!map->bus->read) {
2767 ret = -ENOTSUPP;
2768 goto out;
2769 }
2770
2771 if (map->use_single_read)
2772 chunk_regs = 1;
2773 else if (map->max_raw_read && val_len > map->max_raw_read)
2774 chunk_regs = map->max_raw_read / val_bytes;
2775
2776 chunk_count = val_count / chunk_regs;
2777 chunk_bytes = chunk_regs * val_bytes;
2778
2779 /* Read bytes that fit into whole chunks */
2780 for (i = 0; i < chunk_count; i++) {
Olivier Deprez0e641232021-09-23 10:07:05 +02002781 ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002782 if (ret != 0)
2783 goto out;
2784
2785 reg += regmap_get_offset(map, chunk_regs);
2786 val += chunk_bytes;
2787 val_len -= chunk_bytes;
2788 }
2789
2790 /* Read remaining bytes */
2791 if (val_len) {
Olivier Deprez0e641232021-09-23 10:07:05 +02002792 ret = _regmap_raw_read(map, reg, val, val_len, false);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002793 if (ret != 0)
2794 goto out;
2795 }
2796 } else {
2797 /* Otherwise go word by word for the cache; should be low
2798 * cost as we expect to hit the cache.
2799 */
2800 for (i = 0; i < val_count; i++) {
2801 ret = _regmap_read(map, reg + regmap_get_offset(map, i),
2802 &v);
2803 if (ret != 0)
2804 goto out;
2805
2806 map->format.format_val(val + (i * val_bytes), v, 0);
2807 }
2808 }
2809
2810 out:
2811 map->unlock(map->lock_arg);
2812
2813 return ret;
2814}
2815EXPORT_SYMBOL_GPL(regmap_raw_read);
2816
2817/**
 2818 * regmap_noinc_read() - Read data from a register without incrementing the
2819 * register number
2820 *
2821 * @map: Register map to read from
2822 * @reg: Register to read from
2823 * @val: Pointer to data buffer
2824 * @val_len: Length of output buffer in bytes.
2825 *
2826 * The regmap API usually assumes that bulk bus read operations will read a
2827 * range of registers. Some devices have certain registers for which a read
 2828 * operation will read from an internal FIFO.
2829 *
2830 * The target register must be volatile but registers after it can be
2831 * completely unrelated cacheable registers.
2832 *
2833 * This will attempt multiple reads as required to read val_len bytes.
2834 *
2835 * A value of zero will be returned on success, a negative errno will be
2836 * returned in error cases.
2837 */
2838int regmap_noinc_read(struct regmap *map, unsigned int reg,
2839 void *val, size_t val_len)
2840{
2841 size_t read_len;
2842 int ret;
2843
2844 if (!map->bus)
2845 return -EINVAL;
2846 if (!map->bus->read)
2847 return -ENOTSUPP;
2848 if (val_len % map->format.val_bytes)
2849 return -EINVAL;
2850 if (!IS_ALIGNED(reg, map->reg_stride))
2851 return -EINVAL;
2852 if (val_len == 0)
2853 return -EINVAL;
2854
2855 map->lock(map->lock_arg);
2856
2857 if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
2858 ret = -EINVAL;
2859 goto out_unlock;
2860 }
2861
2862 while (val_len) {
2863 if (map->max_raw_read && map->max_raw_read < val_len)
2864 read_len = map->max_raw_read;
2865 else
2866 read_len = val_len;
Olivier Deprez0e641232021-09-23 10:07:05 +02002867 ret = _regmap_raw_read(map, reg, val, read_len, true);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002868 if (ret)
2869 goto out_unlock;
2870 val = ((u8 *)val) + read_len;
2871 val_len -= read_len;
2872 }
2873
2874out_unlock:
2875 map->unlock(map->lock_arg);
2876 return ret;
2877}
2878EXPORT_SYMBOL_GPL(regmap_noinc_read);
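/*
 * Illustrative usage (not part of this file): draining a device FIFO that is
 * exposed through a single register address. FOO_REG_FIFO is hypothetical and
 * must be volatile and marked as a no-increment read register in the driver's
 * regmap configuration:
 *
 *	u8 fifo_data[64];
 *
 *	ret = regmap_noinc_read(foo->regmap, FOO_REG_FIFO, fifo_data,
 *				sizeof(fifo_data));
 */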
2879
2880/**
 2881 * regmap_field_read() - Read a value from a single register field
2882 *
2883 * @field: Register field to read from
2884 * @val: Pointer to store read value
2885 *
2886 * A value of zero will be returned on success, a negative errno will
2887 * be returned in error cases.
2888 */
2889int regmap_field_read(struct regmap_field *field, unsigned int *val)
2890{
2891 int ret;
2892 unsigned int reg_val;
2893 ret = regmap_read(field->regmap, field->reg, &reg_val);
2894 if (ret != 0)
2895 return ret;
2896
2897 reg_val &= field->mask;
2898 reg_val >>= field->shift;
2899 *val = reg_val;
2900
2901 return ret;
2902}
2903EXPORT_SYMBOL_GPL(regmap_field_read);
2904
2905/**
2906 * regmap_fields_read() - Read a value to a single register field with port ID
2907 *
2908 * @field: Register field to read from
2909 * @id: port ID
2910 * @val: Pointer to store read value
2911 *
2912 * A value of zero will be returned on success, a negative errno will
2913 * be returned in error cases.
2914 */
2915int regmap_fields_read(struct regmap_field *field, unsigned int id,
2916 unsigned int *val)
2917{
2918 int ret;
2919 unsigned int reg_val;
2920
2921 if (id >= field->id_size)
2922 return -EINVAL;
2923
2924 ret = regmap_read(field->regmap,
2925 field->reg + (field->id_offset * id),
2926 &reg_val);
2927 if (ret != 0)
2928 return ret;
2929
2930 reg_val &= field->mask;
2931 reg_val >>= field->shift;
2932 *val = reg_val;
2933
2934 return ret;
2935}
2936EXPORT_SYMBOL_GPL(regmap_fields_read);
2937
2938/**
2939 * regmap_bulk_read() - Read multiple registers from the device
2940 *
2941 * @map: Register map to read from
2942 * @reg: First register to be read from
2943 * @val: Pointer to store read value, in native register size for device
2944 * @val_count: Number of registers to read
2945 *
2946 * A value of zero will be returned on success, a negative errno will
2947 * be returned in error cases.
2948 */
2949int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
2950 size_t val_count)
2951{
2952 int ret, i;
2953 size_t val_bytes = map->format.val_bytes;
2954 bool vol = regmap_volatile_range(map, reg, val_count);
2955
2956 if (!IS_ALIGNED(reg, map->reg_stride))
2957 return -EINVAL;
2958 if (val_count == 0)
2959 return -EINVAL;
2960
2961 if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
2962 ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
2963 if (ret != 0)
2964 return ret;
2965
2966 for (i = 0; i < val_count * val_bytes; i += val_bytes)
2967 map->format.parse_inplace(val + i);
2968 } else {
2969#ifdef CONFIG_64BIT
2970 u64 *u64 = val;
2971#endif
2972 u32 *u32 = val;
2973 u16 *u16 = val;
2974 u8 *u8 = val;
2975
2976 map->lock(map->lock_arg);
2977
2978 for (i = 0; i < val_count; i++) {
2979 unsigned int ival;
2980
2981 ret = _regmap_read(map, reg + regmap_get_offset(map, i),
2982 &ival);
2983 if (ret != 0)
2984 goto out;
2985
2986 switch (map->format.val_bytes) {
2987#ifdef CONFIG_64BIT
2988 case 8:
2989 u64[i] = ival;
2990 break;
2991#endif
2992 case 4:
2993 u32[i] = ival;
2994 break;
2995 case 2:
2996 u16[i] = ival;
2997 break;
2998 case 1:
2999 u8[i] = ival;
3000 break;
3001 default:
3002 ret = -EINVAL;
3003 goto out;
3004 }
3005 }
3006
3007out:
3008 map->unlock(map->lock_arg);
3009 }
3010
3011 return ret;
3012}
3013EXPORT_SYMBOL_GPL(regmap_bulk_read);
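/*
 * Illustrative usage (not part of this file): reading a block of consecutive
 * registers into a native-sized buffer, here assuming 16-bit registers and a
 * hypothetical FOO_REG_ADC_BASE address:
 *
 *	u16 samples[8];
 *
 *	ret = regmap_bulk_read(foo->regmap, FOO_REG_ADC_BASE, samples,
 *			       ARRAY_SIZE(samples));
 */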
3014
3015static int _regmap_update_bits(struct regmap *map, unsigned int reg,
3016 unsigned int mask, unsigned int val,
3017 bool *change, bool force_write)
3018{
3019 int ret;
3020 unsigned int tmp, orig;
3021
3022 if (change)
3023 *change = false;
3024
3025 if (regmap_volatile(map, reg) && map->reg_update_bits) {
3026 ret = map->reg_update_bits(map->bus_context, reg, mask, val);
3027 if (ret == 0 && change)
3028 *change = true;
3029 } else {
3030 ret = _regmap_read(map, reg, &orig);
3031 if (ret != 0)
3032 return ret;
3033
3034 tmp = orig & ~mask;
3035 tmp |= val & mask;
3036
3037 if (force_write || (tmp != orig)) {
3038 ret = _regmap_write(map, reg, tmp);
3039 if (ret == 0 && change)
3040 *change = true;
3041 }
3042 }
3043
3044 return ret;
3045}
3046
3047/**
3048 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
3049 *
3050 * @map: Register map to update
3051 * @reg: Register to update
3052 * @mask: Bitmask to change
3053 * @val: New value for bitmask
3054 * @change: Boolean indicating if a write was done
 3055 * @async: Boolean indicating whether to write asynchronously
 3056 * @force: Boolean indicating whether to force the write
3057 *
3058 * Perform a read/modify/write cycle on a register map with change, async, force
3059 * options.
3060 *
3061 * If async is true:
3062 *
3063 * With most buses the read must be done synchronously so this is most useful
3064 * for devices with a cache which do not need to interact with the hardware to
3065 * determine the current register value.
3066 *
3067 * Returns zero for success, a negative number on error.
3068 */
3069int regmap_update_bits_base(struct regmap *map, unsigned int reg,
3070 unsigned int mask, unsigned int val,
3071 bool *change, bool async, bool force)
3072{
3073 int ret;
3074
3075 map->lock(map->lock_arg);
3076
3077 map->async = async;
3078
3079 ret = _regmap_update_bits(map, reg, mask, val, change, force);
3080
3081 map->async = false;
3082
3083 map->unlock(map->lock_arg);
3084
3085 return ret;
3086}
3087EXPORT_SYMBOL_GPL(regmap_update_bits_base);
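/*
 * Illustrative usage (not part of this file): most callers use the
 * regmap_update_bits() wrapper, which passes change/async/force as
 * NULL/false/false. Setting a hypothetical 2-bit gain field without
 * disturbing the rest of the register:
 *
 *	ret = regmap_update_bits(foo->regmap, FOO_REG_CTRL, FOO_CTRL_GAIN_MASK,
 *				 2 << FOO_CTRL_GAIN_SHIFT);
 */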
3088
Olivier Deprez157378f2022-04-04 15:47:50 +02003089/**
3090 * regmap_test_bits() - Check if all specified bits are set in a register.
3091 *
3092 * @map: Register map to operate on
3093 * @reg: Register to read from
3094 * @bits: Bits to test
3095 *
3096 * Returns 0 if at least one of the tested bits is not set, 1 if all tested
3097 * bits are set and a negative error number if the underlying regmap_read()
3098 * fails.
3099 */
3100int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
3101{
 3102	unsigned int val;
	int ret;
3103
3104 ret = regmap_read(map, reg, &val);
3105 if (ret)
3106 return ret;
3107
3108 return (val & bits) == bits;
3109}
3110EXPORT_SYMBOL_GPL(regmap_test_bits);
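/*
 * Illustrative usage (not part of this file): checking a hypothetical "PLL
 * locked" bit; a negative return means the read itself failed, 0 means the
 * bit is clear, 1 means it is set:
 *
 *	ret = regmap_test_bits(foo->regmap, FOO_REG_STATUS, FOO_STATUS_PLL_LOCK);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		return -EAGAIN;
 */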
3111
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003112void regmap_async_complete_cb(struct regmap_async *async, int ret)
3113{
3114 struct regmap *map = async->map;
3115 bool wake;
3116
3117 trace_regmap_async_io_complete(map);
3118
3119 spin_lock(&map->async_lock);
3120 list_move(&async->list, &map->async_free);
3121 wake = list_empty(&map->async_list);
3122
3123 if (ret != 0)
3124 map->async_ret = ret;
3125
3126 spin_unlock(&map->async_lock);
3127
3128 if (wake)
3129 wake_up(&map->async_waitq);
3130}
3131EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
3132
3133static int regmap_async_is_done(struct regmap *map)
3134{
3135 unsigned long flags;
3136 int ret;
3137
3138 spin_lock_irqsave(&map->async_lock, flags);
3139 ret = list_empty(&map->async_list);
3140 spin_unlock_irqrestore(&map->async_lock, flags);
3141
3142 return ret;
3143}
3144
3145/**
3146 * regmap_async_complete - Ensure all asynchronous I/O has completed.
3147 *
3148 * @map: Map to operate on.
3149 *
3150 * Blocks until any pending asynchronous I/O has completed. Returns
3151 * an error code for any failed I/O operations.
3152 */
3153int regmap_async_complete(struct regmap *map)
3154{
3155 unsigned long flags;
3156 int ret;
3157
3158 /* Nothing to do with no async support */
3159 if (!map->bus || !map->bus->async_write)
3160 return 0;
3161
3162 trace_regmap_async_complete_start(map);
3163
3164 wait_event(map->async_waitq, regmap_async_is_done(map));
3165
3166 spin_lock_irqsave(&map->async_lock, flags);
3167 ret = map->async_ret;
3168 map->async_ret = 0;
3169 spin_unlock_irqrestore(&map->async_lock, flags);
3170
3171 trace_regmap_async_complete_done(map);
3172
3173 return ret;
3174}
3175EXPORT_SYMBOL_GPL(regmap_async_complete);
3176
3177/**
3178 * regmap_register_patch - Register and apply register updates to be applied
 3179 * on device initialisation
3180 *
3181 * @map: Register map to apply updates to.
3182 * @regs: Values to update.
3183 * @num_regs: Number of entries in regs.
3184 *
3185 * Register a set of register updates to be applied to the device
3186 * whenever the device registers are synchronised with the cache and
3187 * apply them immediately. Typically this is used to apply
 3188 * corrections to the device defaults on startup, such
3189 * as the updates some vendors provide to undocumented registers.
3190 *
3191 * The caller must ensure that this function cannot be called
3192 * concurrently with either itself or regcache_sync().
3193 */
3194int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
3195 int num_regs)
3196{
3197 struct reg_sequence *p;
3198 int ret;
3199 bool bypass;
3200
3201 if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
3202 num_regs))
3203 return 0;
3204
3205 p = krealloc(map->patch,
3206 sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
3207 GFP_KERNEL);
3208 if (p) {
3209 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
3210 map->patch = p;
3211 map->patch_regs += num_regs;
3212 } else {
3213 return -ENOMEM;
3214 }
3215
3216 map->lock(map->lock_arg);
3217
3218 bypass = map->cache_bypass;
3219
3220 map->cache_bypass = true;
3221 map->async = true;
3222
3223 ret = _regmap_multi_reg_write(map, regs, num_regs);
3224
3225 map->async = false;
3226 map->cache_bypass = bypass;
3227
3228 map->unlock(map->lock_arg);
3229
3230 regmap_async_complete(map);
3231
3232 return ret;
3233}
3234EXPORT_SYMBOL_GPL(regmap_register_patch);
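/*
 * Illustrative usage (not part of this file): registering a small vendor
 * erratum patch at probe time so it is reapplied whenever the cache is
 * synchronised. The register addresses and values below are hypothetical:
 *
 *	static const struct reg_sequence foo_errata[] = {
 *		{ 0x7a, 0x13 },
 *		{ 0x7b, 0x41 },
 *	};
 *
 *	ret = regmap_register_patch(foo->regmap, foo_errata,
 *				    ARRAY_SIZE(foo_errata));
 */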
3235
3236/**
3237 * regmap_get_val_bytes() - Report the size of a register value
3238 *
3239 * @map: Register map to operate on.
3240 *
 3241 * Report the size of a register value, mainly intended for use by
3242 * generic infrastructure built on top of regmap.
3243 */
3244int regmap_get_val_bytes(struct regmap *map)
3245{
3246 if (map->format.format_write)
3247 return -EINVAL;
3248
3249 return map->format.val_bytes;
3250}
3251EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
3252
3253/**
3254 * regmap_get_max_register() - Report the max register value
3255 *
3256 * @map: Register map to operate on.
3257 *
 3258 * Report the max register value, mainly intended for use by
3259 * generic infrastructure built on top of regmap.
3260 */
3261int regmap_get_max_register(struct regmap *map)
3262{
3263 return map->max_register ? map->max_register : -EINVAL;
3264}
3265EXPORT_SYMBOL_GPL(regmap_get_max_register);
3266
3267/**
3268 * regmap_get_reg_stride() - Report the register address stride
3269 *
3270 * @map: Register map to operate on.
3271 *
 3272 * Report the register address stride, mainly intended for use by
3273 * generic infrastructure built on top of regmap.
3274 */
3275int regmap_get_reg_stride(struct regmap *map)
3276{
3277 return map->reg_stride;
3278}
3279EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
3280
3281int regmap_parse_val(struct regmap *map, const void *buf,
3282 unsigned int *val)
3283{
3284 if (!map->format.parse_val)
3285 return -EINVAL;
3286
3287 *val = map->format.parse_val(buf);
3288
3289 return 0;
3290}
3291EXPORT_SYMBOL_GPL(regmap_parse_val);
3292
3293static int __init regmap_initcall(void)
3294{
3295 regmap_debugfs_initcall();
3296
3297 return 0;
3298}
3299postcore_initcall(regmap_initcall);