// SPDX-License-Identifier: GPL-2.0
//
// regmap based irq_chip
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
9#include <linux/device.h>
10#include <linux/export.h>
11#include <linux/interrupt.h>
12#include <linux/irq.h>
13#include <linux/irqdomain.h>
14#include <linux/pm_runtime.h>
15#include <linux/regmap.h>
16#include <linux/slab.h>
17
18#include "internal.h"
19
20struct regmap_irq_chip_data {
21 struct mutex lock;
22 struct irq_chip irq_chip;
23
24 struct regmap *map;
25 const struct regmap_irq_chip *chip;
26
27 int irq_base;
28 struct irq_domain *domain;
29
30 int irq;
31 int wake_count;
32
33 void *status_reg_buf;
David Brazdil0f672f62019-12-10 10:32:29 +000034 unsigned int *main_status_buf;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000035 unsigned int *status_buf;
36 unsigned int *mask_buf;
37 unsigned int *mask_buf_def;
38 unsigned int *wake_buf;
39 unsigned int *type_buf;
40 unsigned int *type_buf_def;
41
42 unsigned int irq_reg_stride;
43 unsigned int type_reg_stride;
David Brazdil0f672f62019-12-10 10:32:29 +000044
45 bool clear_status:1;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000046};
47
48static inline const
49struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
50 int irq)
51{
52 return &data->chip->irqs[irq];
53}
54
55static void regmap_irq_lock(struct irq_data *data)
56{
57 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
58
59 mutex_lock(&d->lock);
60}
61
62static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
63 unsigned int reg, unsigned int mask,
64 unsigned int val)
65{
66 if (d->chip->mask_writeonly)
67 return regmap_write_bits(d->map, reg, mask, val);
68 else
69 return regmap_update_bits(d->map, reg, mask, val);
70}
71
72static void regmap_irq_sync_unlock(struct irq_data *data)
73{
74 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
75 struct regmap *map = d->map;
76 int i, ret;
77 u32 reg;
78 u32 unmask_offset;
David Brazdil0f672f62019-12-10 10:32:29 +000079 u32 val;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000080
81 if (d->chip->runtime_pm) {
82 ret = pm_runtime_get_sync(map->dev);
83 if (ret < 0)
84 dev_err(map->dev, "IRQ sync failed to resume: %d\n",
85 ret);
86 }
87
David Brazdil0f672f62019-12-10 10:32:29 +000088 if (d->clear_status) {
89 for (i = 0; i < d->chip->num_regs; i++) {
90 reg = d->chip->status_base +
91 (i * map->reg_stride * d->irq_reg_stride);
92
93 ret = regmap_read(map, reg, &val);
94 if (ret)
95 dev_err(d->map->dev,
96 "Failed to clear the interrupt status bits\n");
97 }
98
99 d->clear_status = false;
100 }
101
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000102 /*
103 * If there's been a change in the mask write it back to the
104 * hardware. We rely on the use of the regmap core cache to
105 * suppress pointless writes.
106 */
107 for (i = 0; i < d->chip->num_regs; i++) {
David Brazdil0f672f62019-12-10 10:32:29 +0000108 if (!d->chip->mask_base)
109 continue;
110
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000111 reg = d->chip->mask_base +
112 (i * map->reg_stride * d->irq_reg_stride);
113 if (d->chip->mask_invert) {
114 ret = regmap_irq_update_bits(d, reg,
115 d->mask_buf_def[i], ~d->mask_buf[i]);
116 } else if (d->chip->unmask_base) {
117 /* set mask with mask_base register */
118 ret = regmap_irq_update_bits(d, reg,
119 d->mask_buf_def[i], ~d->mask_buf[i]);
120 if (ret < 0)
121 dev_err(d->map->dev,
122 "Failed to sync unmasks in %x\n",
123 reg);
124 unmask_offset = d->chip->unmask_base -
125 d->chip->mask_base;
126 /* clear mask with unmask_base register */
127 ret = regmap_irq_update_bits(d,
128 reg + unmask_offset,
129 d->mask_buf_def[i],
130 d->mask_buf[i]);
131 } else {
132 ret = regmap_irq_update_bits(d, reg,
133 d->mask_buf_def[i], d->mask_buf[i]);
134 }
135 if (ret != 0)
136 dev_err(d->map->dev, "Failed to sync masks in %x\n",
137 reg);
138
139 reg = d->chip->wake_base +
140 (i * map->reg_stride * d->irq_reg_stride);
141 if (d->wake_buf) {
142 if (d->chip->wake_invert)
143 ret = regmap_irq_update_bits(d, reg,
144 d->mask_buf_def[i],
145 ~d->wake_buf[i]);
146 else
147 ret = regmap_irq_update_bits(d, reg,
148 d->mask_buf_def[i],
149 d->wake_buf[i]);
150 if (ret != 0)
151 dev_err(d->map->dev,
152 "Failed to sync wakes in %x: %d\n",
153 reg, ret);
154 }
155
156 if (!d->chip->init_ack_masked)
157 continue;
158 /*
159 * Ack all the masked interrupts unconditionally,
160 * OR if there is masked interrupt which hasn't been Acked,
161 * it'll be ignored in irq handler, then may introduce irq storm
162 */
163 if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
164 reg = d->chip->ack_base +
165 (i * map->reg_stride * d->irq_reg_stride);
166 /* some chips ack by write 0 */
167 if (d->chip->ack_invert)
168 ret = regmap_write(map, reg, ~d->mask_buf[i]);
169 else
170 ret = regmap_write(map, reg, d->mask_buf[i]);
Olivier Deprez157378f2022-04-04 15:47:50 +0200171 if (d->chip->clear_ack) {
172 if (d->chip->ack_invert && !ret)
173 ret = regmap_write(map, reg, UINT_MAX);
174 else if (!ret)
175 ret = regmap_write(map, reg, 0);
176 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000177 if (ret != 0)
178 dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
179 reg, ret);
180 }
181 }
182
David Brazdil0f672f62019-12-10 10:32:29 +0000183 /* Don't update the type bits if we're using mask bits for irq type. */
184 if (!d->chip->type_in_mask) {
185 for (i = 0; i < d->chip->num_type_reg; i++) {
186 if (!d->type_buf_def[i])
187 continue;
188 reg = d->chip->type_base +
189 (i * map->reg_stride * d->type_reg_stride);
190 if (d->chip->type_invert)
191 ret = regmap_irq_update_bits(d, reg,
192 d->type_buf_def[i], ~d->type_buf[i]);
193 else
194 ret = regmap_irq_update_bits(d, reg,
195 d->type_buf_def[i], d->type_buf[i]);
196 if (ret != 0)
197 dev_err(d->map->dev, "Failed to sync type in %x\n",
198 reg);
199 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000200 }
201
202 if (d->chip->runtime_pm)
203 pm_runtime_put(map->dev);
204
205 /* If we've changed our wakeup count propagate it to the parent */
206 if (d->wake_count < 0)
207 for (i = d->wake_count; i < 0; i++)
208 irq_set_irq_wake(d->irq, 0);
209 else if (d->wake_count > 0)
210 for (i = 0; i < d->wake_count; i++)
211 irq_set_irq_wake(d->irq, 1);
212
213 d->wake_count = 0;
214
215 mutex_unlock(&d->lock);
216}
217
218static void regmap_irq_enable(struct irq_data *data)
219{
220 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
221 struct regmap *map = d->map;
222 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
David Brazdil0f672f62019-12-10 10:32:29 +0000223 unsigned int mask, type;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000224
David Brazdil0f672f62019-12-10 10:32:29 +0000225 type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;
226
227 /*
228 * The type_in_mask flag means that the underlying hardware uses
229 * separate mask bits for rising and falling edge interrupts, but
230 * we want to make them into a single virtual interrupt with
231 * configurable edge.
232 *
233 * If the interrupt we're enabling defines the falling or rising
234 * masks then instead of using the regular mask bits for this
235 * interrupt, use the value previously written to the type buffer
236 * at the corresponding offset in regmap_irq_set_type().
237 */
238 if (d->chip->type_in_mask && type)
239 mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
240 else
241 mask = irq_data->mask;
242
243 if (d->chip->clear_on_unmask)
244 d->clear_status = true;
245
246 d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000247}
248
249static void regmap_irq_disable(struct irq_data *data)
250{
251 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
252 struct regmap *map = d->map;
253 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
254
255 d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
256}
257
258static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
259{
260 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
261 struct regmap *map = d->map;
262 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
David Brazdil0f672f62019-12-10 10:32:29 +0000263 int reg;
264 const struct regmap_irq_type *t = &irq_data->type;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000265
David Brazdil0f672f62019-12-10 10:32:29 +0000266 if ((t->types_supported & type) != type)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000267 return 0;
268
David Brazdil0f672f62019-12-10 10:32:29 +0000269 reg = t->type_reg_offset / map->reg_stride;
270
271 if (t->type_reg_mask)
272 d->type_buf[reg] &= ~t->type_reg_mask;
273 else
274 d->type_buf[reg] &= ~(t->type_falling_val |
275 t->type_rising_val |
276 t->type_level_low_val |
277 t->type_level_high_val);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000278 switch (type) {
279 case IRQ_TYPE_EDGE_FALLING:
David Brazdil0f672f62019-12-10 10:32:29 +0000280 d->type_buf[reg] |= t->type_falling_val;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000281 break;
282
283 case IRQ_TYPE_EDGE_RISING:
David Brazdil0f672f62019-12-10 10:32:29 +0000284 d->type_buf[reg] |= t->type_rising_val;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000285 break;
286
287 case IRQ_TYPE_EDGE_BOTH:
David Brazdil0f672f62019-12-10 10:32:29 +0000288 d->type_buf[reg] |= (t->type_falling_val |
289 t->type_rising_val);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000290 break;
291
David Brazdil0f672f62019-12-10 10:32:29 +0000292 case IRQ_TYPE_LEVEL_HIGH:
293 d->type_buf[reg] |= t->type_level_high_val;
294 break;
295
296 case IRQ_TYPE_LEVEL_LOW:
297 d->type_buf[reg] |= t->type_level_low_val;
298 break;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000299 default:
300 return -EINVAL;
301 }
302 return 0;
303}
304
305static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
306{
307 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
308 struct regmap *map = d->map;
309 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
310
311 if (on) {
312 if (d->wake_buf)
313 d->wake_buf[irq_data->reg_offset / map->reg_stride]
314 &= ~irq_data->mask;
315 d->wake_count++;
316 } else {
317 if (d->wake_buf)
318 d->wake_buf[irq_data->reg_offset / map->reg_stride]
319 |= irq_data->mask;
320 d->wake_count--;
321 }
322
323 return 0;
324}
325
326static const struct irq_chip regmap_irq_chip = {
327 .irq_bus_lock = regmap_irq_lock,
328 .irq_bus_sync_unlock = regmap_irq_sync_unlock,
329 .irq_disable = regmap_irq_disable,
330 .irq_enable = regmap_irq_enable,
331 .irq_set_type = regmap_irq_set_type,
332 .irq_set_wake = regmap_irq_set_wake,
333};
334
David Brazdil0f672f62019-12-10 10:32:29 +0000335static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
336 unsigned int b)
337{
338 const struct regmap_irq_chip *chip = data->chip;
339 struct regmap *map = data->map;
340 struct regmap_irq_sub_irq_map *subreg;
341 int i, ret = 0;
342
343 if (!chip->sub_reg_offsets) {
344 /* Assume linear mapping */
345 ret = regmap_read(map, chip->status_base +
346 (b * map->reg_stride * data->irq_reg_stride),
347 &data->status_buf[b]);
348 } else {
349 subreg = &chip->sub_reg_offsets[b];
350 for (i = 0; i < subreg->num_regs; i++) {
351 unsigned int offset = subreg->offset[i];
352
353 ret = regmap_read(map, chip->status_base + offset,
354 &data->status_buf[offset]);
355 if (ret)
356 break;
357 }
358 }
359 return ret;
360}
361
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000362static irqreturn_t regmap_irq_thread(int irq, void *d)
363{
364 struct regmap_irq_chip_data *data = d;
365 const struct regmap_irq_chip *chip = data->chip;
366 struct regmap *map = data->map;
367 int ret, i;
368 bool handled = false;
369 u32 reg;
370
371 if (chip->handle_pre_irq)
372 chip->handle_pre_irq(chip->irq_drv_data);
373
374 if (chip->runtime_pm) {
375 ret = pm_runtime_get_sync(map->dev);
376 if (ret < 0) {
377 dev_err(map->dev, "IRQ thread failed to resume: %d\n",
378 ret);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000379 goto exit;
380 }
381 }
382
383 /*
David Brazdil0f672f62019-12-10 10:32:29 +0000384 * Read only registers with active IRQs if the chip has 'main status
385 * register'. Else read in the statuses, using a single bulk read if
386 * possible in order to reduce the I/O overheads.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000387 */
David Brazdil0f672f62019-12-10 10:32:29 +0000388
389 if (chip->num_main_regs) {
390 unsigned int max_main_bits;
391 unsigned long size;
392
393 size = chip->num_regs * sizeof(unsigned int);
394
395 max_main_bits = (chip->num_main_status_bits) ?
396 chip->num_main_status_bits : chip->num_regs;
397 /* Clear the status buf as we don't read all status regs */
398 memset(data->status_buf, 0, size);
399
400 /* We could support bulk read for main status registers
401 * but I don't expect to see devices with really many main
402 * status registers so let's only support single reads for the
403 * sake of simplicity. and add bulk reads only if needed
404 */
405 for (i = 0; i < chip->num_main_regs; i++) {
406 ret = regmap_read(map, chip->main_status +
407 (i * map->reg_stride
408 * data->irq_reg_stride),
409 &data->main_status_buf[i]);
410 if (ret) {
411 dev_err(map->dev,
412 "Failed to read IRQ status %d\n",
413 ret);
414 goto exit;
415 }
416 }
417
418 /* Read sub registers with active IRQs */
419 for (i = 0; i < chip->num_main_regs; i++) {
420 unsigned int b;
421 const unsigned long mreg = data->main_status_buf[i];
422
423 for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
424 if (i * map->format.val_bytes * 8 + b >
425 max_main_bits)
426 break;
427 ret = read_sub_irq_data(data, b);
428
429 if (ret != 0) {
430 dev_err(map->dev,
431 "Failed to read IRQ status %d\n",
432 ret);
433 goto exit;
434 }
435 }
436
437 }
438 } else if (!map->use_single_read && map->reg_stride == 1 &&
439 data->irq_reg_stride == 1) {
440
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000441 u8 *buf8 = data->status_reg_buf;
442 u16 *buf16 = data->status_reg_buf;
443 u32 *buf32 = data->status_reg_buf;
444
445 BUG_ON(!data->status_reg_buf);
446
447 ret = regmap_bulk_read(map, chip->status_base,
448 data->status_reg_buf,
449 chip->num_regs);
450 if (ret != 0) {
451 dev_err(map->dev, "Failed to read IRQ status: %d\n",
452 ret);
453 goto exit;
454 }
455
456 for (i = 0; i < data->chip->num_regs; i++) {
457 switch (map->format.val_bytes) {
458 case 1:
459 data->status_buf[i] = buf8[i];
460 break;
461 case 2:
462 data->status_buf[i] = buf16[i];
463 break;
464 case 4:
465 data->status_buf[i] = buf32[i];
466 break;
467 default:
468 BUG();
469 goto exit;
470 }
471 }
472
473 } else {
474 for (i = 0; i < data->chip->num_regs; i++) {
475 ret = regmap_read(map, chip->status_base +
476 (i * map->reg_stride
477 * data->irq_reg_stride),
478 &data->status_buf[i]);
479
480 if (ret != 0) {
481 dev_err(map->dev,
482 "Failed to read IRQ status: %d\n",
483 ret);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000484 goto exit;
485 }
486 }
487 }
488
489 /*
490 * Ignore masked IRQs and ack if we need to; we ack early so
491 * there is no race between handling and acknowleding the
492 * interrupt. We assume that typically few of the interrupts
493 * will fire simultaneously so don't worry about overhead from
494 * doing a write per register.
495 */
496 for (i = 0; i < data->chip->num_regs; i++) {
497 data->status_buf[i] &= ~data->mask_buf[i];
498
499 if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
500 reg = chip->ack_base +
501 (i * map->reg_stride * data->irq_reg_stride);
Olivier Deprez157378f2022-04-04 15:47:50 +0200502 if (chip->ack_invert)
503 ret = regmap_write(map, reg,
504 ~data->status_buf[i]);
505 else
506 ret = regmap_write(map, reg,
507 data->status_buf[i]);
508 if (chip->clear_ack) {
509 if (chip->ack_invert && !ret)
510 ret = regmap_write(map, reg, UINT_MAX);
511 else if (!ret)
512 ret = regmap_write(map, reg, 0);
513 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000514 if (ret != 0)
515 dev_err(map->dev, "Failed to ack 0x%x: %d\n",
516 reg, ret);
517 }
518 }
519
520 for (i = 0; i < chip->num_irqs; i++) {
521 if (data->status_buf[chip->irqs[i].reg_offset /
522 map->reg_stride] & chip->irqs[i].mask) {
523 handle_nested_irq(irq_find_mapping(data->domain, i));
524 handled = true;
525 }
526 }
527
David Brazdil0f672f62019-12-10 10:32:29 +0000528exit:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000529 if (chip->runtime_pm)
530 pm_runtime_put(map->dev);
531
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000532 if (chip->handle_post_irq)
533 chip->handle_post_irq(chip->irq_drv_data);
534
535 if (handled)
536 return IRQ_HANDLED;
537 else
538 return IRQ_NONE;
539}
540
541static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
542 irq_hw_number_t hw)
543{
544 struct regmap_irq_chip_data *data = h->host_data;
545
546 irq_set_chip_data(virq, data);
547 irq_set_chip(virq, &data->irq_chip);
548 irq_set_nested_thread(virq, 1);
549 irq_set_parent(virq, data->irq);
550 irq_set_noprobe(virq);
551
552 return 0;
553}
554
555static const struct irq_domain_ops regmap_domain_ops = {
556 .map = regmap_irq_map,
557 .xlate = irq_domain_xlate_onetwocell,
558};
559
560/**
Olivier Deprez157378f2022-04-04 15:47:50 +0200561 * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000562 *
Olivier Deprez157378f2022-04-04 15:47:50 +0200563 * @fwnode: The firmware node where the IRQ domain should be added to.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000564 * @map: The regmap for the device.
565 * @irq: The IRQ the device uses to signal interrupts.
566 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
567 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
568 * @chip: Configuration for the interrupt controller.
569 * @data: Runtime data structure for the controller, allocated on success.
570 *
571 * Returns 0 on success or an errno on failure.
572 *
573 * In order for this to be efficient the chip really should use a
574 * register cache. The chip driver is responsible for restoring the
575 * register values used by the IRQ controller over suspend and resume.
576 */
Olivier Deprez157378f2022-04-04 15:47:50 +0200577int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
578 struct regmap *map, int irq,
579 int irq_flags, int irq_base,
580 const struct regmap_irq_chip *chip,
581 struct regmap_irq_chip_data **data)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000582{
583 struct regmap_irq_chip_data *d;
584 int i;
585 int ret = -ENOMEM;
David Brazdil0f672f62019-12-10 10:32:29 +0000586 int num_type_reg;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000587 u32 reg;
588 u32 unmask_offset;
589
590 if (chip->num_regs <= 0)
591 return -EINVAL;
592
David Brazdil0f672f62019-12-10 10:32:29 +0000593 if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
594 return -EINVAL;
595
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000596 for (i = 0; i < chip->num_irqs; i++) {
597 if (chip->irqs[i].reg_offset % map->reg_stride)
598 return -EINVAL;
599 if (chip->irqs[i].reg_offset / map->reg_stride >=
600 chip->num_regs)
601 return -EINVAL;
602 }
603
604 if (irq_base) {
605 irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
606 if (irq_base < 0) {
607 dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
608 irq_base);
609 return irq_base;
610 }
611 }
612
613 d = kzalloc(sizeof(*d), GFP_KERNEL);
614 if (!d)
615 return -ENOMEM;
616
David Brazdil0f672f62019-12-10 10:32:29 +0000617 if (chip->num_main_regs) {
618 d->main_status_buf = kcalloc(chip->num_main_regs,
619 sizeof(unsigned int),
620 GFP_KERNEL);
621
622 if (!d->main_status_buf)
623 goto err_alloc;
624 }
625
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000626 d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
627 GFP_KERNEL);
628 if (!d->status_buf)
629 goto err_alloc;
630
631 d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
632 GFP_KERNEL);
633 if (!d->mask_buf)
634 goto err_alloc;
635
636 d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
637 GFP_KERNEL);
638 if (!d->mask_buf_def)
639 goto err_alloc;
640
641 if (chip->wake_base) {
642 d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
643 GFP_KERNEL);
644 if (!d->wake_buf)
645 goto err_alloc;
646 }
647
David Brazdil0f672f62019-12-10 10:32:29 +0000648 num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
649 if (num_type_reg) {
650 d->type_buf_def = kcalloc(num_type_reg,
651 sizeof(unsigned int), GFP_KERNEL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000652 if (!d->type_buf_def)
653 goto err_alloc;
654
David Brazdil0f672f62019-12-10 10:32:29 +0000655 d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000656 GFP_KERNEL);
657 if (!d->type_buf)
658 goto err_alloc;
659 }
660
661 d->irq_chip = regmap_irq_chip;
662 d->irq_chip.name = chip->name;
663 d->irq = irq;
664 d->map = map;
665 d->chip = chip;
666 d->irq_base = irq_base;
667
668 if (chip->irq_reg_stride)
669 d->irq_reg_stride = chip->irq_reg_stride;
670 else
671 d->irq_reg_stride = 1;
672
673 if (chip->type_reg_stride)
674 d->type_reg_stride = chip->type_reg_stride;
675 else
676 d->type_reg_stride = 1;
677
678 if (!map->use_single_read && map->reg_stride == 1 &&
679 d->irq_reg_stride == 1) {
680 d->status_reg_buf = kmalloc_array(chip->num_regs,
681 map->format.val_bytes,
682 GFP_KERNEL);
683 if (!d->status_reg_buf)
684 goto err_alloc;
685 }
686
687 mutex_init(&d->lock);
688
689 for (i = 0; i < chip->num_irqs; i++)
690 d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
691 |= chip->irqs[i].mask;
692
693 /* Mask all the interrupts by default */
694 for (i = 0; i < chip->num_regs; i++) {
695 d->mask_buf[i] = d->mask_buf_def[i];
David Brazdil0f672f62019-12-10 10:32:29 +0000696 if (!chip->mask_base)
697 continue;
698
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000699 reg = chip->mask_base +
700 (i * map->reg_stride * d->irq_reg_stride);
701 if (chip->mask_invert)
702 ret = regmap_irq_update_bits(d, reg,
703 d->mask_buf[i], ~d->mask_buf[i]);
704 else if (d->chip->unmask_base) {
705 unmask_offset = d->chip->unmask_base -
706 d->chip->mask_base;
707 ret = regmap_irq_update_bits(d,
708 reg + unmask_offset,
709 d->mask_buf[i],
710 d->mask_buf[i]);
711 } else
712 ret = regmap_irq_update_bits(d, reg,
713 d->mask_buf[i], d->mask_buf[i]);
714 if (ret != 0) {
715 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
716 reg, ret);
717 goto err_alloc;
718 }
719
720 if (!chip->init_ack_masked)
721 continue;
722
723 /* Ack masked but set interrupts */
724 reg = chip->status_base +
725 (i * map->reg_stride * d->irq_reg_stride);
726 ret = regmap_read(map, reg, &d->status_buf[i]);
727 if (ret != 0) {
728 dev_err(map->dev, "Failed to read IRQ status: %d\n",
729 ret);
730 goto err_alloc;
731 }
732
733 if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
734 reg = chip->ack_base +
735 (i * map->reg_stride * d->irq_reg_stride);
736 if (chip->ack_invert)
737 ret = regmap_write(map, reg,
738 ~(d->status_buf[i] & d->mask_buf[i]));
739 else
740 ret = regmap_write(map, reg,
741 d->status_buf[i] & d->mask_buf[i]);
Olivier Deprez157378f2022-04-04 15:47:50 +0200742 if (chip->clear_ack) {
743 if (chip->ack_invert && !ret)
744 ret = regmap_write(map, reg, UINT_MAX);
745 else if (!ret)
746 ret = regmap_write(map, reg, 0);
747 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000748 if (ret != 0) {
749 dev_err(map->dev, "Failed to ack 0x%x: %d\n",
750 reg, ret);
751 goto err_alloc;
752 }
753 }
754 }
755
756 /* Wake is disabled by default */
757 if (d->wake_buf) {
758 for (i = 0; i < chip->num_regs; i++) {
759 d->wake_buf[i] = d->mask_buf_def[i];
760 reg = chip->wake_base +
761 (i * map->reg_stride * d->irq_reg_stride);
762
763 if (chip->wake_invert)
764 ret = regmap_irq_update_bits(d, reg,
765 d->mask_buf_def[i],
766 0);
767 else
768 ret = regmap_irq_update_bits(d, reg,
769 d->mask_buf_def[i],
770 d->wake_buf[i]);
771 if (ret != 0) {
772 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
773 reg, ret);
774 goto err_alloc;
775 }
776 }
777 }
778
David Brazdil0f672f62019-12-10 10:32:29 +0000779 if (chip->num_type_reg && !chip->type_in_mask) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000780 for (i = 0; i < chip->num_type_reg; ++i) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000781 reg = chip->type_base +
782 (i * map->reg_stride * d->type_reg_stride);
David Brazdil0f672f62019-12-10 10:32:29 +0000783
784 ret = regmap_read(map, reg, &d->type_buf_def[i]);
785
786 if (d->chip->type_invert)
787 d->type_buf_def[i] = ~d->type_buf_def[i];
788
789 if (ret) {
790 dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000791 reg, ret);
792 goto err_alloc;
793 }
794 }
795 }
796
797 if (irq_base)
Olivier Deprez157378f2022-04-04 15:47:50 +0200798 d->domain = irq_domain_add_legacy(to_of_node(fwnode),
799 chip->num_irqs, irq_base,
800 0, &regmap_domain_ops, d);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000801 else
Olivier Deprez157378f2022-04-04 15:47:50 +0200802 d->domain = irq_domain_add_linear(to_of_node(fwnode),
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000803 chip->num_irqs,
804 &regmap_domain_ops, d);
805 if (!d->domain) {
806 dev_err(map->dev, "Failed to create IRQ domain\n");
807 ret = -ENOMEM;
808 goto err_alloc;
809 }
810
811 ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
812 irq_flags | IRQF_ONESHOT,
813 chip->name, d);
814 if (ret != 0) {
815 dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
816 irq, chip->name, ret);
817 goto err_domain;
818 }
819
820 *data = d;
821
822 return 0;
823
824err_domain:
825 /* Should really dispose of the domain but... */
826err_alloc:
827 kfree(d->type_buf);
828 kfree(d->type_buf_def);
829 kfree(d->wake_buf);
830 kfree(d->mask_buf_def);
831 kfree(d->mask_buf);
832 kfree(d->status_buf);
833 kfree(d->status_reg_buf);
834 kfree(d);
835 return ret;
836}
Olivier Deprez157378f2022-04-04 15:47:50 +0200837EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);
838
839/**
840 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
841 *
842 * @map: The regmap for the device.
843 * @irq: The IRQ the device uses to signal interrupts.
844 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
845 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
846 * @chip: Configuration for the interrupt controller.
847 * @data: Runtime data structure for the controller, allocated on success.
848 *
849 * Returns 0 on success or an errno on failure.
850 *
851 * This is the same as regmap_add_irq_chip_fwnode, except that the firmware
852 * node of the regmap is used.
853 */
854int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
855 int irq_base, const struct regmap_irq_chip *chip,
856 struct regmap_irq_chip_data **data)
857{
858 return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
859 irq_flags, irq_base, chip, data);
860}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000861EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
862
863/**
864 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
865 *
866 * @irq: Primary IRQ for the device
867 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
868 *
869 * This function also disposes of all mapped IRQs on the chip.
870 */
871void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
872{
873 unsigned int virq;
874 int hwirq;
875
876 if (!d)
877 return;
878
879 free_irq(irq, d);
880
881 /* Dispose all virtual irq from irq domain before removing it */
882 for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
883 /* Ignore hwirq if holes in the IRQ list */
884 if (!d->chip->irqs[hwirq].mask)
885 continue;
886
887 /*
888 * Find the virtual irq of hwirq on chip and if it is
889 * there then dispose it
890 */
891 virq = irq_find_mapping(d->domain, hwirq);
892 if (virq)
893 irq_dispose_mapping(virq);
894 }
895
896 irq_domain_remove(d->domain);
897 kfree(d->type_buf);
898 kfree(d->type_buf_def);
899 kfree(d->wake_buf);
900 kfree(d->mask_buf_def);
901 kfree(d->mask_buf);
902 kfree(d->status_reg_buf);
903 kfree(d->status_buf);
904 kfree(d);
905}
906EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
907
908static void devm_regmap_irq_chip_release(struct device *dev, void *res)
909{
910 struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;
911
912 regmap_del_irq_chip(d->irq, d);
913}
914
/* devres match callback: identify the resource holding @data. */
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)

{
	struct regmap_irq_chip_data **r = res;

	/* A NULL slot here indicates devres corruption; warn and skip it */
	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}
926
927/**
Olivier Deprez157378f2022-04-04 15:47:50 +0200928 * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
929 *
930 * @dev: The device pointer on which irq_chip belongs to.
931 * @fwnode: The firmware node where the IRQ domain should be added to.
932 * @map: The regmap for the device.
933 * @irq: The IRQ the device uses to signal interrupts
934 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
935 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
936 * @chip: Configuration for the interrupt controller.
937 * @data: Runtime data structure for the controller, allocated on success
938 *
939 * Returns 0 on success or an errno on failure.
940 *
941 * The &regmap_irq_chip_data will be automatically released when the device is
942 * unbound.
943 */
944int devm_regmap_add_irq_chip_fwnode(struct device *dev,
945 struct fwnode_handle *fwnode,
946 struct regmap *map, int irq,
947 int irq_flags, int irq_base,
948 const struct regmap_irq_chip *chip,
949 struct regmap_irq_chip_data **data)
950{
951 struct regmap_irq_chip_data **ptr, *d;
952 int ret;
953
954 ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
955 GFP_KERNEL);
956 if (!ptr)
957 return -ENOMEM;
958
959 ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
960 chip, &d);
961 if (ret < 0) {
962 devres_free(ptr);
963 return ret;
964 }
965
966 *ptr = d;
967 devres_add(dev, ptr);
968 *data = d;
969 return 0;
970}
971EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
972
973/**
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000974 * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
975 *
976 * @dev: The device pointer on which irq_chip belongs to.
977 * @map: The regmap for the device.
978 * @irq: The IRQ the device uses to signal interrupts
979 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
980 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
981 * @chip: Configuration for the interrupt controller.
982 * @data: Runtime data structure for the controller, allocated on success
983 *
984 * Returns 0 on success or an errno on failure.
985 *
986 * The &regmap_irq_chip_data will be automatically released when the device is
987 * unbound.
988 */
989int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
990 int irq_flags, int irq_base,
991 const struct regmap_irq_chip *chip,
992 struct regmap_irq_chip_data **data)
993{
Olivier Deprez157378f2022-04-04 15:47:50 +0200994 return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
995 irq, irq_flags, irq_base, chip,
996 data);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000997}
998EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
999
1000/**
1001 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
1002 *
1003 * @dev: Device for which which resource was allocated.
1004 * @irq: Primary IRQ for the device.
1005 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
1006 *
1007 * A resource managed version of regmap_del_irq_chip().
1008 */
1009void devm_regmap_del_irq_chip(struct device *dev, int irq,
1010 struct regmap_irq_chip_data *data)
1011{
1012 int rc;
1013
1014 WARN_ON(irq != data->irq);
1015 rc = devres_release(dev, devm_regmap_irq_chip_release,
1016 devm_regmap_irq_chip_match, data);
1017
1018 if (rc != 0)
1019 WARN_ON(rc);
1020}
1021EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
1022
1023/**
1024 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
1025 *
1026 * @data: regmap irq controller to operate on.
1027 *
1028 * Useful for drivers to request their own IRQs.
1029 */
1030int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
1031{
1032 WARN_ON(!data->irq_base);
1033 return data->irq_base;
1034}
1035EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
1036
1037/**
1038 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
1039 *
1040 * @data: regmap irq controller to operate on.
1041 * @irq: index of the interrupt requested in the chip IRQs.
1042 *
1043 * Useful for drivers to request their own IRQs.
1044 */
1045int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
1046{
1047 /* Handle holes in the IRQ list */
1048 if (!data->chip->irqs[irq].mask)
1049 return -EINVAL;
1050
1051 return irq_create_mapping(data->domain, irq);
1052}
1053EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
1054
1055/**
1056 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
1057 *
1058 * @data: regmap_irq controller to operate on.
1059 *
1060 * Useful for drivers to request their own IRQs and for integration
1061 * with subsystems. For ease of integration NULL is accepted as a
1062 * domain, allowing devices to just call this even if no domain is
1063 * allocated.
1064 */
1065struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
1066{
1067 if (data)
1068 return data->domain;
1069 else
1070 return NULL;
1071}
1072EXPORT_SYMBOL_GPL(regmap_irq_get_domain);