// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
// Copyright (c) 2017-20 Linaro Limited.

#include <linux/clk.h>
5#include <linux/clk.h>
6#include <linux/completion.h>
7#include <linux/i2c.h>
8#include <linux/io.h>
9#include <linux/interrupt.h>
10#include <linux/module.h>
11#include <linux/of.h>
12#include <linux/platform_device.h>
13#include <linux/pm_runtime.h>
14
/* Global control registers */
#define CCI_HW_VERSION				0x0
#define CCI_RESET_CMD				0x004
#define CCI_RESET_CMD_MASK			0x0f73f3f7
#define CCI_RESET_CMD_M0_MASK			0x000003f1
#define CCI_RESET_CMD_M1_MASK			0x0003f001
#define CCI_QUEUE_START				0x008
#define CCI_HALT_REQ				0x034
#define CCI_HALT_REQ_I2C_M0_Q0Q1		BIT(0)
#define CCI_HALT_REQ_I2C_M1_Q0Q1		BIT(1)

/* Per-master registers, m = master index (0x100 bytes apart) */
#define CCI_I2C_Mm_SCL_CTL(m)			(0x100 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_0(m)			(0x104 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_1(m)			(0x108 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_2(m)			(0x10c + 0x100 * (m))
#define CCI_I2C_Mm_MISC_CTL(m)			(0x110 + 0x100 * (m))

#define CCI_I2C_Mm_READ_DATA(m)			(0x118 + 0x100 * (m))
#define CCI_I2C_Mm_READ_BUF_LEVEL(m)		(0x11c + 0x100 * (m))

/* Per-master, per-queue registers, n = queue index */
#define CCI_I2C_Mm_Qn_EXEC_WORD_CNT(m, n)	(0x300 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_CUR_WORD_CNT(m, n)	(0x304 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_CUR_CMD(m, n)		(0x308 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_REPORT_STATUS(m, n)	(0x30c + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_LOAD_DATA(m, n)		(0x310 + 0x200 * (m) + 0x100 * (n))

/* Interrupt mask/clear/status registers and their bit layout */
#define CCI_IRQ_GLOBAL_CLEAR_CMD	0xc00
#define CCI_IRQ_MASK_0			0xc04
#define CCI_IRQ_MASK_0_I2C_M0_RD_DONE		BIT(0)
#define CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT		BIT(4)
#define CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT		BIT(8)
#define CCI_IRQ_MASK_0_I2C_M1_RD_DONE		BIT(12)
#define CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT		BIT(16)
#define CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT		BIT(20)
#define CCI_IRQ_MASK_0_RST_DONE_ACK		BIT(24)
#define CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK	BIT(25)
#define CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK	BIT(26)
/* Aggregate error masks: all NACK/error bits belonging to one master */
#define CCI_IRQ_MASK_0_I2C_M0_ERROR		0x18000ee6
#define CCI_IRQ_MASK_0_I2C_M1_ERROR		0x60ee6000
#define CCI_IRQ_CLEAR_0			0xc08
#define CCI_IRQ_STATUS_0		0xc0c
#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE		BIT(0)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT	BIT(4)
#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT	BIT(8)
#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE		BIT(12)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT	BIT(16)
#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT	BIT(20)
#define CCI_IRQ_STATUS_0_RST_DONE_ACK		BIT(24)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK	BIT(25)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK	BIT(26)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR	BIT(27)
#define CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR	BIT(28)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR	BIT(29)
#define CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR	BIT(30)
#define CCI_IRQ_STATUS_0_I2C_M0_ERROR		0x18000ee6
#define CCI_IRQ_STATUS_0_I2C_M1_ERROR		0x60ee6000

/* Wait budget for reset/halt/queue-run completions */
#define CCI_TIMEOUT	(msecs_to_jiffies(100))
#define NUM_MASTERS	2
#define NUM_QUEUES	2

/* Max number of resources + 1 for a NULL terminator */
#define CCI_RES_MAX	6

/*
 * Queue command opcodes, placed in the low nibble of a LOAD_DATA word;
 * the operand (address or length) is shifted in above them.
 */
#define CCI_I2C_SET_PARAM	1
#define CCI_I2C_REPORT		8
#define CCI_I2C_WRITE		9
#define CCI_I2C_READ		10

/* REPORT flag: raise an interrupt when the report command executes */
#define CCI_I2C_REPORT_IRQ_EN	BIT(8)
83
/*
 * Bus speed modes; used as index into cci_data->params[]. Probe selects
 * FAST for clock-frequency = 400000, FAST_PLUS for 1000000, STANDARD
 * otherwise.
 */
enum {
	I2C_MODE_STANDARD,
	I2C_MODE_FAST,
	I2C_MODE_FAST_PLUS,
};

/* Each master has two command queues; writes use 0, reads use 1. */
enum cci_i2c_queue_t {
	QUEUE_0,
	QUEUE_1
};
94
/*
 * Per-mode bus timing parameters, programmed verbatim into the
 * Mm_SCL_CTL / Mm_SDA_CTL_* / Mm_MISC_CTL registers by cci_init().
 */
struct hw_params {
	u16 thigh; /* HIGH period of the SCL clock in clock ticks */
	u16 tlow; /* LOW period of the SCL clock */
	u16 tsu_sto; /* set-up time for STOP condition */
	u16 tsu_sta; /* set-up time for a repeated START condition */
	u16 thd_dat; /* data hold time */
	u16 thd_sta; /* hold time (repeated) START condition */
	u16 tbuf; /* bus free time between a STOP and START condition */
	u8 scl_stretch_en; /* presumably enables SCL clock stretching - verify vs HW docs */
	u16 trdhld; /* NOTE(review): hold delay, exact semantics not shown here */
	u16 tsp; /* pulse width of spikes suppressed by the input filter */
};
107
struct cci;

/* Per-master state: one registered i2c adapter per hardware master. */
struct cci_master {
	struct i2c_adapter adap;	/* i2c core adapter for this master */
	u16 master;			/* master index (0 or 1) */
	u8 mode;			/* I2C_MODE_* bus speed selection */
	int status;			/* last transfer result, written by cci_isr() */
	struct completion irq_complete;	/* signalled by cci_isr() */
	struct cci *cci;		/* back-pointer; non-NULL marks the master as configured */
};
118
/* Per-compatible (hardware revision) configuration. */
struct cci_data {
	unsigned int num_masters;		/* masters present on this revision */
	struct i2c_adapter_quirks quirks;	/* max read/write message lengths */
	u16 queue_size[NUM_QUEUES];		/* command queue depth, in words */
	unsigned long cci_clk_rate;		/* expected "cci" core clock rate (Hz) */
	struct hw_params params[3];		/* timings, indexed by I2C_MODE_* */
};
126
/* Controller instance state. */
struct cci {
	struct device *dev;
	void __iomem *base;			/* mapped register block */
	unsigned int irq;
	const struct cci_data *data;		/* revision-specific configuration */
	struct clk_bulk_data *clocks;		/* all clocks from DT */
	int nclocks;
	struct cci_master master[NUM_MASTERS];
};
136
/*
 * Shared interrupt handler for both masters: latch and ack the raw
 * status, then complete waiters and record per-master errors.
 */
static irqreturn_t cci_isr(int irq, void *dev)
{
	struct cci *cci = dev;
	u32 val, reset = 0;
	int ret = IRQ_NONE;

	/* Read the pending status bits and clear them all. */
	val = readl(cci->base + CCI_IRQ_STATUS_0);
	writel(val, cci->base + CCI_IRQ_CLEAR_0);
	writel(0x1, cci->base + CCI_IRQ_GLOBAL_CLEAR_CMD);

	if (val & CCI_IRQ_STATUS_0_RST_DONE_ACK) {
		/*
		 * Controller reset done: cci_reset() always waits on
		 * master[0]. master[1].master is nonzero only when master 1
		 * was configured in probe, so it doubles as a presence flag.
		 */
		complete(&cci->master[0].irq_complete);
		if (cci->master[1].master)
			complete(&cci->master[1].irq_complete);
		ret = IRQ_HANDLED;
	}

	/* Master 0: read done or queue report means the queue run finished. */
	if (val & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE ||
	    val & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT ||
	    val & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT) {
		cci->master[0].status = 0;
		complete(&cci->master[0].irq_complete);
		ret = IRQ_HANDLED;
	}

	/* Master 1: same as above. */
	if (val & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE ||
	    val & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT ||
	    val & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT) {
		cci->master[1].status = 0;
		complete(&cci->master[1].irq_complete);
		ret = IRQ_HANDLED;
	}

	/*
	 * A halt ack (halt was requested either by cci_halt() or by the
	 * error handling below) is answered with a reset of that master;
	 * the eventual RST_DONE_ACK then completes any waiter.
	 */
	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK)) {
		reset = CCI_RESET_CMD_M0_MASK;
		ret = IRQ_HANDLED;
	}

	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK)) {
		reset = CCI_RESET_CMD_M1_MASK;
		ret = IRQ_HANDLED;
	}

	if (unlikely(reset))
		writel(reset, cci->base + CCI_RESET_CMD);

	/*
	 * Error on master 0: a NACK becomes -ENXIO, any other error bit
	 * -EIO; both queues of the master are then halted.
	 */
	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_ERROR)) {
		if (val & CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR ||
		    val & CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR)
			cci->master[0].status = -ENXIO;
		else
			cci->master[0].status = -EIO;

		writel(CCI_HALT_REQ_I2C_M0_Q0Q1, cci->base + CCI_HALT_REQ);
		ret = IRQ_HANDLED;
	}

	/* Error on master 1: same handling. */
	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_ERROR)) {
		if (val & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR ||
		    val & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR)
			cci->master[1].status = -ENXIO;
		else
			cci->master[1].status = -EIO;

		writel(CCI_HALT_REQ_I2C_M1_Q0Q1, cci->base + CCI_HALT_REQ);
		ret = IRQ_HANDLED;
	}

	return ret;
}
207
208static int cci_halt(struct cci *cci, u8 master_num)
209{
210 struct cci_master *master;
211 u32 val;
212
213 if (master_num >= cci->data->num_masters) {
214 dev_err(cci->dev, "Unsupported master idx (%u)\n", master_num);
215 return -EINVAL;
216 }
217
218 val = BIT(master_num);
219 master = &cci->master[master_num];
220
221 reinit_completion(&master->irq_complete);
222 writel(val, cci->base + CCI_HALT_REQ);
223
224 if (!wait_for_completion_timeout(&master->irq_complete, CCI_TIMEOUT)) {
225 dev_err(cci->dev, "CCI halt timeout\n");
226 return -ETIMEDOUT;
227 }
228
229 return 0;
230}
231
232static int cci_reset(struct cci *cci)
233{
234 /*
235 * we reset the whole controller, here and for implicity use
236 * master[0].xxx for waiting on it.
237 */
238 reinit_completion(&cci->master[0].irq_complete);
239 writel(CCI_RESET_CMD_MASK, cci->base + CCI_RESET_CMD);
240
241 if (!wait_for_completion_timeout(&cci->master[0].irq_complete,
242 CCI_TIMEOUT)) {
243 dev_err(cci->dev, "CCI reset timeout\n");
244 return -ETIMEDOUT;
245 }
246
247 return 0;
248}
249
/*
 * Program the interrupt mask and, for every master configured in probe,
 * the SCL/SDA timing registers for its selected bus mode.
 */
static int cci_init(struct cci *cci)
{
	u32 val = CCI_IRQ_MASK_0_I2C_M0_RD_DONE |
			CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT |
			CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT |
			CCI_IRQ_MASK_0_I2C_M1_RD_DONE |
			CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT |
			CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT |
			CCI_IRQ_MASK_0_RST_DONE_ACK |
			CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK |
			CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK |
			CCI_IRQ_MASK_0_I2C_M0_ERROR |
			CCI_IRQ_MASK_0_I2C_M1_ERROR;
	int i;

	writel(val, cci->base + CCI_IRQ_MASK_0);

	for (i = 0; i < cci->data->num_masters; i++) {
		int mode = cci->master[i].mode;
		const struct hw_params *hw;

		/* Skip masters without a DT child node (never set up). */
		if (!cci->master[i].cci)
			continue;

		hw = &cci->data->params[mode];

		/* SCL high/low periods */
		val = hw->thigh << 16 | hw->tlow;
		writel(val, cci->base + CCI_I2C_Mm_SCL_CTL(i));

		/* STOP / repeated-START setup times */
		val = hw->tsu_sto << 16 | hw->tsu_sta;
		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_0(i));

		/* data hold / START hold times */
		val = hw->thd_dat << 16 | hw->thd_sta;
		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_1(i));

		/* bus free time between STOP and START */
		val = hw->tbuf;
		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_2(i));

		/* clock stretch enable, hold delay, spike filter width */
		val = hw->scl_stretch_en << 8 | hw->trdhld << 4 | hw->tsp;
		writel(val, cci->base + CCI_I2C_Mm_MISC_CTL(i));
	}

	return 0;
}
294
/*
 * Execute whatever is currently loaded in (master, queue): copy the
 * queued word count into the exec count, kick the queue and wait for
 * the ISR. On timeout the whole controller is reset and re-initialized
 * so it stays usable. Returns the status the ISR recorded for this
 * master (0 or a negative errno).
 */
static int cci_run_queue(struct cci *cci, u8 master, u8 queue)
{
	u32 val;

	val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue));
	writel(val, cci->base + CCI_I2C_Mm_Qn_EXEC_WORD_CNT(master, queue));

	reinit_completion(&cci->master[master].irq_complete);
	/* QUEUE_START has one bit per (master, queue) pair. */
	val = BIT(master * 2 + queue);
	writel(val, cci->base + CCI_QUEUE_START);

	if (!wait_for_completion_timeout(&cci->master[master].irq_complete,
					 CCI_TIMEOUT)) {
		dev_err(cci->dev, "master %d queue %d timeout\n",
			master, queue);
		cci_reset(cci);
		cci_init(cci);
		return -ETIMEDOUT;
	}

	return cci->master[master].status;
}
317
/*
 * Ensure the queue is empty before a new transfer is loaded:
 *  - queue completely full (CUR_WORD_CNT == queue size): -EINVAL;
 *  - queue already empty: nothing to do;
 *  - otherwise: append a REPORT command and run the queue to drain it.
 */
static int cci_validate_queue(struct cci *cci, u8 master, u8 queue)
{
	u32 val;

	val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue));
	if (val == cci->data->queue_size[queue])
		return -EINVAL;

	if (!val)
		return 0;

	val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	return cci_run_queue(cci, master, queue);
}
334
/*
 * Read @len bytes from 7-bit device @addr on @master. Reads use
 * QUEUE_1 (writes use QUEUE_0); @len is bounded by quirks.max_read_len
 * so a whole message fits a single queue run.
 */
static int cci_i2c_read(struct cci *cci, u16 master,
			u16 addr, u8 *buf, u16 len)
{
	u32 val, words_read, words_exp;
	u8 queue = QUEUE_1;
	int i, index = 0, ret;
	bool first = true;

	/*
	 * Call validate queue to make sure queue is empty before starting.
	 * This is to avoid overflow / underflow of queue.
	 */
	ret = cci_validate_queue(cci, master, queue);
	if (ret < 0)
		return ret;

	/* Load the slave address, then the read command carrying the length. */
	val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	val = CCI_I2C_READ | len << 4;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	ret = cci_run_queue(cci, master, queue);
	if (ret < 0)
		return ret;

	/*
	 * Expect len + 1 bytes in the FIFO rounded up to whole words: one
	 * extra leading byte is skipped in the unpack loop below.
	 */
	words_read = readl(cci->base + CCI_I2C_Mm_READ_BUF_LEVEL(master));
	words_exp = len / 4 + 1;
	if (words_read != words_exp) {
		dev_err(cci->dev, "words read = %d, words expected = %d\n",
			words_read, words_exp);
		return -EIO;
	}

	/* Unpack the FIFO words into buf, least-significant byte first. */
	do {
		val = readl(cci->base + CCI_I2C_Mm_READ_DATA(master));

		for (i = 0; i < 4 && index < len; i++) {
			if (first) {
				/* The LS byte of this register represents the
				 * first byte read from the slave during a read
				 * access; it is skipped here.
				 * NOTE(review): confirm against the HW docs
				 * what the skipped byte actually carries.
				 */
				first = false;
				continue;
			}
			buf[index++] = (val >> (i * 8)) & 0xff;
		}
	} while (--words_read);

	return 0;
}
387
/*
 * Write @len bytes to 7-bit device @addr on @master, using QUEUE_0.
 * The command byte plus payload are packed into load[]; the quirks'
 * max_write_len keeps 1 + len within the 12-byte buffer.
 */
static int cci_i2c_write(struct cci *cci, u16 master,
			 u16 addr, u8 *buf, u16 len)
{
	u8 queue = QUEUE_0;
	u8 load[12] = { 0 };
	int i = 0, j, ret;
	u32 val;

	/*
	 * Call validate queue to make sure queue is empty before starting.
	 * This is to avoid overflow / underflow of queue.
	 */
	ret = cci_validate_queue(cci, master, queue);
	if (ret < 0)
		return ret;

	/* Slave address first. */
	val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	/* WRITE opcode with the length in the upper nibble (len < 16 here). */
	load[i++] = CCI_I2C_WRITE | len << 4;

	for (j = 0; j < len; j++)
		load[i++] = buf[j];

	/*
	 * Push load[] to the queue four bytes per word, LS byte first;
	 * the zero initialization keeps the trailing reads meaningful
	 * when i is not a multiple of four.
	 */
	for (j = 0; j < i; j += 4) {
		val = load[j];
		val |= load[j + 1] << 8;
		val |= load[j + 2] << 16;
		val |= load[j + 3] << 24;
		writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));
	}

	/* Final REPORT raises the interrupt cci_run_queue() waits for. */
	val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	return cci_run_queue(cci, master, queue);
}
425
426static int cci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
427{
428 struct cci_master *cci_master = i2c_get_adapdata(adap);
429 struct cci *cci = cci_master->cci;
430 int i, ret;
431
432 ret = pm_runtime_get_sync(cci->dev);
433 if (ret < 0)
434 goto err;
435
436 for (i = 0; i < num; i++) {
437 if (msgs[i].flags & I2C_M_RD)
438 ret = cci_i2c_read(cci, cci_master->master,
439 msgs[i].addr, msgs[i].buf,
440 msgs[i].len);
441 else
442 ret = cci_i2c_write(cci, cci_master->master,
443 msgs[i].addr, msgs[i].buf,
444 msgs[i].len);
445
446 if (ret < 0)
447 break;
448 }
449
450 if (!ret)
451 ret = num;
452
453err:
454 pm_runtime_mark_last_busy(cci->dev);
455 pm_runtime_put_autosuspend(cci->dev);
456
457 return ret;
458}
459
460static u32 cci_func(struct i2c_adapter *adap)
461{
462 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
463}
464
/* i2c algorithm: transfers are handled by cci_xfer(). */
static const struct i2c_algorithm cci_algo = {
	.master_xfer = cci_xfer,
	.functionality = cci_func,
};
469
/* Prepare and enable all bulk clocks acquired in probe. */
static int cci_enable_clocks(struct cci *cci)
{
	return clk_bulk_prepare_enable(cci->nclocks, cci->clocks);
}
474
/* Disable and unprepare all bulk clocks; inverse of cci_enable_clocks(). */
static void cci_disable_clocks(struct cci *cci)
{
	clk_bulk_disable_unprepare(cci->nclocks, cci->clocks);
}
479
/* Runtime suspend: just gate the clocks; register state is restored on resume. */
static int __maybe_unused cci_suspend_runtime(struct device *dev)
{
	struct cci *cci = dev_get_drvdata(dev);

	cci_disable_clocks(cci);
	return 0;
}
487
/* Runtime resume: re-enable clocks and reprogram the controller registers. */
static int __maybe_unused cci_resume_runtime(struct device *dev)
{
	struct cci *cci = dev_get_drvdata(dev);
	int ret;

	ret = cci_enable_clocks(cci);
	if (ret)
		return ret;

	/* cci_init() always returns 0, so no error handling needed here. */
	cci_init(cci);
	return 0;
}
500
501static int __maybe_unused cci_suspend(struct device *dev)
502{
503 if (!pm_runtime_suspended(dev))
504 return cci_suspend_runtime(dev);
505
506 return 0;
507}
508
/*
 * System resume: bring the controller back up, then hand control to
 * runtime PM via an autosuspend request.
 * NOTE(review): the cci_resume_runtime() return value is ignored, so a
 * clock enable failure here goes unreported.
 */
static int __maybe_unused cci_resume(struct device *dev)
{
	cci_resume_runtime(dev);
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
517
/* System sleep wraps the runtime PM hooks via cci_suspend()/cci_resume(). */
static const struct dev_pm_ops qcom_cci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(cci_suspend, cci_resume)
	SET_RUNTIME_PM_OPS(cci_suspend_runtime, cci_resume_runtime, NULL)
};
522
523static int cci_probe(struct platform_device *pdev)
524{
525 struct device *dev = &pdev->dev;
526 unsigned long cci_clk_rate = 0;
527 struct device_node *child;
528 struct resource *r;
529 struct cci *cci;
530 int ret, i;
531 u32 val;
532
533 cci = devm_kzalloc(dev, sizeof(*cci), GFP_KERNEL);
534 if (!cci)
535 return -ENOMEM;
536
537 cci->dev = dev;
538 platform_set_drvdata(pdev, cci);
539 cci->data = device_get_match_data(dev);
540 if (!cci->data)
541 return -ENOENT;
542
543 for_each_available_child_of_node(dev->of_node, child) {
544 u32 idx;
545
546 ret = of_property_read_u32(child, "reg", &idx);
547 if (ret) {
548 dev_err(dev, "%pOF invalid 'reg' property", child);
549 continue;
550 }
551
552 if (idx >= cci->data->num_masters) {
553 dev_err(dev, "%pOF invalid 'reg' value: %u (max is %u)",
554 child, idx, cci->data->num_masters - 1);
555 continue;
556 }
557
558 cci->master[idx].adap.quirks = &cci->data->quirks;
559 cci->master[idx].adap.algo = &cci_algo;
560 cci->master[idx].adap.dev.parent = dev;
561 cci->master[idx].adap.dev.of_node = of_node_get(child);
562 cci->master[idx].master = idx;
563 cci->master[idx].cci = cci;
564
565 i2c_set_adapdata(&cci->master[idx].adap, &cci->master[idx]);
566 snprintf(cci->master[idx].adap.name,
567 sizeof(cci->master[idx].adap.name), "Qualcomm-CCI");
568
569 cci->master[idx].mode = I2C_MODE_STANDARD;
570 ret = of_property_read_u32(child, "clock-frequency", &val);
571 if (!ret) {
572 if (val == 400000)
573 cci->master[idx].mode = I2C_MODE_FAST;
574 else if (val == 1000000)
575 cci->master[idx].mode = I2C_MODE_FAST_PLUS;
576 }
577
578 init_completion(&cci->master[idx].irq_complete);
579 }
580
581 /* Memory */
582
583 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
584 cci->base = devm_ioremap_resource(dev, r);
585 if (IS_ERR(cci->base))
586 return PTR_ERR(cci->base);
587
588 /* Clocks */
589
590 ret = devm_clk_bulk_get_all(dev, &cci->clocks);
591 if (ret < 1) {
592 dev_err(dev, "failed to get clocks %d\n", ret);
593 return ret;
594 }
595 cci->nclocks = ret;
596
597 /* Retrieve CCI clock rate */
598 for (i = 0; i < cci->nclocks; i++) {
599 if (!strcmp(cci->clocks[i].id, "cci")) {
600 cci_clk_rate = clk_get_rate(cci->clocks[i].clk);
601 break;
602 }
603 }
604
605 if (cci_clk_rate != cci->data->cci_clk_rate) {
606 /* cci clock set by the bootloader or via assigned clock rate
607 * in DT.
608 */
609 dev_warn(dev, "Found %lu cci clk rate while %lu was expected\n",
610 cci_clk_rate, cci->data->cci_clk_rate);
611 }
612
613 ret = cci_enable_clocks(cci);
614 if (ret < 0)
615 return ret;
616
617 /* Interrupt */
618
619 ret = platform_get_irq(pdev, 0);
620 if (ret < 0)
621 goto disable_clocks;
622 cci->irq = ret;
623
624 ret = devm_request_irq(dev, cci->irq, cci_isr, 0, dev_name(dev), cci);
625 if (ret < 0) {
626 dev_err(dev, "request_irq failed, ret: %d\n", ret);
627 goto disable_clocks;
628 }
629
630 val = readl(cci->base + CCI_HW_VERSION);
631 dev_dbg(dev, "CCI HW version = 0x%08x", val);
632
633 ret = cci_reset(cci);
634 if (ret < 0)
635 goto error;
636
637 ret = cci_init(cci);
638 if (ret < 0)
639 goto error;
640
641 for (i = 0; i < cci->data->num_masters; i++) {
642 if (!cci->master[i].cci)
643 continue;
644
645 ret = i2c_add_adapter(&cci->master[i].adap);
646 if (ret < 0) {
647 of_node_put(cci->master[i].adap.dev.of_node);
648 goto error_i2c;
649 }
650 }
651
652 pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
653 pm_runtime_use_autosuspend(dev);
654 pm_runtime_set_active(dev);
655 pm_runtime_enable(dev);
656
657 return 0;
658
659error_i2c:
660 for (--i ; i >= 0; i--) {
661 if (cci->master[i].cci) {
662 i2c_del_adapter(&cci->master[i].adap);
663 of_node_put(cci->master[i].adap.dev.of_node);
664 }
665 }
666error:
667 disable_irq(cci->irq);
668disable_clocks:
669 cci_disable_clocks(cci);
670
671 return ret;
672}
673
/*
 * Remove: unregister the adapters, halt both masters, then tear down
 * the interrupt and runtime PM state.
 */
static int cci_remove(struct platform_device *pdev)
{
	struct cci *cci = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < cci->data->num_masters; i++) {
		if (cci->master[i].cci) {
			i2c_del_adapter(&cci->master[i].adap);
			of_node_put(cci->master[i].adap.dev.of_node);
		}
		/* Halted for every index; cci_halt() rejects indices >= num_masters. */
		cci_halt(cci, i);
	}

	disable_irq(cci->irq);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	return 0;
}
693
/*
 * CCI v1 (msm8916): single master, 19.2 MHz cci core clock. Timing
 * values are in clock ticks (see struct hw_params); no fast-plus
 * parameters, so that mode must not be selected on v1 hardware.
 */
static const struct cci_data cci_v1_data = {
	.num_masters = 1,
	.queue_size = { 64, 16 },
	.quirks = {
		/* Message length limits; writes must fit cci_i2c_write()'s 12-byte load[] */
		.max_write_len = 10,
		.max_read_len = 12,
	},
	.cci_clk_rate =	19200000,
	.params[I2C_MODE_STANDARD] = {
		.thigh = 78,
		.tlow = 114,
		.tsu_sto = 28,
		.tsu_sta = 28,
		.thd_dat = 10,
		.thd_sta = 77,
		.tbuf = 118,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 1
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 20,
		.tlow = 28,
		.tsu_sto = 21,
		.tsu_sta = 21,
		.thd_dat = 13,
		.thd_sta = 18,
		.tbuf = 32,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
};
727
/*
 * CCI v2 (msm8996, sdm845): two masters, 37.5 MHz cci core clock, with
 * fast-plus (1 MHz) timings available in addition to standard/fast.
 */
static const struct cci_data cci_v2_data = {
	.num_masters = 2,
	.queue_size = { 64, 16 },
	.quirks = {
		/* Message length limits; writes must fit cci_i2c_write()'s 12-byte load[] */
		.max_write_len = 11,
		.max_read_len = 12,
	},
	.cci_clk_rate =	37500000,
	.params[I2C_MODE_STANDARD] = {
		.thigh = 201,
		.tlow = 174,
		.tsu_sto = 204,
		.tsu_sta = 231,
		.thd_dat = 22,
		.thd_sta = 162,
		.tbuf = 227,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 38,
		.tlow = 56,
		.tsu_sto = 40,
		.tsu_sta = 40,
		.thd_dat = 22,
		.thd_sta = 35,
		.tbuf = 62,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
	.params[I2C_MODE_FAST_PLUS] = {
		.thigh = 16,
		.tlow = 22,
		.tsu_sto = 17,
		.tsu_sta = 18,
		.thd_dat = 16,
		.thd_sta = 15,
		.tbuf = 24,
		.scl_stretch_en = 0,
		.trdhld = 3,
		.tsp = 3
	},
};
773
/* OF match table: v1 config on msm8916, v2 on msm8996 and sdm845. */
static const struct of_device_id cci_dt_match[] = {
	{ .compatible = "qcom,msm8916-cci", .data = &cci_v1_data},
	{ .compatible = "qcom,msm8996-cci", .data = &cci_v2_data},
	{ .compatible = "qcom,sdm845-cci", .data = &cci_v2_data},
	{}
};
MODULE_DEVICE_TABLE(of, cci_dt_match);
781
/* Platform driver glue: probe/remove plus PM ops defined above. */
static struct platform_driver qcom_cci_driver = {
	.probe = cci_probe,
	.remove = cci_remove,
	.driver = {
		.name = "i2c-qcom-cci",
		.of_match_table = cci_dt_match,
		.pm = &qcom_cci_pm,
	},
};

module_platform_driver(qcom_cci_driver);

MODULE_DESCRIPTION("Qualcomm Camera Control Interface driver");
MODULE_AUTHOR("Todor Tomov <todor.tomov@linaro.org>");
MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_LICENSE("GPL v2");