// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *  Implementation of primary alsa driver code base for Intel HD Audio.
 *
 *  Copyright(c) 2004 Intel Corporation. All rights reserved.
 *
 *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
 *                     PeiSen Hou <pshou@realtek.com.tw>
 */

#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#ifdef CONFIG_X86
/* for art-tsc conversion */
#include <asm/tsc.h>
#endif

#include <sound/core.h>
#include <sound/initval.h>
#include "hda_controller.h"
#include "hda_local.h"

#define CREATE_TRACE_POINTS
#include "hda_controller_trace.h"

/* DSP lock helpers */
#define dsp_lock(dev)           snd_hdac_dsp_lock(azx_stream(dev))
#define dsp_unlock(dev)         snd_hdac_dsp_unlock(azx_stream(dev))
#define dsp_is_locked(dev)      snd_hdac_stream_is_locked(azx_stream(dev))

/* assign a stream for the PCM */
static inline struct azx_dev *
azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
{
        struct hdac_stream *s;

        s = snd_hdac_stream_assign(azx_bus(chip), substream);
        if (!s)
                return NULL;
        return stream_to_azx_dev(s);
}

/* release the assigned stream */
static inline void azx_release_device(struct azx_dev *azx_dev)
{
        snd_hdac_stream_release(azx_stream(azx_dev));
}

static inline struct hda_pcm_stream *
to_hda_pcm_stream(struct snd_pcm_substream *substream)
{
        struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
        return &apcm->info->stream[substream->stream];
}

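/*
 * Adjust a link-time timestamp (in ns) by the codec-reported delay:
 * added for capture streams, subtracted for playback.
 */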
static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
                                  u64 nsec)
{
        struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
        struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
        u64 codec_frames, codec_nsecs;

        if (!hinfo->ops.get_delay)
                return nsec;

        codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
        codec_nsecs = div_u64(codec_frames * 1000000000LL,
                              substream->runtime->rate);

        if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
                return nsec + codec_nsecs;

        return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
}

/*
 * PCM ops
 */

static int azx_pcm_close(struct snd_pcm_substream *substream)
{
        struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
        struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
        struct azx *chip = apcm->chip;
        struct azx_dev *azx_dev = get_azx_dev(substream);

        trace_azx_pcm_close(chip, azx_dev);
        mutex_lock(&chip->open_mutex);
        azx_release_device(azx_dev);
        if (hinfo->ops.close)
                hinfo->ops.close(hinfo, apcm->codec, substream);
        snd_hda_power_down(apcm->codec);
        mutex_unlock(&chip->open_mutex);
        snd_hda_codec_pcm_put(apcm->info);
        return 0;
}

static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
                             struct snd_pcm_hw_params *hw_params)
{
        struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
        struct azx *chip = apcm->chip;
        struct azx_dev *azx_dev = get_azx_dev(substream);
        int ret = 0;

        trace_azx_pcm_hw_params(chip, azx_dev);
        dsp_lock(azx_dev);
        if (dsp_is_locked(azx_dev)) {
                ret = -EBUSY;
                goto unlock;
        }

        azx_dev->core.bufsize = 0;
        azx_dev->core.period_bytes = 0;
        azx_dev->core.format_val = 0;

unlock:
        dsp_unlock(azx_dev);
        return ret;
}

static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
        struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
        struct azx_dev *azx_dev = get_azx_dev(substream);
        struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);

        /* reset BDL address */
        dsp_lock(azx_dev);
        if (!dsp_is_locked(azx_dev))
                snd_hdac_stream_cleanup(azx_stream(azx_dev));

        snd_hda_codec_cleanup(apcm->codec, hinfo, substream);

        azx_stream(azx_dev)->prepared = 0;
        dsp_unlock(azx_dev);
        return 0;
}

static int azx_pcm_prepare(struct snd_pcm_substream *substream)
{
        struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
        struct azx *chip = apcm->chip;
        struct azx_dev *azx_dev = get_azx_dev(substream);
        struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
        struct snd_pcm_runtime *runtime = substream->runtime;
        unsigned int format_val, stream_tag;
        int err;
        struct hda_spdif_out *spdif =
                snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
        unsigned short ctls = spdif ? spdif->ctls : 0;

        trace_azx_pcm_prepare(chip, azx_dev);
        dsp_lock(azx_dev);
        if (dsp_is_locked(azx_dev)) {
                err = -EBUSY;
                goto unlock;
        }

        snd_hdac_stream_reset(azx_stream(azx_dev));
        format_val = snd_hdac_calc_stream_format(runtime->rate,
                                                 runtime->channels,
                                                 runtime->format,
                                                 hinfo->maxbps,
                                                 ctls);
        if (!format_val) {
                dev_err(chip->card->dev,
                        "invalid format_val, rate=%d, ch=%d, format=%d\n",
                        runtime->rate, runtime->channels, runtime->format);
                err = -EINVAL;
                goto unlock;
        }

        err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
        if (err < 0)
                goto unlock;

        snd_hdac_stream_setup(azx_stream(azx_dev));

        stream_tag = azx_dev->core.stream_tag;
        /* CA-IBG chips need the playback stream starting from 1 */
        if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
            stream_tag > chip->capture_streams)
                stream_tag -= chip->capture_streams;
        err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
                                    azx_dev->core.format_val, substream);

 unlock:
        if (!err)
                azx_stream(azx_dev)->prepared = 1;
        dsp_unlock(azx_dev);
        return err;
}

static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
        struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
        struct azx *chip = apcm->chip;
        struct hdac_bus *bus = azx_bus(chip);
        struct azx_dev *azx_dev;
        struct snd_pcm_substream *s;
        struct hdac_stream *hstr;
        bool start;
        int sbits = 0;
        int sync_reg;

        azx_dev = get_azx_dev(substream);
        trace_azx_pcm_trigger(chip, azx_dev, cmd);

        hstr = azx_stream(azx_dev);
        if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
                sync_reg = AZX_REG_OLD_SSYNC;
        else
                sync_reg = AZX_REG_SSYNC;

        if (dsp_is_locked(azx_dev) || !hstr->prepared)
                return -EPIPE;

        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
        case SNDRV_PCM_TRIGGER_RESUME:
                start = true;
                break;
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_STOP:
                start = false;
                break;
        default:
                return -EINVAL;
        }

        snd_pcm_group_for_each_entry(s, substream) {
                if (s->pcm->card != substream->pcm->card)
                        continue;
                azx_dev = get_azx_dev(s);
                sbits |= 1 << azx_dev->core.index;
                snd_pcm_trigger_done(s, substream);
        }

        spin_lock(&bus->reg_lock);

        /* first, set SYNC bits of corresponding streams */
        snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);

        snd_pcm_group_for_each_entry(s, substream) {
                if (s->pcm->card != substream->pcm->card)
                        continue;
                azx_dev = get_azx_dev(s);
                if (start) {
                        azx_dev->insufficient = 1;
                        snd_hdac_stream_start(azx_stream(azx_dev), true);
                } else {
                        snd_hdac_stream_stop(azx_stream(azx_dev));
                }
        }
        spin_unlock(&bus->reg_lock);

        snd_hdac_stream_sync(hstr, start, sbits);

        spin_lock(&bus->reg_lock);
        /* reset SYNC bits */
        snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
        if (start)
                snd_hdac_stream_timecounter_init(hstr, sbits);
        spin_unlock(&bus->reg_lock);
        return 0;
}

unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
{
        return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
}
EXPORT_SYMBOL_GPL(azx_get_pos_lpib);

unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
{
        return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
}
EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);

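/*
 * Return the current DMA position in bytes; as a side effect this also
 * refreshes substream->runtime->delay from the controller and codec delays.
 */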
unsigned int azx_get_position(struct azx *chip,
                              struct azx_dev *azx_dev)
{
        struct snd_pcm_substream *substream = azx_dev->core.substream;
        unsigned int pos;
        int stream = substream->stream;
        int delay = 0;

        if (chip->get_position[stream])
                pos = chip->get_position[stream](chip, azx_dev);
        else /* use the position buffer as default */
                pos = azx_get_pos_posbuf(chip, azx_dev);

        if (pos >= azx_dev->core.bufsize)
                pos = 0;

        if (substream->runtime) {
                struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
                struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);

                if (chip->get_delay[stream])
                        delay += chip->get_delay[stream](chip, azx_dev, pos);
                if (hinfo->ops.get_delay)
                        delay += hinfo->ops.get_delay(hinfo, apcm->codec,
                                                      substream);
                substream->runtime->delay = delay;
        }

        trace_azx_get_position(chip, azx_dev, pos, delay);
        return pos;
}
EXPORT_SYMBOL_GPL(azx_get_position);

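/* .pointer PCM op: report the current position in frames */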
static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
{
        struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
        struct azx *chip = apcm->chip;
        struct azx_dev *azx_dev = get_azx_dev(substream);
        return bytes_to_frames(substream->runtime,
                               azx_get_position(chip, azx_dev));
}

/*
 * azx_scale64: Scale base by mult/div while not overflowing sanely
 *
 * Derived from scale64_check_overflow in kernel/time/timekeeping.c
 *
 * The timestamps for a 48kHz stream can overflow after (2^64/10^9)/48K which
 * is about 384307 seconds, i.e. ~4.5 days.
 *
 * This scales the calculation so that overflow will happen but after 2^64 /
 * 48000 secs, which is pretty large!
 *
 * In the calculation below:
 * base may overflow, but since there isn't any additional division
 * performed on base it's OK
 * rem can't overflow because both are 32-bit values
 */

#ifdef CONFIG_X86
static u64 azx_scale64(u64 base, u32 num, u32 den)
{
        u64 rem;

        rem = do_div(base, den);

        base *= num;
        rem *= num;

        do_div(rem, den);

        return base + rem;
}

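/*
 * Read back the link position, wall clock and ART counters latched by a
 * GTSCC capture and convert them into the (device time, system counter)
 * pair expected by get_device_system_crosststamp().
 */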
static int azx_get_sync_time(ktime_t *device,
                             struct system_counterval_t *system, void *ctx)
{
        struct snd_pcm_substream *substream = ctx;
        struct azx_dev *azx_dev = get_azx_dev(substream);
        struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
        struct azx *chip = apcm->chip;
        struct snd_pcm_runtime *runtime;
        u64 ll_counter, ll_counter_l, ll_counter_h;
        u64 tsc_counter, tsc_counter_l, tsc_counter_h;
        u32 wallclk_ctr, wallclk_cycles;
        bool direction;
        u32 dma_select;
        u32 timeout;
        u32 retry_count = 0;

        runtime = substream->runtime;

        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
                direction = 1;
        else
                direction = 0;

        /* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
        do {
                timeout = 100;
                dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
                             (azx_dev->core.stream_tag - 1);
                snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);

                /* Enable the capture */
                snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);

                while (timeout) {
                        if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
                            GTSCC_TSCCD_MASK)
                                break;

                        timeout--;
                }

                if (!timeout) {
                        dev_err(chip->card->dev, "GTSCC capture Timedout!\n");
                        return -EIO;
                }

                /* Read wall clock counter */
                wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);

                /* Read TSC counter */
                tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
                tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);

                /* Read Link counter */
                ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
                ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);

                /* Ack: registers read done */
                snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);

                tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
                              tsc_counter_l;

                ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) | ll_counter_l;
                wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;
                /*
                 * An error occurs near frame "rollover". The clocks-in-frame
                 * value indicates whether this error may have occurred.
                 * Here we use the value of 10 i.e., HDA_MAX_CYCLE_OFFSET
                 */
                if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
                    && wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
                        break;

                /*
                 * Sleep before we read again, else we may again get
                 * value near to MAX_CYCLE. Try to sleep for different
                 * amount of time so we don't hit the same number again
                 */
                udelay(retry_count++);

        } while (retry_count != HDA_MAX_CYCLE_READ_RETRY);

        if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
                dev_err_ratelimited(chip->card->dev,
                                    "Error in WALFCC cycle count\n");
                return -EIO;
        }

        *device = ns_to_ktime(azx_scale64(ll_counter,
                              NSEC_PER_SEC, runtime->rate));
        *device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
                               ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));

        *system = convert_art_to_tsc(tsc_counter);

        return 0;
}

#else
static int azx_get_sync_time(ktime_t *device,
                             struct system_counterval_t *system, void *ctx)
{
        return -ENXIO;
}
#endif

static int azx_get_crosststamp(struct snd_pcm_substream *substream,
                               struct system_device_crosststamp *xtstamp)
{
        return get_device_system_crosststamp(azx_get_sync_time,
                                             substream, NULL, xtstamp);
}

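/*
 * True when the runtime advertises link-synchronized timestamps and the
 * caller actually requested that timestamp type.
 */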
static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
                                          struct snd_pcm_audio_tstamp_config *ts)
{
        if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
                if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
                        return true;

        return false;
}

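/* .get_time_info PCM op: fill in the system and link audio timestamps */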
static int azx_get_time_info(struct snd_pcm_substream *substream,
                        struct timespec64 *system_ts, struct timespec64 *audio_ts,
                        struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
                        struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
{
        struct azx_dev *azx_dev = get_azx_dev(substream);
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct system_device_crosststamp xtstamp;
        int ret;
        u64 nsec;

        if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
            (audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {

                snd_pcm_gettime(substream->runtime, system_ts);

                nsec = timecounter_read(&azx_dev->core.tc);
                nsec = div_u64(nsec, 3); /* can be optimized */
                if (audio_tstamp_config->report_delay)
                        nsec = azx_adjust_codec_delay(substream, nsec);

                *audio_ts = ns_to_timespec64(nsec);

                audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
                audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
                audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */

        } else if (is_link_time_supported(runtime, audio_tstamp_config)) {

                ret = azx_get_crosststamp(substream, &xtstamp);
                if (ret)
                        return ret;

                switch (runtime->tstamp_type) {
                case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
                        return -EINVAL;

                case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
                        *system_ts = ktime_to_timespec64(xtstamp.sys_monoraw);
                        break;

                default:
                        *system_ts = ktime_to_timespec64(xtstamp.sys_realtime);
                        break;

                }

                *audio_ts = ktime_to_timespec64(xtstamp.device);

                audio_tstamp_report->actual_type =
                        SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
                audio_tstamp_report->accuracy_report = 1;
                /* 24 MHz WallClock == 42ns resolution */
                audio_tstamp_report->accuracy = 42;

        } else {
                audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
        }

        return 0;
}

static const struct snd_pcm_hardware azx_pcm_hw = {
        .info = (SNDRV_PCM_INFO_MMAP |
                 SNDRV_PCM_INFO_INTERLEAVED |
                 SNDRV_PCM_INFO_BLOCK_TRANSFER |
                 SNDRV_PCM_INFO_MMAP_VALID |
                 /* No full-resume yet implemented */
                 /* SNDRV_PCM_INFO_RESUME |*/
                 SNDRV_PCM_INFO_PAUSE |
                 SNDRV_PCM_INFO_SYNC_START |
                 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
                 SNDRV_PCM_INFO_HAS_LINK_ATIME |
                 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
        .formats = SNDRV_PCM_FMTBIT_S16_LE,
        .rates = SNDRV_PCM_RATE_48000,
        .rate_min = 48000,
        .rate_max = 48000,
        .channels_min = 2,
        .channels_max = 2,
        .buffer_bytes_max = AZX_MAX_BUF_SIZE,
        .period_bytes_min = 128,
        .period_bytes_max = AZX_MAX_BUF_SIZE / 2,
        .periods_min = 2,
        .periods_max = AZX_MAX_FRAG,
        .fifo_size = 0,
};

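/*
 * .open PCM op: assign a hardware stream and apply the hw constraints and
 * codec-specific capabilities to the runtime.
 */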
static int azx_pcm_open(struct snd_pcm_substream *substream)
{
        struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
        struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
        struct azx *chip = apcm->chip;
        struct azx_dev *azx_dev;
        struct snd_pcm_runtime *runtime = substream->runtime;
        int err;
        int buff_step;

        snd_hda_codec_pcm_get(apcm->info);
        mutex_lock(&chip->open_mutex);
        azx_dev = azx_assign_device(chip, substream);
        trace_azx_pcm_open(chip, azx_dev);
        if (azx_dev == NULL) {
                err = -EBUSY;
                goto unlock;
        }
        runtime->private_data = azx_dev;

        runtime->hw = azx_pcm_hw;
        if (chip->gts_present)
                runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
        runtime->hw.channels_min = hinfo->channels_min;
        runtime->hw.channels_max = hinfo->channels_max;
        runtime->hw.formats = hinfo->formats;
        runtime->hw.rates = hinfo->rates;
        snd_pcm_limit_hw_rates(runtime);
        snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);

        /* avoid wrap-around with wall-clock */
        snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
                                     20,
                                     178000000);

        if (chip->align_buffer_size)
                /* constrain buffer sizes to be multiple of 128
                   bytes. This is more efficient in terms of memory
                   access but isn't required by the HDA spec and
                   prevents users from specifying exact period/buffer
                   sizes. For example for 44.1kHz, a period size set
                   to 20ms will be rounded to 19.59ms. */
                buff_step = 128;
        else
                /* Don't enforce steps on buffer sizes, still need to
                   be multiple of 4 bytes (HDA spec). Tested on Intel
                   HDA controllers, may not work on all devices where
                   option needs to be disabled */
                buff_step = 4;

        snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
                                   buff_step);
        snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
                                   buff_step);
        snd_hda_power_up(apcm->codec);
        if (hinfo->ops.open)
                err = hinfo->ops.open(hinfo, apcm->codec, substream);
        else
                err = -ENODEV;
        if (err < 0) {
                azx_release_device(azx_dev);
                goto powerdown;
        }
        snd_pcm_limit_hw_rates(runtime);
        /* sanity check */
        if (snd_BUG_ON(!runtime->hw.channels_min) ||
            snd_BUG_ON(!runtime->hw.channels_max) ||
            snd_BUG_ON(!runtime->hw.formats) ||
            snd_BUG_ON(!runtime->hw.rates)) {
                azx_release_device(azx_dev);
                if (hinfo->ops.close)
                        hinfo->ops.close(hinfo, apcm->codec, substream);
                err = -EINVAL;
                goto powerdown;
        }

        /* disable LINK_ATIME timestamps for capture streams
           until we figure out how to handle digital inputs */
        if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
                runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
                runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
        }

        snd_pcm_set_sync(substream);
        mutex_unlock(&chip->open_mutex);
        return 0;

 powerdown:
        snd_hda_power_down(apcm->codec);
 unlock:
        mutex_unlock(&chip->open_mutex);
        snd_hda_codec_pcm_put(apcm->info);
        return err;
}

static int azx_pcm_mmap(struct snd_pcm_substream *substream,
                        struct vm_area_struct *area)
{
        struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
        struct azx *chip = apcm->chip;
        if (chip->ops->pcm_mmap_prepare)
                chip->ops->pcm_mmap_prepare(substream, area);
        return snd_pcm_lib_default_mmap(substream, area);
}

static const struct snd_pcm_ops azx_pcm_ops = {
        .open = azx_pcm_open,
        .close = azx_pcm_close,
        .hw_params = azx_pcm_hw_params,
        .hw_free = azx_pcm_hw_free,
        .prepare = azx_pcm_prepare,
        .trigger = azx_pcm_trigger,
        .pointer = azx_pcm_pointer,
        .get_time_info = azx_get_time_info,
        .mmap = azx_pcm_mmap,
};

static void azx_pcm_free(struct snd_pcm *pcm)
{
        struct azx_pcm *apcm = pcm->private_data;
        if (apcm) {
                list_del(&apcm->list);
                apcm->info->pcm = NULL;
                kfree(apcm);
        }
}

#define MAX_PREALLOC_SIZE       (32 * 1024 * 1024)

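/*
 * Create a PCM device for the given hda_pcm, attach the PCM ops and set up
 * the preallocated buffers.
 */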
int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
                              struct hda_pcm *cpcm)
{
        struct hdac_bus *bus = &_bus->core;
        struct azx *chip = bus_to_azx(bus);
        struct snd_pcm *pcm;
        struct azx_pcm *apcm;
        int pcm_dev = cpcm->device;
        unsigned int size;
        int s, err;
        int type = SNDRV_DMA_TYPE_DEV_SG;

        list_for_each_entry(apcm, &chip->pcm_list, list) {
                if (apcm->pcm->device == pcm_dev) {
                        dev_err(chip->card->dev, "PCM %d already exists\n",
                                pcm_dev);
                        return -EBUSY;
                }
        }
        err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
                          cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
                          cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
                          &pcm);
        if (err < 0)
                return err;
        strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
        apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
        if (apcm == NULL) {
                snd_device_free(chip->card, pcm);
                return -ENOMEM;
        }
        apcm->chip = chip;
        apcm->pcm = pcm;
        apcm->codec = codec;
        apcm->info = cpcm;
        pcm->private_data = apcm;
        pcm->private_free = azx_pcm_free;
        if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
                pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
        list_add_tail(&apcm->list, &chip->pcm_list);
        cpcm->pcm = pcm;
        for (s = 0; s < 2; s++) {
                if (cpcm->stream[s].substreams)
                        snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
        }
        /* buffer pre-allocation */
        size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
        if (size > MAX_PREALLOC_SIZE)
                size = MAX_PREALLOC_SIZE;
        if (chip->uc_buffer)
                type = SNDRV_DMA_TYPE_DEV_UC_SG;
        snd_pcm_set_managed_buffer_all(pcm, type, chip->card->dev,
                                       size, MAX_PREALLOC_SIZE);
        return 0;
}

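/* extract the codec address (upper four bits) from a CORB command word */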
static unsigned int azx_command_addr(u32 cmd)
{
        unsigned int addr = cmd >> 28;

        if (addr >= AZX_MAX_CODECS) {
                snd_BUG();
                addr = 0;
        }

        return addr;
}

/* receive a response */
static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
                                 unsigned int *res)
{
        struct azx *chip = bus_to_azx(bus);
        struct hda_bus *hbus = &chip->bus;
        int err;

 again:
        err = snd_hdac_bus_get_response(bus, addr, res);
        if (!err)
                return 0;

        if (hbus->no_response_fallback)
                return -EIO;

        if (!bus->polling_mode) {
                dev_warn(chip->card->dev,
                         "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
                         bus->last_cmd[addr]);
                bus->polling_mode = 1;
                goto again;
        }

        if (chip->msi) {
                dev_warn(chip->card->dev,
                         "No response from codec, disabling MSI: last cmd=0x%08x\n",
                         bus->last_cmd[addr]);
                if (chip->ops->disable_msi_reset_irq &&
                    chip->ops->disable_msi_reset_irq(chip) < 0)
                        return -EIO;
                goto again;
        }

        if (chip->probing) {
                /* If this critical timeout happens during the codec probing
                 * phase, this is likely an access to a non-existing codec
                 * slot. Better to return an error and reset the system.
                 */
                return -EIO;
        }

        /* no fallback mechanism? */
        if (!chip->fallback_to_single_cmd)
                return -EIO;

        /* a fatal communication error; need either to reset or to fallback
         * to the single_cmd mode
         */
        if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
                hbus->response_reset = 1;
                dev_err(chip->card->dev,
                        "No response from codec, resetting bus: last cmd=0x%08x\n",
                        bus->last_cmd[addr]);
                return -EAGAIN; /* give a chance to retry */
        }

        dev_err(chip->card->dev,
                "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
                bus->last_cmd[addr]);
        chip->single_cmd = 1;
        hbus->response_reset = 0;
        snd_hdac_bus_stop_cmd_io(bus);
        return -EIO;
}

/*
 * Use the single immediate command instead of CORB/RIRB for simplicity
 *
 * Note: according to Intel, this is not preferred use. The command was
 *       intended for the BIOS only, and may get confused with unsolicited
 *       responses. So, we shouldn't use it for normal operation from the
 *       driver.
 *       I left the code, however, for debugging/testing purposes.
 */

/* receive a response */
static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
{
        int timeout = 50;

        while (timeout--) {
                /* check IRV busy bit */
                if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
                        /* reuse rirb.res as the response return value */
                        azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
                        return 0;
                }
                udelay(1);
        }
        if (printk_ratelimit())
                dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
                        azx_readw(chip, IRS));
        azx_bus(chip)->rirb.res[addr] = -1;
        return -EIO;
}

/* send a command */
static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
{
        struct azx *chip = bus_to_azx(bus);
        unsigned int addr = azx_command_addr(val);
        int timeout = 50;

        bus->last_cmd[azx_command_addr(val)] = val;
        while (timeout--) {
                /* check ICB busy bit */
                if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
                        /* Clear IRV valid bit */
                        azx_writew(chip, IRS, azx_readw(chip, IRS) |
                                   AZX_IRS_VALID);
                        azx_writel(chip, IC, val);
                        azx_writew(chip, IRS, azx_readw(chip, IRS) |
                                   AZX_IRS_BUSY);
                        return azx_single_wait_for_response(chip, addr);
                }
                udelay(1);
        }
        if (printk_ratelimit())
                dev_dbg(chip->card->dev,
                        "send_cmd timeout: IRS=0x%x, val=0x%x\n",
                        azx_readw(chip, IRS), val);
        return -EIO;
}

/* receive a response */
static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
                                   unsigned int *res)
{
        if (res)
                *res = bus->rirb.res[addr];
        return 0;
}

/*
 * The below are the main callbacks from hda_codec.
 *
 * They are just the skeleton to call sub-callbacks according to the
 * current setting of chip->single_cmd.
 */

/* send a command */
static int azx_send_cmd(struct hdac_bus *bus, unsigned int val)
{
        struct azx *chip = bus_to_azx(bus);

        if (chip->disabled)
                return 0;
        if (chip->single_cmd)
                return azx_single_send_cmd(bus, val);
        else
                return snd_hdac_bus_send_cmd(bus, val);
}

/* get a response */
static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
                            unsigned int *res)
{
        struct azx *chip = bus_to_azx(bus);

        if (chip->disabled)
                return 0;
        if (chip->single_cmd)
                return azx_single_get_response(bus, addr, res);
        else
                return azx_rirb_get_response(bus, addr, res);
}

static const struct hdac_bus_ops bus_core_ops = {
        .command = azx_send_cmd,
        .get_response = azx_get_response,
};

#ifdef CONFIG_SND_HDA_DSP_LOADER
/*
 * DSP loading code (e.g. for CA0132)
 */

/* use the first stream for loading DSP */
static struct azx_dev *
azx_get_dsp_loader_dev(struct azx *chip)
{
        struct hdac_bus *bus = azx_bus(chip);
        struct hdac_stream *s;

        list_for_each_entry(s, &bus->stream_list, list)
                if (s->index == chip->playback_index_offset)
                        return stream_to_azx_dev(s);

        return NULL;
}

int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
                                   unsigned int byte_size,
                                   struct snd_dma_buffer *bufp)
{
        struct hdac_bus *bus = &codec->bus->core;
        struct azx *chip = bus_to_azx(bus);
        struct azx_dev *azx_dev;
        struct hdac_stream *hstr;
        bool saved = false;
        int err;

        azx_dev = azx_get_dsp_loader_dev(chip);
        hstr = azx_stream(azx_dev);
        spin_lock_irq(&bus->reg_lock);
        if (hstr->opened) {
                chip->saved_azx_dev = *azx_dev;
                saved = true;
        }
        spin_unlock_irq(&bus->reg_lock);

        err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp);
        if (err < 0) {
                spin_lock_irq(&bus->reg_lock);
                if (saved)
                        *azx_dev = chip->saved_azx_dev;
                spin_unlock_irq(&bus->reg_lock);
                return err;
        }

        hstr->prepared = 0;
        return err;
}
EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare);

void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start)
{
        struct hdac_bus *bus = &codec->bus->core;
        struct azx *chip = bus_to_azx(bus);
        struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);

        snd_hdac_dsp_trigger(azx_stream(azx_dev), start);
}
EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger);

void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
                                    struct snd_dma_buffer *dmab)
{
        struct hdac_bus *bus = &codec->bus->core;
        struct azx *chip = bus_to_azx(bus);
        struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
        struct hdac_stream *hstr = azx_stream(azx_dev);

        if (!dmab->area || !hstr->locked)
                return;

        snd_hdac_dsp_cleanup(hstr, dmab);
        spin_lock_irq(&bus->reg_lock);
        if (hstr->opened)
                *azx_dev = chip->saved_azx_dev;
        hstr->locked = false;
        spin_unlock_irq(&bus->reg_lock);
}
EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup);
#endif /* CONFIG_SND_HDA_DSP_LOADER */

/*
 * reset and start the controller registers
 */
void azx_init_chip(struct azx *chip, bool full_reset)
{
        if (snd_hdac_bus_init_chip(azx_bus(chip), full_reset)) {
                /* correct RINTCNT for CXT */
                if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
                        azx_writew(chip, RINTCNT, 0xc0);
        }
}
EXPORT_SYMBOL_GPL(azx_init_chip);

void azx_stop_all_streams(struct azx *chip)
{
        struct hdac_bus *bus = azx_bus(chip);
        struct hdac_stream *s;

        list_for_each_entry(s, &bus->stream_list, list)
                snd_hdac_stream_stop(s);
}
EXPORT_SYMBOL_GPL(azx_stop_all_streams);

void azx_stop_chip(struct azx *chip)
{
        snd_hdac_bus_stop_chip(azx_bus(chip));
}
EXPORT_SYMBOL_GPL(azx_stop_chip);

/*
 * interrupt handler
 */
static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
{
        struct azx *chip = bus_to_azx(bus);
        struct azx_dev *azx_dev = stream_to_azx_dev(s);

        /* check whether this IRQ is really acceptable */
        if (!chip->ops->position_check ||
            chip->ops->position_check(chip, azx_dev)) {
                spin_unlock(&bus->reg_lock);
                snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
                spin_lock(&bus->reg_lock);
        }
}

irqreturn_t azx_interrupt(int irq, void *dev_id)
{
        struct azx *chip = dev_id;
        struct hdac_bus *bus = azx_bus(chip);
        u32 status;
        bool active, handled = false;
        int repeat = 0; /* count for avoiding endless loop */

#ifdef CONFIG_PM
        if (azx_has_pm_runtime(chip))
                if (!pm_runtime_active(chip->card->dev))
                        return IRQ_NONE;
#endif

        spin_lock(&bus->reg_lock);

        if (chip->disabled)
                goto unlock;

        do {
                status = azx_readl(chip, INTSTS);
                if (status == 0 || status == 0xffffffff)
                        break;

                handled = true;
                active = false;
                if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
                        active = true;

                status = azx_readb(chip, RIRBSTS);
                if (status & RIRB_INT_MASK) {
                        /*
                         * Clearing the interrupt status here ensures that no
                         * interrupt gets masked after the RIRB wp is read in
                         * snd_hdac_bus_update_rirb. This avoids a possible
                         * race condition where codec response in RIRB may
                         * remain unserviced by IRQ, eventually falling back
                         * to polling mode in azx_rirb_get_response.
                         */
                        azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
                        active = true;
                        if (status & RIRB_INT_RESPONSE) {
                                if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
                                        udelay(80);
                                snd_hdac_bus_update_rirb(bus);
                        }
                }
        } while (active && ++repeat < 10);

 unlock:
        spin_unlock(&bus->reg_lock);

        return IRQ_RETVAL(handled);
}
EXPORT_SYMBOL_GPL(azx_interrupt);

/*
 * Codec interface
 */

/*
 * Probe the given codec address
 */
static int probe_codec(struct azx *chip, int addr)
{
        unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
                (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
        struct hdac_bus *bus = azx_bus(chip);
        int err;
        unsigned int res = -1;

        mutex_lock(&bus->cmd_mutex);
        chip->probing = 1;
        azx_send_cmd(bus, cmd);
        err = azx_get_response(bus, addr, &res);
        chip->probing = 0;
        mutex_unlock(&bus->cmd_mutex);
        if (err < 0 || res == -1)
                return -EIO;
        dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
        return 0;
}

void snd_hda_bus_reset(struct hda_bus *bus)
{
        struct azx *chip = bus_to_azx(&bus->core);

        bus->in_reset = 1;
        azx_stop_chip(chip);
        azx_init_chip(chip, true);
        if (bus->core.chip_init)
                snd_hda_bus_reset_codecs(bus);
        bus->in_reset = 0;
}

/* HD-audio bus initialization */
int azx_bus_init(struct azx *chip, const char *model)
{
        struct hda_bus *bus = &chip->bus;
        int err;

        err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops);
        if (err < 0)
                return err;

        bus->card = chip->card;
        mutex_init(&bus->prepare_mutex);
        bus->pci = chip->pci;
        bus->modelname = model;
        bus->mixer_assigned = -1;
        bus->core.snoop = azx_snoop(chip);
        if (chip->get_position[0] != azx_get_pos_lpib ||
            chip->get_position[1] != azx_get_pos_lpib)
                bus->core.use_posbuf = true;
        bus->core.bdl_pos_adj = chip->bdl_pos_adj;
        if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
                bus->core.corbrp_self_clear = true;

        if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
                bus->core.align_bdle_4k = true;

        /* enable sync_write flag for stable communication as default */
        bus->core.sync_write = 1;

        return 0;
}
EXPORT_SYMBOL_GPL(azx_bus_init);

/* Probe codecs */
int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
{
        struct hdac_bus *bus = azx_bus(chip);
        int c, codecs, err;

        codecs = 0;
        if (!max_slots)
                max_slots = AZX_DEFAULT_CODECS;

        /* First try to probe all given codec slots */
        for (c = 0; c < max_slots; c++) {
                if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
                        if (probe_codec(chip, c) < 0) {
                                /* Some BIOSen give you wrong codec addresses
                                 * that don't exist
                                 */
                                dev_warn(chip->card->dev,
                                         "Codec #%d probe error; disabling it...\n", c);
                                bus->codec_mask &= ~(1 << c);
                                /* Worse, accessing a non-existing codec often
                                 * screws up the controller chip and disturbs
                                 * further communication.
                                 * Thus if an error occurs during probing, it's
                                 * better to reset the controller chip to get
                                 * back to a sane state.
                                 */
                                azx_stop_chip(chip);
                                azx_init_chip(chip, true);
                        }
                }
        }

        /* Then create codec instances */
        for (c = 0; c < max_slots; c++) {
                if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
                        struct hda_codec *codec;
                        err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
                        if (err < 0)
                                continue;
                        codec->jackpoll_interval = chip->jackpoll_interval;
                        codec->beep_mode = chip->beep_mode;
                        codecs++;
                }
        }
        if (!codecs) {
                dev_err(chip->card->dev, "no codecs initialized\n");
                return -ENXIO;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(azx_probe_codecs);

/* configure each codec instance */
int azx_codec_configure(struct azx *chip)
{
        struct hda_codec *codec, *next;
        int success = 0;

        list_for_each_codec(codec, &chip->bus) {
                if (!snd_hda_codec_configure(codec))
                        success++;
        }

        if (success) {
                /* unregister failed codecs if any codec has been probed */
                list_for_each_codec_safe(codec, next, &chip->bus) {
                        if (!codec->configured) {
                                codec_err(codec, "Unable to configure, disabling\n");
                                snd_hdac_device_unregister(&codec->core);
                        }
                }
        }

        return success ? 0 : -ENODEV;
}
EXPORT_SYMBOL_GPL(azx_codec_configure);

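/* map a stream index to capture or playback according to the capture index range */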
static int stream_direction(struct azx *chip, unsigned char index)
{
        if (index >= chip->capture_index_offset &&
            index < chip->capture_index_offset + chip->capture_streams)
                return SNDRV_PCM_STREAM_CAPTURE;
        return SNDRV_PCM_STREAM_PLAYBACK;
}

/* initialize SD streams */
int azx_init_streams(struct azx *chip)
{
        int i;
        int stream_tags[2] = { 0, 0 };

        /* initialize each stream (aka device)
         * assign the starting bdl address to each stream (device)
         * and initialize
         */
        for (i = 0; i < chip->num_streams; i++) {
                struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
                int dir, tag;

                if (!azx_dev)
                        return -ENOMEM;

                dir = stream_direction(chip, i);
                /* stream tag must be unique throughout
                 * the stream direction group,
                 * valid values 1...15
                 * use separate stream tag if the flag
                 * AZX_DCAPS_SEPARATE_STREAM_TAG is used
                 */
                if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
                        tag = ++stream_tags[dir];
                else
                        tag = i + 1;
                snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
                                     i, dir, tag);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(azx_init_streams);

void azx_free_streams(struct azx *chip)
{
        struct hdac_bus *bus = azx_bus(chip);
        struct hdac_stream *s;

        while (!list_empty(&bus->stream_list)) {
                s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
                list_del(&s->list);
                kfree(stream_to_azx_dev(s));
        }
}
EXPORT_SYMBOL_GPL(azx_free_streams);