// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/intel_th.h>
#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)

/*
 * Lockout state transitions:
 *   READY -> INUSE -+-> LOCKED -+-> READY -> etc.
 *                   \-----------/
 * WIN_READY:	window can be used by HW
 * WIN_INUSE:	window is in use
 * WIN_LOCKED:	window is filled up and is being processed by the buffer
 *		handling code
 *
 * All state transitions happen automatically, except for the LOCKED->READY,
 * which needs to be signalled by the buffer code by calling
 * intel_th_msc_window_unlock().
 *
 * When the interrupt handler has to switch to the next window, it checks
 * whether it's READY, and if it is, it performs the switch and tracing
 * continues. If it's LOCKED, it stops the trace.
 */
enum lockout_state {
	WIN_READY = 0,
	WIN_INUSE,
	WIN_LOCKED
};

/**
 * struct msc_window - multiblock mode window descriptor
 * @entry: window list linkage (msc::win_list)
 * @pgoff: page offset into the buffer that this window starts at
 * @lockout: lockout state, see comment above
 * @lo_lock: lockout state serialization
 * @nr_blocks: number of blocks (pages) in this window
 * @nr_segs: number of segments in this window (<= @nr_blocks)
 * @msc: MSC that this window belongs to
 * @_sgt: embedded sg_table of block descriptors, used by default
 * @sgt: sg_table of block descriptors in use; points to @_sgt unless
 *	a buffer sink supplies its own
 */
struct msc_window {
	struct list_head	entry;
	unsigned long		pgoff;
	enum lockout_state	lockout;
	spinlock_t		lo_lock;
	unsigned int		nr_blocks;
	unsigned int		nr_segs;
	struct msc		*msc;
	struct sg_table		_sgt;
	struct sg_table		*sgt;
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry: msc::iter_list linkage
 * @msc: pointer to the MSC device
 * @start_win: oldest window
 * @win: current window
 * @offset: current logical offset into the buffer
 * @start_block: oldest block in the window
 * @block: current block in the window
 * @block_off: offset into current block
 * @wrap_count: block wrapping handling
 * @eof: end of buffer reached
 */
struct msc_iter {
	struct list_head	entry;
	struct msc		*msc;
	struct msc_window	*start_win;
	struct msc_window	*win;
	unsigned long		offset;
	struct scatterlist	*start_block;
	struct scatterlist	*block;
	unsigned int		block_off;
	unsigned int		wrap_count;
	unsigned int		eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base: register window base address
 * @msu_base: MSU global register window base address
 * @thdev: intel_th_device pointer
 * @mbuf: MSU buffer, if assigned
 * @mbuf_priv: MSU buffer's private data, if @mbuf
 * @work: deferred trace shutdown, scheduled from the interrupt handler
 * @win_list: list of windows in multiblock mode
 * @single_sgt: single mode buffer
 * @cur_win: current window
 * @nr_pages: total number of pages allocated for this buffer
 * @single_sz: amount of data in single mode
 * @single_wrap: single mode wrap occurred
 * @base: buffer's base pointer
 * @base_addr: buffer's base address
 * @orig_addr: saved MSC0BAR value, restored when tracing is disabled
 * @orig_sz: saved MSC0SIZE value, restored when tracing is disabled
 * @user_count: number of users of the buffer
 * @mmap_count: number of mappings
 * @buf_mutex: mutex to serialize access to buffer-related bits
 * @iter_list: list of open character device readers
 * @enabled: MSC is enabled
 * @wrap: wrapping is enabled
 * @do_irq: IRQ resource is available, handle interrupts
 * @multi_is_broken: multiblock mode is broken on this device, use single mode
 * @mode: MSC operating mode
 * @burst_len: write burst length
 * @index: number of this MSC in the MSU
 */
struct msc {
	void __iomem		*reg_base;
	void __iomem		*msu_base;
	struct intel_th_device	*thdev;

	const struct msu_buffer	*mbuf;
	void			*mbuf_priv;

	struct work_struct	work;
	struct list_head	win_list;
	struct sg_table		single_sgt;
	struct msc_window	*cur_win;
	unsigned long		nr_pages;
	unsigned long		single_sz;
	unsigned int		single_wrap : 1;
	void			*base;
	dma_addr_t		base_addr;
	u32			orig_addr;
	u32			orig_sz;

	/* <0: no buffer, 0: no users, >0: active users */
	atomic_t		user_count;

	atomic_t		mmap_count;
	struct mutex		buf_mutex;

	struct list_head	iter_list;

	/* config */
	unsigned int		enabled : 1,
				wrap	: 1,
				do_irq	: 1,
				multi_is_broken : 1;
	unsigned int		mode;
	unsigned int		burst_len;
	unsigned int		index;
};

static LIST_HEAD(msu_buffer_list);
static DEFINE_MUTEX(msu_buffer_mutex);

/**
 * struct msu_buffer_entry - internal MSU buffer bookkeeping
 * @entry: link to msu_buffer_list
 * @mbuf: MSU buffer object
 * @owner: module that provides this MSU buffer
 */
struct msu_buffer_entry {
	struct list_head	entry;
	const struct msu_buffer	*mbuf;
	struct module		*owner;
};

static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
{
	struct msu_buffer_entry *mbe;

	lockdep_assert_held(&msu_buffer_mutex);

	list_for_each_entry(mbe, &msu_buffer_list, entry) {
		if (!strcmp(mbe->mbuf->name, name))
			return mbe;
	}

	return NULL;
}

static const struct msu_buffer *
msu_buffer_get(const char *name)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(name);
	if (mbe && !try_module_get(mbe->owner))
		mbe = NULL;
	mutex_unlock(&msu_buffer_mutex);

	return mbe ? mbe->mbuf : NULL;
}

static void msu_buffer_put(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe)
		module_put(mbe->owner);
	mutex_unlock(&msu_buffer_mutex);
}

int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
				 struct module *owner)
{
	struct msu_buffer_entry *mbe;
	int ret = 0;

	mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
	if (!mbe)
		return -ENOMEM;

	mutex_lock(&msu_buffer_mutex);
	if (__msu_buffer_entry_find(mbuf->name)) {
		ret = -EEXIST;
		kfree(mbe);
		goto unlock;
	}

	mbe->mbuf = mbuf;
	mbe->owner = owner;
	list_add_tail(&mbe->entry, &msu_buffer_list);
unlock:
	mutex_unlock(&msu_buffer_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);

void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe) {
		list_del(&mbe->entry);
		kfree(mbe);
	}
	mutex_unlock(&msu_buffer_mutex);
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);
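/*
 * Illustrative sketch (not part of this driver): an external buffer
 * sink module would wire itself up to the two symbols above roughly
 * like this. The "mysink" name and its callbacks are hypothetical;
 * only the struct msu_buffer fields and the register/unregister
 * calls are real.
 *
 *	static const struct msu_buffer mysink_mbuf = {
 *		.name		= "mysink",
 *		.assign		= mysink_assign,
 *		.unassign	= mysink_unassign,
 *		.ready		= mysink_ready,
 *	};
 *
 *	static int __init mysink_init(void)
 *	{
 *		return intel_th_msu_buffer_register(&mysink_mbuf,
 *						    THIS_MODULE);
 *	}
 *
 *	static void __exit mysink_exit(void)
 *	{
 *		intel_th_msu_buffer_unregister(&mysink_mbuf);
 *	}
 *
 * Once registered, writing "mysink" to the mode attribute selects it.
 */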

static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
	/* header hasn't been written */
	if (!bdesc->valid_dw)
		return true;

	/* valid_dw includes the header */
	if (!msc_data_sz(bdesc))
		return true;

	return false;
}

static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
{
	return win->sgt->sgl;
}

static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
{
	return sg_virt(msc_win_base_sg(win));
}

static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
{
	return sg_dma_address(msc_win_base_sg(win));
}

static inline unsigned long
msc_win_base_pfn(struct msc_window *win)
{
	return PFN_DOWN(msc_win_base_dma(win));
}

/**
 * msc_is_last_win() - check if a window is the last one for a given MSC
 * @win: window
 * Return: true if @win is the last window in MSC's multiblock buffer
 */
static inline bool msc_is_last_win(struct msc_window *win)
{
	return win->entry.next == &win->msc->win_list;
}

/**
 * msc_next_window() - return next window in the multiblock buffer
 * @win: current window
 *
 * Return: window following the current one
 */
static struct msc_window *msc_next_window(struct msc_window *win)
{
	if (msc_is_last_win(win))
		return list_first_entry(&win->msc->win_list, struct msc_window,
					entry);

	return list_next_entry(win, entry);
}

static size_t msc_win_total_sz(struct msc_window *win)
{
	struct scatterlist *sg;
	unsigned int blk;
	size_t size = 0;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_wrapped(bdesc))
			return (size_t)win->nr_blocks << PAGE_SHIFT;

		size += msc_total_sz(bdesc);
		if (msc_block_last_written(bdesc))
			break;
	}

	return size;
}

/**
 * msc_find_window() - find a window matching a given sg_table
 * @msc: MSC device
 * @sgt: SG table of the window
 * @nonempty: skip over empty windows
 *
 * Return: MSC window structure pointer or NULL if the window
 * could not be found.
 */
static struct msc_window *
msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
{
	struct msc_window *win;
	unsigned int found = 0;

	if (list_empty(&msc->win_list))
		return NULL;

	/*
	 * we might need a radix tree for this, depending on how
	 * many windows a typical user would allocate; ideally it's
	 * something like 2, in which case we're good
	 */
	list_for_each_entry(win, &msc->win_list, entry) {
		if (win->sgt == sgt)
			found++;

		/* skip the empty ones */
		if (nonempty && msc_block_is_empty(msc_win_base(win)))
			continue;

		if (found)
			return win;
	}

	return NULL;
}

/**
 * msc_oldest_window() - locate the window with oldest data
 * @msc: MSC device
 *
 * This should only be used in multiblock mode. Caller should hold the
 * msc::user_count reference.
 *
 * Return: the oldest window with valid data
 */
static struct msc_window *msc_oldest_window(struct msc *msc)
{
	struct msc_window *win;

	if (list_empty(&msc->win_list))
		return NULL;

	win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
	if (win)
		return win;

	return list_first_entry(&msc->win_list, struct msc_window, entry);
}

/**
 * msc_win_oldest_sg() - locate the oldest block in a given window
 * @win: window to look at
 *
 * Return: scatterlist element of the block with the oldest data
 */
static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
{
	unsigned int blk;
	struct scatterlist *sg;
	struct msc_block_desc *bdesc = msc_win_base(win);

	/* without wrapping, first block is the oldest */
	if (!msc_block_wrapped(bdesc))
		return msc_win_base_sg(win);

	/*
	 * with wrapping, last written block contains both the newest and the
	 * oldest data for this window.
	 */
	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_last_written(bdesc))
			return sg;
	}

	return msc_win_base_sg(win);
}

static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
	return sg_virt(iter->block);
}

static struct msc_iter *msc_iter_install(struct msc *msc)
{
	struct msc_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&msc->buf_mutex);

	/*
	 * Reading and tracing are mutually exclusive; if msc is
	 * enabled, open() will fail; otherwise existing readers
	 * will prevent enabling the msc and the rest of fops don't
	 * need to worry about it.
	 */
	if (msc->enabled) {
		kfree(iter);
		iter = ERR_PTR(-EBUSY);
		goto unlock;
	}

	iter->msc = msc;

	list_add_tail(&iter->entry, &msc->iter_list);
unlock:
	mutex_unlock(&msc->buf_mutex);

	return iter;
}

static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
	mutex_lock(&msc->buf_mutex);
	list_del(&iter->entry);
	mutex_unlock(&msc->buf_mutex);

	kfree(iter);
}

static void msc_iter_block_start(struct msc_iter *iter)
{
	if (iter->start_block)
		return;

	iter->start_block = msc_win_oldest_sg(iter->win);
	iter->block = iter->start_block;
	iter->wrap_count = 0;

	/*
	 * start with the block with oldest data; if data has wrapped
	 * in this window, it should be in this block
	 */
	if (msc_block_wrapped(msc_iter_bdesc(iter)))
		iter->wrap_count = 2;

}

static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
{
	/* already started, nothing to do */
	if (iter->start_win)
		return 0;

	iter->start_win = msc_oldest_window(msc);
	if (!iter->start_win)
		return -EINVAL;

	iter->win = iter->start_win;
	iter->start_block = NULL;

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_win_advance(struct msc_iter *iter)
{
	iter->win = msc_next_window(iter->win);
	iter->start_block = NULL;

	if (iter->win == iter->start_win) {
		iter->eof++;
		return 1;
	}

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_block_advance(struct msc_iter *iter)
{
	iter->block_off = 0;

	/* wrapping */
	if (iter->wrap_count && iter->block == iter->start_block) {
		iter->wrap_count--;
		if (!iter->wrap_count)
			/* copied newest data from the wrapped block */
			return msc_iter_win_advance(iter);
	}

	/* no wrapping, check for last written block */
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
		/* copied newest data for the window */
		return msc_iter_win_advance(iter);

	/* block advance */
	if (sg_is_last(iter->block))
		iter->block = msc_win_base_sg(iter->win);
	else
		iter->block = sg_next(iter->block);

	/* no wrapping, sanity check in case there is no last written block */
	if (!iter->wrap_count && iter->block == iter->start_block)
		return msc_iter_win_advance(iter);

	return 0;
}

/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter: iterator structure
 * @size: amount of data to scan
 * @data: callback's private data
 * @fn: iterator callback
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes.
 *
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return: amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
		   unsigned long (*fn)(void *, void *, size_t))
{
	struct msc *msc = iter->msc;
	size_t len = size;
	unsigned int advance;

	if (iter->eof)
		return 0;

	/* start with the oldest window */
	if (msc_iter_win_start(iter, msc))
		return 0;

	do {
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
		size_t tocopy = data_bytes, copied = 0;
		size_t remaining = 0;

		advance = 1;

		/*
		 * If block wrapping happened, we need to visit the last block
		 * twice, because it contains both the oldest and the newest
		 * data in this window.
		 *
		 * First time (wrap_count==2), in the very beginning, to collect
		 * the oldest data, which is in the range
		 * (data_bytes..DATA_IN_PAGE).
		 *
		 * Second time (wrap_count==1), it's just like any other block,
		 * containing data in the range of [MSC_BDESC..data_bytes].
		 */
		if (iter->block == iter->start_block && iter->wrap_count == 2) {
			tocopy = DATA_IN_PAGE - data_bytes;
			src += data_bytes;
		}

		if (!tocopy)
			goto next_block;

		tocopy -= iter->block_off;
		src += iter->block_off;

		if (len < tocopy) {
			tocopy = len;
			advance = 0;
		}

		remaining = fn(data, src, tocopy);

		if (remaining)
			advance = 0;

		copied = tocopy - remaining;
		len -= copied;
		iter->block_off += copied;
		iter->offset += copied;

		if (!advance)
			break;

next_block:
		if (msc_iter_block_advance(iter))
			break;

	} while (len);

	return size - len;
}
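/*
 * Worked example of the wrap handling above (numbers are illustrative):
 * for a wrapped block whose valid data ends at data_bytes == 1024, the
 * block is visited twice. The first visit (wrap_count == 2) copies the
 * oldest bytes, the (1024..DATA_IN_PAGE) range past the write pointer;
 * the second visit (wrap_count == 1) treats it like any other block and
 * copies the [MSC_BDESC..1024] range, which holds the newest data.
 * Unwrapped blocks are visited once.
 */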

/**
 * msc_buffer_clear_hw_header() - clear hw header for multiblock
 * @msc: MSC device
 */
static void msc_buffer_clear_hw_header(struct msc *msc)
{
	struct msc_window *win;
	struct scatterlist *sg;

	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		size_t hw_sz = sizeof(struct msc_block_desc) -
			offsetof(struct msc_block_desc, hw_tag);

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset(&bdesc->hw_tag, 0, hw_sz);
		}
	}
}

static int intel_th_msu_init(struct msc *msc)
{
	u32 mintctl, msusts;

	if (!msc->do_irq)
		return 0;

	if (!msc->mbuf)
		return 0;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl |= msc->index ? M1BLIE : M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
	if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
		dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
		msc->do_irq = 0;
		return 0;
	}

	msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	return 0;
}

static void intel_th_msu_deinit(struct msc *msc)
{
	u32 mintctl;

	if (!msc->do_irq)
		return;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
}

static int msc_win_set_lockout(struct msc_window *win,
			       enum lockout_state expect,
			       enum lockout_state new)
{
	enum lockout_state old;
	unsigned long flags;
	int ret = 0;

	if (!win->msc->mbuf)
		return 0;

	spin_lock_irqsave(&win->lo_lock, flags);
	old = win->lockout;

	if (old != expect) {
		ret = -EINVAL;
		goto unlock;
	}

	win->lockout = new;

	if (old == expect && new == WIN_LOCKED)
		atomic_inc(&win->msc->user_count);
	else if (old == expect && old == WIN_LOCKED)
		atomic_dec(&win->msc->user_count);

unlock:
	spin_unlock_irqrestore(&win->lo_lock, flags);

	if (ret) {
		if (expect == WIN_READY && old == WIN_LOCKED)
			return -EBUSY;

		/* from intel_th_msc_window_unlock(), don't warn if not locked */
		if (expect == WIN_LOCKED && old == new)
			return 0;

		dev_warn_ratelimited(msc_dev(win->msc),
				     "expected lockout state %d, got %d\n",
				     expect, old);
	}

	return ret;
}
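/*
 * For reference, the transitions actually requested by this driver are:
 * msc_configure() does READY -> INUSE on the current window before
 * enabling the trace, the interrupt handler does READY -> INUSE on the
 * next window (refusing the switch if that fails), msc_disable() and
 * the interrupt handler do INUSE -> LOCKED on the outgoing window, and
 * intel_th_msc_window_unlock() does LOCKED -> READY once the buffer
 * sink is done with the data.
 */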
/**
 * msc_configure() - set up MSC hardware
 * @msc: the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. Then, enable tracing and set msc::enabled.
 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
 */
static int msc_configure(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode > MSC_MODE_MULTI)
		return -EINVAL;

	if (msc->mode == MSC_MODE_MULTI) {
		if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
			return -EBUSY;

		msc_buffer_clear_hw_header(msc);
	}

	msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
	msc->orig_sz = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);

	reg = msc->base_addr >> PAGE_SHIFT;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = msc->nr_pages;
		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

	reg |= MSC_EN;
	reg |= msc->mode << __ffs(MSC_MODE);
	reg |= msc->burst_len << __ffs(MSC_LEN);

	if (msc->wrap)
		reg |= MSC_WRAPEN;

	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	intel_th_msu_init(msc);

	msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
	intel_th_trace_enable(msc->thdev);
	msc->enabled = 1;

	if (msc->mbuf && msc->mbuf->activate)
		msc->mbuf->activate(msc->mbuf_priv);

	return 0;
}

/**
 * msc_disable() - disable MSC hardware
 * @msc: MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage. Caller must hold msc::buf_mutex.
 */
static void msc_disable(struct msc *msc)
{
	struct msc_window *win = msc->cur_win;
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_MULTI)
		msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	if (msc->mbuf && msc->mbuf->deactivate)
		msc->mbuf->deactivate(msc->mbuf_priv);
	intel_th_msu_deinit(msc);
	intel_th_trace_disable(msc->thdev);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
			reg, msc->single_sz, msc->single_wrap);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~MSC_EN;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	msc->enabled = 0;

	iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
	iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);

	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);

	reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
	reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
}

static int intel_th_msc_activate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret = -EBUSY;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return -ENODEV;

	mutex_lock(&msc->buf_mutex);

	/* if there are readers, refuse */
	if (list_empty(&msc->iter_list))
		ret = msc_configure(msc);

	mutex_unlock(&msc->buf_mutex);

	if (ret)
		atomic_dec(&msc->user_count);

	return ret;
}

static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);

	mutex_lock(&msc->buf_mutex);
	if (msc->enabled) {
		msc_disable(msc);
		atomic_dec(&msc->user_count);
	}
	mutex_unlock(&msc->buf_mutex);
}

/**
 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
 * @msc: MSC device
 * @size: allocation size in bytes
 *
 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
 * caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned int order = get_order(size);
	struct page *page;
	int ret;

	if (!size)
		return 0;

	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
	if (ret)
		goto err_out;

	ret = -ENOMEM;
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
	if (!page)
		goto err_free_sgt;

	split_page(page, order);
	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);

	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
			 DMA_FROM_DEVICE);
	if (ret < 0)
		goto err_free_pages;

	msc->nr_pages = nr_pages;
	msc->base = page_address(page);
	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);

	return 0;

err_free_pages:
	__free_pages(page, order);

err_free_sgt:
	sg_free_table(&msc->single_sgt);

err_out:
	return ret;
}

/**
 * msc_buffer_contig_free() - free a contiguous buffer
 * @msc: MSC configured in SINGLE mode
 */
static void msc_buffer_contig_free(struct msc *msc)
{
	unsigned long off;

	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
		     1, DMA_FROM_DEVICE);
	sg_free_table(&msc->single_sgt);

	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
		struct page *page = virt_to_page(msc->base + off);

		page->mapping = NULL;
		__free_page(page);
	}

	msc->nr_pages = 0;
}

/**
 * msc_buffer_contig_get_page() - find a page at a given offset
 * @msc: MSC configured in SINGLE mode
 * @pgoff: page offset
 *
 * Return: page, if @pgoff is within the range, NULL otherwise.
 */
static struct page *msc_buffer_contig_get_page(struct msc *msc,
					       unsigned long pgoff)
{
	if (pgoff >= msc->nr_pages)
		return NULL;

	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}

static int __msc_buffer_win_alloc(struct msc_window *win,
				  unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	void *block;
	int i, ret;

	ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
	if (ret)
		return -ENOMEM;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
					   PAGE_SIZE, &sg_dma_address(sg_ptr),
					   GFP_KERNEL);
		if (!block)
			goto err_nomem;

		sg_set_buf(sg_ptr, block, PAGE_SIZE);
	}

	return nr_segs;

err_nomem:
	for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg_ptr), sg_dma_address(sg_ptr));

	sg_free_table(win->sgt);

	return -ENOMEM;
}

#ifdef CONFIG_X86
static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	int i;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		/* Set the page as uncached */
		set_memory_uc((unsigned long)sg_virt(sg_ptr),
			      PFN_DOWN(sg_ptr->length));
	}
}

static void msc_buffer_set_wb(struct msc_window *win)
{
	struct scatterlist *sg_ptr;
	int i;

	for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
		/* Reset the page to write-back */
		set_memory_wb((unsigned long)sg_virt(sg_ptr),
			      PFN_DOWN(sg_ptr->length));
	}
}
#else /* !X86 */
static inline void
msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {}
static inline void msc_buffer_set_wb(struct msc_window *win) {}
#endif /* CONFIG_X86 */

/**
 * msc_buffer_win_alloc() - alloc a window for a multiblock mode
 * @msc: MSC device
 * @nr_blocks: number of pages in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
	struct msc_window *win;
	int ret = -ENOMEM;

	if (!nr_blocks)
		return 0;

	win = kzalloc(sizeof(*win), GFP_KERNEL);
	if (!win)
		return -ENOMEM;

	win->msc = msc;
	win->sgt = &win->_sgt;
	win->lockout = WIN_READY;
	spin_lock_init(&win->lo_lock);

	if (!list_empty(&msc->win_list)) {
		struct msc_window *prev = list_last_entry(&msc->win_list,
							  struct msc_window,
							  entry);

		win->pgoff = prev->pgoff + prev->nr_blocks;
	}

	if (msc->mbuf && msc->mbuf->alloc_window)
		ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
					      nr_blocks << PAGE_SHIFT);
	else
		ret = __msc_buffer_win_alloc(win, nr_blocks);

	if (ret <= 0)
		goto err_nomem;

	msc_buffer_set_uc(win, ret);

	win->nr_segs = ret;
	win->nr_blocks = nr_blocks;

	if (list_empty(&msc->win_list)) {
		msc->base = msc_win_base(win);
		msc->base_addr = msc_win_base_dma(win);
		msc->cur_win = win;
	}

	list_add_tail(&win->entry, &msc->win_list);
	msc->nr_pages += nr_blocks;

	return 0;

err_nomem:
	kfree(win);

	return ret;
}

static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
		struct page *page = sg_page(sg);

		page->mapping = NULL;
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg), sg_dma_address(sg));
	}
	sg_free_table(win->sgt);
}

/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc: MSC device
 * @win: window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	msc->nr_pages -= win->nr_blocks;

	list_del(&win->entry);
	if (list_empty(&msc->win_list)) {
		msc->base = NULL;
		msc->base_addr = 0;
	}

	msc_buffer_set_wb(win);

	if (msc->mbuf && msc->mbuf->free_window)
		msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
	else
		__msc_buffer_win_free(msc, win);

	kfree(win);
}

/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc: MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
	struct msc_window *win, *next_win;

	/* call with msc::mutex locked */
	list_for_each_entry(win, &msc->win_list, entry) {
		struct scatterlist *sg;
		unsigned int blk;
		u32 sw_tag = 0;

		/*
		 * Last window's next_win should point to the first window
		 * and MSC_SW_TAG_LASTWIN should be set.
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_first_entry(&msc->win_list,
						    struct msc_window, entry);
		} else {
			next_win = list_next_entry(win, entry);
		}

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset(bdesc, 0, sizeof(*bdesc));

			bdesc->next_win = msc_win_base_pfn(next_win);

			/*
			 * Similarly to last window, last block should point
			 * to the first one.
			 */
			if (blk == win->nr_segs - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk = msc_win_base_pfn(win);
			} else {
				dma_addr_t addr = sg_dma_address(sg_next(sg));

				bdesc->next_blk = PFN_DOWN(addr);
			}

			bdesc->sw_tag = sw_tag;
			bdesc->block_sz = sg->length / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}
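/*
 * To make the linkage concrete: with two windows of two blocks each,
 * W0's descriptors chain as W0B0 -> W0B1 -> (LASTBLK, back to W0B0),
 * and every block in W0 carries next_win = W1's base PFN; W1's blocks
 * mirror that and, being the last window, also carry MSC_SW_TAG_LASTWIN
 * with next_win pointing back at W0. This is what lets the hardware
 * walk the whole multiwindow buffer as a ring.
 */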

static void msc_buffer_multi_free(struct msc *msc)
{
	struct msc_window *win, *iter;

	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
		msc_buffer_win_free(msc, win);
}

static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	int ret, i;

	for (i = 0; i < nr_wins; i++) {
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
		if (ret) {
			msc_buffer_multi_free(msc);
			return ret;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc: MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
 */
static void msc_buffer_free(struct msc *msc)
{
	if (msc->mode == MSC_MODE_SINGLE)
		msc_buffer_contig_free(msc);
	else if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_multi_free(msc);
}

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc: MSC device
 * @nr_pages: array of window sizes, in pages
 * @nr_wins: number of windows in @nr_pages
 *
 * Allocate a storage buffer for MSC, depending on the msc::mode, it will be
 * either done via msc_buffer_contig_alloc() for SINGLE operation mode or
 * msc_buffer_win_alloc() for multiblock operation. The latter allocates one
 * window per invocation, so in multiblock mode this can be called multiple
 * times for the same MSC to allocate multiple windows.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
			    unsigned int nr_wins)
{
	int ret;

	/* -1: buffer not allocated */
	if (atomic_read(&msc->user_count) != -1)
		return -EBUSY;

	if (msc->mode == MSC_MODE_SINGLE) {
		if (nr_wins != 1)
			return -EINVAL;

		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
	} else if (msc->mode == MSC_MODE_MULTI) {
		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		/* allocation should be visible before the counter goes to 0 */
		smp_mb__before_atomic();

		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
			return -EINVAL;
	}

	return ret;
}

/**
 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
 * @msc: MSC device
 *
 * This will free MSC buffer unless it is in use or there is no allocated
 * buffer.
 * Caller needs to hold msc::buf_mutex.
 *
 * Return: 0 on successful deallocation or if there was no buffer to
 * deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
{
	int count, ret = 0;

	count = atomic_cmpxchg(&msc->user_count, 0, -1);

	/* > 0: buffer is allocated and has users */
	if (count > 0)
		ret = -EBUSY;
	/* 0: buffer is allocated, no users */
	else if (!count)
		msc_buffer_free(msc);
	/* < 0: no buffer, nothing to do */

	return ret;
}

/**
 * msc_buffer_free_unless_used() - free a buffer unless it's in use
 * @msc: MSC device
 *
 * This is a locked version of msc_buffer_unlocked_free_unless_used().
 */
static int msc_buffer_free_unless_used(struct msc *msc)
{
	int ret;

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

/**
 * msc_buffer_get_page() - get MSC buffer page at a given offset
 * @msc: MSC device
 * @pgoff: page offset into the storage buffer
 *
 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
 * the caller.
 *
 * Return: page if @pgoff corresponds to a valid buffer page or NULL.
 */
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
	struct msc_window *win;
	struct scatterlist *sg;
	unsigned int blk;

	if (msc->mode == MSC_MODE_SINGLE)
		return msc_buffer_contig_get_page(msc, pgoff);

	list_for_each_entry(win, &msc->win_list, entry)
		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
			goto found;

	return NULL;

found:
	pgoff -= win->pgoff;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct page *page = sg_page(sg);
		size_t pgsz = PFN_DOWN(sg->length);

		if (pgoff < pgsz)
			return page + pgoff;

		pgoff -= pgsz;
	}

	return NULL;
}

/**
 * struct msc_win_to_user_struct - data for copy_to_user() callback
 * @buf: userspace buffer to copy data to
 * @offset: running offset
 */
struct msc_win_to_user_struct {
	char __user	*buf;
	unsigned long	offset;
};

/**
 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
 * @data: callback's private data
 * @src: source buffer
 * @len: amount of data to copy from the source buffer
 */
static unsigned long msc_win_to_user(void *data, void *src, size_t len)
{
	struct msc_win_to_user_struct *u = data;
	unsigned long ret;

	ret = copy_to_user(u->buf + u->offset, src, len);
	u->offset += len - ret;

	return ret;
}

/*
 * file operations' callbacks
 */

static int intel_th_msc_open(struct inode *inode, struct file *file)
{
	struct intel_th_device *thdev = file->private_data;
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	struct msc_iter *iter;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	iter = msc_iter_install(msc);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	file->private_data = iter;

	return nonseekable_open(inode, file);
}

static int intel_th_msc_release(struct inode *inode, struct file *file)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;

	msc_iter_remove(iter, msc);

	return 0;
}

static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
	unsigned long start = off, tocopy = 0;

	if (msc->single_wrap) {
		start += msc->single_sz;
		if (start < size) {
			tocopy = min(rem, size - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			buf += tocopy;
			rem -= tocopy;
			start += tocopy;
		}

		start &= size - 1;
		if (rem) {
			tocopy = min(rem, msc->single_sz - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			rem -= tocopy;
		}

		return len - rem;
	}

	if (copy_to_user(buf, msc->base + start, rem))
		return -EFAULT;

	return len;
}

static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;
	size_t size;
	loff_t off = *ppos;
	ssize_t ret = 0;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return 0;

	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
		size = msc->single_sz;
	else
		size = msc->nr_pages << PAGE_SHIFT;

	if (!size)
		goto put_count;

	if (off >= size)
		goto put_count;

	if (off + len >= size)
		len = size - off;

	if (msc->mode == MSC_MODE_SINGLE) {
		ret = msc_single_to_user(msc, buf, off, len);
		if (ret >= 0)
			*ppos += ret;
	} else if (msc->mode == MSC_MODE_MULTI) {
		struct msc_win_to_user_struct u = {
			.buf	= buf,
			.offset	= 0,
		};

		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
		if (ret >= 0)
			*ppos = iter->offset;
	} else {
		ret = -EINVAL;
	}

put_count:
	atomic_dec(&msc->user_count);

	return ret;
}

/*
 * vm operations callbacks (vm_ops)
 */

static void msc_mmap_open(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	atomic_inc(&msc->mmap_count);
}

static void msc_mmap_close(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	unsigned long pg;

	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
		return;

	/* drop page _refcounts */
	for (pg = 0; pg < msc->nr_pages; pg++) {
		struct page *page = msc_buffer_get_page(msc, pg);

		if (WARN_ON_ONCE(!page))
			continue;

		if (page->mapping)
			page->mapping = NULL;
	}

	/* last mapping -- drop user_count */
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
}

static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
{
	struct msc_iter *iter = vmf->vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
	if (!vmf->page)
		return VM_FAULT_SIGBUS;

	get_page(vmf->page);
	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
	vmf->page->index = vmf->pgoff;

	return 0;
}

static const struct vm_operations_struct msc_mmap_ops = {
	.open	= msc_mmap_open,
	.close	= msc_mmap_close,
	.fault	= msc_mmap_fault,
};

static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	int ret = -EINVAL;

	if (!size || offset_in_page(size))
		return -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* grab user_count once per mmap; drop in msc_mmap_close() */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return -EINVAL;

	if (msc->mode != MSC_MODE_SINGLE &&
	    msc->mode != MSC_MODE_MULTI)
		goto out;

	if (size >> PAGE_SHIFT != msc->nr_pages)
		goto out;

	atomic_set(&msc->mmap_count, 1);
	ret = 0;

out:
	if (ret)
		atomic_dec(&msc->user_count);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
	vma->vm_ops = &msc_mmap_ops;
	return ret;
}

static const struct file_operations intel_th_msc_fops = {
	.open		= intel_th_msc_open,
	.release	= intel_th_msc_release,
	.read		= intel_th_msc_read,
	.mmap		= intel_th_msc_mmap,
	.llseek		= no_llseek,
	.owner		= THIS_MODULE,
};

static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	unsigned long count;
	u32 reg;

	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
	     count && !(reg & MSCSTS_PLE); count--) {
		reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
		cpu_relax();
	}

	if (!count)
		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
}

static int intel_th_msc_init(struct msc *msc)
{
	atomic_set(&msc->user_count, -1);

	msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI;
	mutex_init(&msc->buf_mutex);
	INIT_LIST_HEAD(&msc->win_list);
	INIT_LIST_HEAD(&msc->iter_list);

	msc->burst_len =
		(ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
		__ffs(MSC_LEN);

	return 0;
}

static int msc_win_switch(struct msc *msc)
{
	struct msc_window *first;

	if (list_empty(&msc->win_list))
		return -EINVAL;

	first = list_first_entry(&msc->win_list, struct msc_window, entry);

	if (msc_is_last_win(msc->cur_win))
		msc->cur_win = first;
	else
		msc->cur_win = list_next_entry(msc->cur_win, entry);

	msc->base = msc_win_base(msc->cur_win);
	msc->base_addr = msc_win_base_dma(msc->cur_win);

	intel_th_trace_switch(msc->thdev);

	return 0;
}

/**
 * intel_th_msc_window_unlock - put the window back in rotation
 * @dev: MSC device to which this relates
 * @sgt: buffer's sg_table for the window, does nothing if NULL
 */
void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;

	if (!sgt)
		return;

	win = msc_find_window(msc, sgt, false);
	if (!win)
		return;

	msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
}
EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);

static void msc_work(struct work_struct *work)
{
	struct msc *msc = container_of(work, struct msc, work);

	intel_th_msc_deactivate(msc->thdev);
}

static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	struct msc_window *win, *next_win;

	if (!msc->do_irq || !msc->mbuf)
		return IRQ_NONE;

	msusts &= mask;

	if (!msusts)
		return msc->enabled ? IRQ_HANDLED : IRQ_NONE;

	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	if (!msc->enabled)
		return IRQ_NONE;

	/* grab the window before we do the switch */
	win = msc->cur_win;
	if (!win)
		return IRQ_HANDLED;
	next_win = msc_next_window(win);
	if (!next_win)
		return IRQ_HANDLED;

	/* next window: if READY, proceed, if LOCKED, stop the trace */
	if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
		schedule_work(&msc->work);
		return IRQ_HANDLED;
	}

	/* current window: INUSE -> LOCKED */
	msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	msc_win_switch(msc);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	return IRQ_HANDLED;
}

static const char * const msc_mode[] = {
	[MSC_MODE_SINGLE]	= "single",
	[MSC_MODE_MULTI]	= "multi",
	[MSC_MODE_EXI]		= "ExI",
	[MSC_MODE_DEBUG]	= "debug",
};

static ssize_t
wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
}

static ssize_t
wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	msc->wrap = !!val;

	return size;
}

static DEVICE_ATTR_RW(wrap);

static void msc_buffer_unassign(struct msc *msc)
{
	lockdep_assert_held(&msc->buf_mutex);

	if (!msc->mbuf)
		return;

	msc->mbuf->unassign(msc->mbuf_priv);
	msu_buffer_put(msc->mbuf);
	msc->mbuf_priv = NULL;
	msc->mbuf = NULL;
}

static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	const char *mode = msc_mode[msc->mode];
	ssize_t ret;

	mutex_lock(&msc->buf_mutex);
	if (msc->mbuf)
		mode = msc->mbuf->name;
	ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	const struct msu_buffer *mbuf = NULL;
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp, *mode;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	mode = kstrndup(buf, len, GFP_KERNEL);
	if (!mode)
		return -ENOMEM;

	i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
	if (i >= 0) {
		kfree(mode);
		goto found;
	}

	/* Buffer sinks only work with a usable IRQ */
	if (!msc->do_irq) {
		kfree(mode);
		return -EINVAL;
	}

	mbuf = msu_buffer_get(mode);
	kfree(mode);
	if (mbuf)
		goto found;

	return -EINVAL;

found:
	if (i == MSC_MODE_MULTI && msc->multi_is_broken)
		return -EOPNOTSUPP;

	mutex_lock(&msc->buf_mutex);
	ret = 0;

	/* Same buffer: do nothing */
	if (mbuf && mbuf == msc->mbuf) {
		/* put the extra reference we just got */
		msu_buffer_put(mbuf);
		goto unlock;
	}

	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (ret)
		goto unlock;

	if (mbuf) {
		void *mbuf_priv = mbuf->assign(dev, &i);

		if (!mbuf_priv) {
			ret = -ENOMEM;
			goto unlock;
		}

		msc_buffer_unassign(msc);
		msc->mbuf_priv = mbuf_priv;
		msc->mbuf = mbuf;
	} else {
		msc_buffer_unassign(msc);
	}

	msc->mode = i;

unlock:
	if (ret && mbuf)
		msu_buffer_put(mbuf);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);
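
/*
 * Usage sketch (the sysfs path is illustrative):
 *
 *	# echo multi > /sys/bus/intel_th/devices/0-msc0/mode
 *
 * A string that matches none of the entries in msc_mode[] is treated
 * as the name of a registered msu_buffer sink, which then takes over
 * the buffer handling.
 */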

static ssize_t
nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;
	size_t count = 0;

	mutex_lock(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_SINGLE)
		count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages);
	else if (msc->mode == MSC_MODE_MULTI) {
		list_for_each_entry(win, &msc->win_list, entry) {
			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "%d%c", win->nr_blocks,
					   msc_is_last_win(win) ? '\n' : ',');
		}
	} else {
		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
	}

	mutex_unlock(&msc->buf_mutex);

	return count;
}

static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret || !val)
			goto free_win;

		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		nr_wins++;
		rewin = krealloc(win, sizeof(*win) * nr_wins, GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		/* consume the number and the following comma, hence +1 */
		len -= end - p + 1;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);
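
/*
 * Usage sketch (the sysfs path is illustrative): two 64-page windows
 * for the "multi" mode, or a single contiguous 64-page buffer for the
 * "single" mode:
 *
 *	# echo 64,64 > /sys/bus/intel_th/devices/0-msc0/nr_pages
 *	# echo 64 > /sys/bus/intel_th/devices/0-msc0/nr_pages
 */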

static ssize_t
win_switch_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	ret = -EINVAL;
	mutex_lock(&msc->buf_mutex);
	/*
	 * A window switch can only happen in the "multi" mode.
	 * If an external buffer is engaged, it has full control over
	 * window switching.
	 */
	if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
		ret = msc_win_switch(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_WO(win_switch);
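
/*
 * Usage sketch (the sysfs path is illustrative):
 *
 *	# echo 1 > /sys/bus/intel_th/devices/0-msc0/win_switch
 *
 * "1" is the only accepted value; the write fails unless the buffer is
 * in the "multi" mode with no external sink attached.
 */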

static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	&dev_attr_win_switch.attr,
	NULL,
};

static struct attribute_group msc_output_group = {
	.attrs	= msc_output_attrs,
};

static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
	if (!res)
		msc->do_irq = 1;

	if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken))
		msc->multi_is_broken = 1;

	msc->index = thdev->id;

	msc->thdev = thdev;
	msc->reg_base = base + msc->index * 0x100;
	msc->msu_base = base;

	INIT_WORK(&msc->work, msc_work);
	err = intel_th_msc_init(msc);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}

static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret;

	intel_th_msc_deactivate(thdev);

	/*
	 * Buffers should not be used at this point except if the
	 * output character device is still open and the parent
	 * device gets detached from its bus, which is a FIXME.
	 */
	ret = msc_buffer_free_unless_used(msc);
	WARN_ON_ONCE(ret);
}

static struct intel_th_driver intel_th_msc_driver = {
	.probe	= intel_th_msc_probe,
	.remove	= intel_th_msc_remove,
	.irq	= intel_th_msc_interrupt,
	.wait_empty	= intel_th_msc_wait_empty,
	.activate	= intel_th_msc_activate,
	.deactivate	= intel_th_msc_deactivate,
	.fops	= &intel_th_msc_fops,
	.attr_group	= &msc_output_group,
	.driver	= {
		.name	= "msc",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");