blob: dab2af3f2c4f7a79435fee11cfec4a1f53fbca24 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2013 Xenia Ragiadakou
 *
 * Author: Xenia Ragiadakou
 * Email : burzalodowa@gmail.com
 */
10
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xhci-hcd

/*
 * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
 * legitimate C variable. It is not exported to user space.
 */
#undef TRACE_SYSTEM_VAR
#define TRACE_SYSTEM_VAR xhci_hcd

/*
 * Allow re-inclusion under TRACE_HEADER_MULTI_READ so that
 * <trace/define_trace.h> can expand this header a second time to
 * generate the tracepoint definitions.
 */
#if !defined(__XHCI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define __XHCI_TRACE_H

#include <linux/tracepoint.h>
#include "xhci.h"
#include "xhci-dbgcap.h"
27
/*
 * xhci_log_msg - event class for free-form xHCI debug messages.
 * Formats the printf-style arguments in @vaf into a per-event string
 * buffer capped at XHCI_MSG_MAX bytes.
 */
DECLARE_EVENT_CLASS(xhci_log_msg,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf),
	TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)),
	TP_fast_assign(
		vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va);
	),
	TP_printk("%s", __get_str(msg))
);
37
/* Per-subsystem debug-message events, all sharing the xhci_log_msg class. */
DEFINE_EVENT(xhci_log_msg, xhci_dbg_address,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_context_change,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_quirks,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_reset_ep,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_cancel_urb,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_init,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_ring_expansion,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);
72
/*
 * xhci_log_ctx - snapshot of a container (device/input) context.
 * The ctx_data array copies the raw context bytes; its size scales with
 * the controller's context format (HCC_64BYTE_CONTEXT selects 32- or
 * 64-byte entries), plus one extra entry for input contexts, the slot
 * context, and @ep_num endpoint contexts. Note the dynamic array is
 * counted in u32 elements (*8 per entry == *32 bytes), matching the
 * byte count used in the memcpy below.
 */
DECLARE_EVENT_CLASS(xhci_log_ctx,
	TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
		 unsigned int ep_num),
	TP_ARGS(xhci, ctx, ep_num),
	TP_STRUCT__entry(
		__field(int, ctx_64)
		__field(unsigned, ctx_type)
		__field(dma_addr_t, ctx_dma)
		__field(u8 *, ctx_va)
		__field(unsigned, ctx_ep_num)
		__field(int, slot_id)
		__dynamic_array(u32, ctx_data,
			((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 8) *
			((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1))
	),
	TP_fast_assign(
		struct usb_device *udev;

		udev = to_usb_device(xhci_to_hcd(xhci)->self.controller);
		__entry->ctx_64 = HCC_64BYTE_CONTEXT(xhci->hcc_params);
		__entry->ctx_type = ctx->type;
		__entry->ctx_dma = ctx->dma;
		__entry->ctx_va = ctx->bytes;
		__entry->slot_id = udev->slot_id;
		__entry->ctx_ep_num = ep_num;
		memcpy(__get_dynamic_array(ctx_data), ctx->bytes,
			((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) *
			((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1));
	),
	TP_printk("ctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p",
		__entry->ctx_64, __entry->ctx_type,
		(unsigned long long) __entry->ctx_dma, __entry->ctx_va
	)
);
107
/* Context snapshot taken around address-device operations. */
DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx,
	TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
		 unsigned int ep_num),
	TP_ARGS(xhci, ctx, ep_num)
);
113
/*
 * xhci_log_trb - one Transfer Request Block plus its ring type.
 * The four TRB dwords are captured at trace time; decoding to text via
 * xhci_decode_trb() into the per-event str buffer is deferred to
 * TP_printk (i.e. to trace readout time).
 */
DECLARE_EVENT_CLASS(xhci_log_trb,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb),
	TP_STRUCT__entry(
		__field(u32, type)
		__field(u32, field0)
		__field(u32, field1)
		__field(u32, field2)
		__field(u32, field3)
		__dynamic_array(char, str, XHCI_MSG_MAX)
	),
	TP_fast_assign(
		__entry->type = ring->type;
		__entry->field0 = le32_to_cpu(trb->field[0]);
		__entry->field1 = le32_to_cpu(trb->field[1]);
		__entry->field2 = le32_to_cpu(trb->field[2]);
		__entry->field3 = le32_to_cpu(trb->field[3]);
	),
	TP_printk("%s: %s", xhci_ring_type_string(__entry->type),
		xhci_decode_trb(__get_str(str), XHCI_MSG_MAX, __entry->field0, __entry->field1,
				__entry->field2, __entry->field3)
	)
);
137
/* TRB events for the host controller and the DbC (debug capability) paths. */
DEFINE_EVENT(xhci_log_trb, xhci_handle_event,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);

DEFINE_EVENT(xhci_log_trb, xhci_handle_command,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);

DEFINE_EVENT(xhci_log_trb, xhci_handle_transfer,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);

DEFINE_EVENT(xhci_log_trb, xhci_queue_trb,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);

DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_event,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);

DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_transfer,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);

DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);
172
/*
 * xhci_log_free_virt_dev - virt-device state captured at teardown.
 * Unlike xhci_log_virt_dev below, this class does not touch vdev->udev,
 * so it is safe to use after the usb_device link has been dropped.
 */
DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev),
	TP_STRUCT__entry(
		__field(void *, vdev)
		__field(unsigned long long, out_ctx)
		__field(unsigned long long, in_ctx)
		__field(u8, fake_port)
		__field(u8, real_port)
		__field(u16, current_mel)

	),
	TP_fast_assign(
		__entry->vdev = vdev;
		__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
		__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
		__entry->fake_port = (u8) vdev->fake_port;
		__entry->real_port = (u8) vdev->real_port;
		__entry->current_mel = (u16) vdev->current_mel;
	),
	TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
		__entry->vdev, __entry->in_ctx, __entry->out_ctx,
		__entry->fake_port, __entry->real_port, __entry->current_mel
	)
);
198
/* Fired when a virt device is freed. */
DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);
203
/*
 * xhci_log_virt_dev - virt-device state for a device that still has a
 * valid vdev->udev link (dereferenced unconditionally in TP_fast_assign).
 */
DECLARE_EVENT_CLASS(xhci_log_virt_dev,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev),
	TP_STRUCT__entry(
		__field(void *, vdev)
		__field(unsigned long long, out_ctx)
		__field(unsigned long long, in_ctx)
		__field(int, devnum)
		__field(int, state)
		__field(int, speed)
		__field(u8, portnum)
		__field(u8, level)
		__field(int, slot_id)
	),
	TP_fast_assign(
		__entry->vdev = vdev;
		__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
		__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
		__entry->devnum = vdev->udev->devnum;
		__entry->state = vdev->udev->state;
		__entry->speed = vdev->udev->speed;
		__entry->portnum = vdev->udev->portnum;
		__entry->level = vdev->udev->level;
		__entry->slot_id = vdev->udev->slot_id;
	),
	TP_printk("vdev %p ctx %llx | %llx num %d state %d speed %d port %d level %d slot %d",
		__entry->vdev, __entry->in_ctx, __entry->out_ctx,
		__entry->devnum, __entry->state, __entry->speed,
		__entry->portnum, __entry->level, __entry->slot_id
	)
);
235
/* Virt-device lifecycle events sharing the xhci_log_virt_dev class. */
DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);

DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);

DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_addressable_virt_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);

DEFINE_EVENT(xhci_log_virt_dev, xhci_stop_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);
255
/*
 * xhci_log_urb - snapshot of a URB's endpoint, transfer sizes and flags.
 * The endpoint descriptor is dereferenced via urb->ep at trace time, so
 * callers must pass a URB with a valid endpoint. The transfer type is
 * rendered symbolically (intr/control/bulk/isoc) at readout.
 */
DECLARE_EVENT_CLASS(xhci_log_urb,
	TP_PROTO(struct urb *urb),
	TP_ARGS(urb),
	TP_STRUCT__entry(
		__field(void *, urb)
		__field(unsigned int, pipe)
		__field(unsigned int, stream)
		__field(int, status)
		__field(unsigned int, flags)
		__field(int, num_mapped_sgs)
		__field(int, num_sgs)
		__field(int, length)
		__field(int, actual)
		__field(int, epnum)
		__field(int, dir_in)
		__field(int, type)
		__field(int, slot_id)
	),
	TP_fast_assign(
		__entry->urb = urb;
		__entry->pipe = urb->pipe;
		__entry->stream = urb->stream_id;
		__entry->status = urb->status;
		__entry->flags = urb->transfer_flags;
		__entry->num_mapped_sgs = urb->num_mapped_sgs;
		__entry->num_sgs = urb->num_sgs;
		__entry->length = urb->transfer_buffer_length;
		__entry->actual = urb->actual_length;
		__entry->epnum = usb_endpoint_num(&urb->ep->desc);
		__entry->dir_in = usb_endpoint_dir_in(&urb->ep->desc);
		__entry->type = usb_endpoint_type(&urb->ep->desc);
		__entry->slot_id = urb->dev->slot_id;
	),
	TP_printk("ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x",
		__entry->epnum, __entry->dir_in ? "in" : "out",
		__print_symbolic(__entry->type,
			{ USB_ENDPOINT_XFER_INT,	"intr" },
			{ USB_ENDPOINT_XFER_CONTROL,	"control" },
			{ USB_ENDPOINT_XFER_BULK,	"bulk" },
			{ USB_ENDPOINT_XFER_ISOC,	"isoc" }),
		__entry->urb, __entry->pipe, __entry->slot_id,
		__entry->actual, __entry->length, __entry->num_mapped_sgs,
		__entry->num_sgs, __entry->stream, __entry->flags
	)
);
301
/* URB lifecycle events: submit, completion hand-back, and cancel. */
DEFINE_EVENT(xhci_log_urb, xhci_urb_enqueue,
	TP_PROTO(struct urb *urb),
	TP_ARGS(urb)
);

DEFINE_EVENT(xhci_log_urb, xhci_urb_giveback,
	TP_PROTO(struct urb *urb),
	TP_ARGS(urb)
);

DEFINE_EVENT(xhci_log_urb, xhci_urb_dequeue,
	TP_PROTO(struct urb *urb),
	TP_ARGS(urb)
);
316
/*
 * xhci_log_ep_ctx - raw dwords of an endpoint context, byte-swapped to
 * CPU order at trace time and decoded to text at readout.
 */
DECLARE_EVENT_CLASS(xhci_log_ep_ctx,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx),
	TP_STRUCT__entry(
		__field(u32, info)
		__field(u32, info2)
		__field(u64, deq)
		__field(u32, tx_info)
	),
	TP_fast_assign(
		__entry->info = le32_to_cpu(ctx->ep_info);
		__entry->info2 = le32_to_cpu(ctx->ep_info2);
		__entry->deq = le64_to_cpu(ctx->deq);
		__entry->tx_info = le32_to_cpu(ctx->tx_info);
	),
	TP_printk("%s", xhci_decode_ep_context(__entry->info,
		__entry->info2, __entry->deq, __entry->tx_info)
	)
);
336
/* Endpoint-context events for command completion and endpoint setup. */
DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_stop_ep,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_set_deq_ep,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_reset_ep,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_config_ep,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_ep_ctx, xhci_add_endpoint,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);
361
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000362DECLARE_EVENT_CLASS(xhci_log_slot_ctx,
363 TP_PROTO(struct xhci_slot_ctx *ctx),
364 TP_ARGS(ctx),
365 TP_STRUCT__entry(
366 __field(u32, info)
367 __field(u32, info2)
368 __field(u32, tt_info)
369 __field(u32, state)
370 ),
371 TP_fast_assign(
372 __entry->info = le32_to_cpu(ctx->dev_info);
373 __entry->info2 = le32_to_cpu(ctx->dev_info2);
374 __entry->tt_info = le64_to_cpu(ctx->tt_info);
375 __entry->state = le32_to_cpu(ctx->dev_state);
376 ),
377 TP_printk("%s", xhci_decode_slot_context(__entry->info,
378 __entry->info2, __entry->tt_info,
379 __entry->state)
380 )
381);
382
/* Slot-context events for device allocation, addressing and commands. */
DEFINE_EVENT(xhci_log_slot_ctx, xhci_alloc_dev,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_free_dev,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_disable_slot,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_discover_or_reset_device,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_setup_device_slot,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_addr_dev,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_reset_dev,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_set_deq,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_configure_endpoint,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);
427
/*
 * xhci_log_ctrl_ctx - drop/add flag dwords of an input control context.
 */
DECLARE_EVENT_CLASS(xhci_log_ctrl_ctx,
	TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
	TP_ARGS(ctrl_ctx),
	TP_STRUCT__entry(
		__field(u32, drop)
		__field(u32, add)
	),
	TP_fast_assign(
		__entry->drop = le32_to_cpu(ctrl_ctx->drop_flags);
		__entry->add = le32_to_cpu(ctrl_ctx->add_flags);
	),
	TP_printk("%s", xhci_decode_ctrl_ctx(__entry->drop, __entry->add)
	)
);
442
/* Input-control-context events for the address and configure paths. */
DEFINE_EVENT(xhci_log_ctrl_ctx, xhci_address_ctrl_ctx,
	TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
	TP_ARGS(ctrl_ctx)
);

DEFINE_EVENT(xhci_log_ctrl_ctx, xhci_configure_endpoint_ctrl_ctx,
	TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
	TP_ARGS(ctrl_ctx)
);
452
/*
 * xhci_log_ring - geometry and cursors of an xHCI ring: enqueue/dequeue
 * DMA addresses (resolved via xhci_trb_virt_to_dma), their segments,
 * segment count, stream id, free-TRB count and cycle state.
 */
DECLARE_EVENT_CLASS(xhci_log_ring,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring),
	TP_STRUCT__entry(
		__field(u32, type)
		__field(void *, ring)
		__field(dma_addr_t, enq)
		__field(dma_addr_t, deq)
		__field(dma_addr_t, enq_seg)
		__field(dma_addr_t, deq_seg)
		__field(unsigned int, num_segs)
		__field(unsigned int, stream_id)
		__field(unsigned int, cycle_state)
		__field(unsigned int, num_trbs_free)
		__field(unsigned int, bounce_buf_len)
	),
	TP_fast_assign(
		__entry->ring = ring;
		__entry->type = ring->type;
		__entry->num_segs = ring->num_segs;
		__entry->stream_id = ring->stream_id;
		__entry->enq_seg = ring->enq_seg->dma;
		__entry->deq_seg = ring->deq_seg->dma;
		__entry->cycle_state = ring->cycle_state;
		__entry->num_trbs_free = ring->num_trbs_free;
		__entry->bounce_buf_len = ring->bounce_buf_len;
		__entry->enq = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
		__entry->deq = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
	),
	TP_printk("%s %p: enq %pad(%pad) deq %pad(%pad) segs %d stream %d free_trbs %d bounce %d cycle %d",
		xhci_ring_type_string(__entry->type), __entry->ring,
		&__entry->enq, &__entry->enq_seg,
		&__entry->deq, &__entry->deq_seg,
		__entry->num_segs,
		__entry->stream_id,
		__entry->num_trbs_free,
		__entry->bounce_buf_len,
		__entry->cycle_state
	)
);
493
/* Ring lifecycle and cursor-advance events. */
DEFINE_EVENT(xhci_log_ring, xhci_ring_alloc,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(xhci_log_ring, xhci_ring_free,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(xhci_log_ring, xhci_ring_expansion,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(xhci_log_ring, xhci_inc_enq,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(xhci_log_ring, xhci_inc_deq,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);
518
/*
 * xhci_log_portsc - a port number and its raw PORTSC register value;
 * decoded to text by xhci_decode_portsc() at readout.
 */
DECLARE_EVENT_CLASS(xhci_log_portsc,
	TP_PROTO(u32 portnum, u32 portsc),
	TP_ARGS(portnum, portsc),
	TP_STRUCT__entry(
		__field(u32, portnum)
		__field(u32, portsc)
		__dynamic_array(char, str, XHCI_MSG_MAX)
	),
	TP_fast_assign(
		__entry->portnum = portnum;
		__entry->portsc = portsc;
	),
	TP_printk("port-%d: %s",
		__entry->portnum,
		xhci_decode_portsc(__get_str(str), __entry->portsc)
	)
);
536
/* PORTSC events for port-change interrupts and hub status polling. */
DEFINE_EVENT(xhci_log_portsc, xhci_handle_port_status,
	TP_PROTO(u32 portnum, u32 portsc),
	TP_ARGS(portnum, portsc)
);

DEFINE_EVENT(xhci_log_portsc, xhci_get_port_status,
	TP_PROTO(u32 portnum, u32 portsc),
	TP_ARGS(portnum, portsc)
);

DEFINE_EVENT(xhci_log_portsc, xhci_hub_status_data,
	TP_PROTO(u32 portnum, u32 portsc),
	TP_ARGS(portnum, portsc)
);
551
/*
 * xhci_dbc_log_request - a DbC (debug capability) transfer request:
 * direction, actual/total lengths, and completion status.
 */
DECLARE_EVENT_CLASS(xhci_dbc_log_request,
	TP_PROTO(struct dbc_request *req),
	TP_ARGS(req),
	TP_STRUCT__entry(
		__field(struct dbc_request *, req)
		__field(bool, dir)
		__field(unsigned int, actual)
		__field(unsigned int, length)
		__field(int, status)
	),
	TP_fast_assign(
		__entry->req = req;
		__entry->dir = req->direction;
		__entry->actual = req->actual;
		__entry->length = req->length;
		__entry->status = req->status;
	),
	TP_printk("%s: req %p length %u/%u ==> %d",
		__entry->dir ? "bulk-in" : "bulk-out",
		__entry->req, __entry->actual,
		__entry->length, __entry->status
	)
);
575
/* DbC request lifecycle events: alloc, free, queue, and completion. */
DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_alloc_request,
	TP_PROTO(struct dbc_request *req),
	TP_ARGS(req)
);

DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_free_request,
	TP_PROTO(struct dbc_request *req),
	TP_ARGS(req)
);

DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_queue_request,
	TP_PROTO(struct dbc_request *req),
	TP_ARGS(req)
);

DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_giveback_request,
	TP_PROTO(struct dbc_request *req),
	TP_ARGS(req)
);
#endif /* __XHCI_TRACE_H */

/* this part must be outside header guard */

/*
 * Point define_trace.h at this file (in the current directory) so it can
 * re-include it and expand the event definitions.
 */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .

#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE xhci-trace

#include <trace/define_trace.h>