Line data Source code
1 : #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
2 : #define _I915_TRACE_H_
3 :
4 : #ifdef __linux__
5 : #include <linux/stringify.h>
6 : #include <linux/types.h>
7 : #include <linux/tracepoint.h>
8 : #endif
9 :
10 : #include <dev/pci/drm/drmP.h>
11 : #include "i915_drv.h"
12 : #include "intel_drv.h"
13 : #include "intel_ringbuffer.h"
14 :
15 : #undef TRACE_SYSTEM
16 : #define TRACE_SYSTEM i915
17 : #define TRACE_INCLUDE_FILE i915_trace
18 :
19 : /* pipe updates */
20 :
21 0 : TRACE_EVENT(i915_pipe_update_start,
22 : TP_PROTO(struct intel_crtc *crtc),
23 : TP_ARGS(crtc),
24 :
25 : TP_STRUCT__entry(
26 : __field(enum pipe, pipe)
27 : __field(u32, frame)
28 : __field(u32, scanline)
29 : __field(u32, min)
30 : __field(u32, max)
31 : ),
32 :
33 : TP_fast_assign(
34 : __entry->pipe = crtc->pipe;
35 : __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
36 : crtc->pipe);
37 : __entry->scanline = intel_get_crtc_scanline(crtc);
38 : __entry->min = crtc->debug.min_vbl;
39 : __entry->max = crtc->debug.max_vbl;
40 : ),
41 :
42 : TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
43 : pipe_name(__entry->pipe), __entry->frame,
44 : __entry->scanline, __entry->min, __entry->max)
45 : );
46 :
47 0 : TRACE_EVENT(i915_pipe_update_vblank_evaded,
48 : TP_PROTO(struct intel_crtc *crtc),
49 : TP_ARGS(crtc),
50 :
51 : TP_STRUCT__entry(
52 : __field(enum pipe, pipe)
53 : __field(u32, frame)
54 : __field(u32, scanline)
55 : __field(u32, min)
56 : __field(u32, max)
57 : ),
58 :
59 : TP_fast_assign(
60 : __entry->pipe = crtc->pipe;
61 : __entry->frame = crtc->debug.start_vbl_count;
62 : __entry->scanline = crtc->debug.scanline_start;
63 : __entry->min = crtc->debug.min_vbl;
64 : __entry->max = crtc->debug.max_vbl;
65 : ),
66 :
67 : TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
68 : pipe_name(__entry->pipe), __entry->frame,
69 : __entry->scanline, __entry->min, __entry->max)
70 : );
71 :
72 0 : TRACE_EVENT(i915_pipe_update_end,
73 : TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
74 : TP_ARGS(crtc, frame, scanline_end),
75 :
76 : TP_STRUCT__entry(
77 : __field(enum pipe, pipe)
78 : __field(u32, frame)
79 : __field(u32, scanline)
80 : ),
81 :
82 : TP_fast_assign(
83 : __entry->pipe = crtc->pipe;
84 : __entry->frame = frame;
85 : __entry->scanline = scanline_end;
86 : ),
87 :
88 : TP_printk("pipe %c, frame=%u, scanline=%u",
89 : pipe_name(__entry->pipe), __entry->frame,
90 : __entry->scanline)
91 : );
92 :
93 : /* object tracking */
94 :
95 0 : TRACE_EVENT(i915_gem_object_create,
96 : TP_PROTO(struct drm_i915_gem_object *obj),
97 : TP_ARGS(obj),
98 :
99 : TP_STRUCT__entry(
100 : __field(struct drm_i915_gem_object *, obj)
101 : __field(u32, size)
102 : ),
103 :
104 : TP_fast_assign(
105 : __entry->obj = obj;
106 : __entry->size = obj->base.size;
107 : ),
108 :
109 : TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
110 : );
111 :
112 : TRACE_EVENT(i915_gem_shrink,
113 : TP_PROTO(struct drm_i915_private *i915, unsigned long target, unsigned flags),
114 : TP_ARGS(i915, target, flags),
115 :
116 : TP_STRUCT__entry(
117 : __field(int, dev)
118 : __field(unsigned long, target)
119 : __field(unsigned, flags)
120 : ),
121 :
122 : TP_fast_assign(
123 : __entry->dev = i915->dev->primary->index;
124 : __entry->target = target;
125 : __entry->flags = flags;
126 : ),
127 :
128 : TP_printk("dev=%d, target=%lu, flags=%x",
129 : __entry->dev, __entry->target, __entry->flags)
130 : );
131 :
132 0 : TRACE_EVENT(i915_vma_bind,
133 : TP_PROTO(struct i915_vma *vma, unsigned flags),
134 : TP_ARGS(vma, flags),
135 :
136 : TP_STRUCT__entry(
137 : __field(struct drm_i915_gem_object *, obj)
138 : __field(struct i915_address_space *, vm)
139 : __field(u64, offset)
140 : __field(u32, size)
141 : __field(unsigned, flags)
142 : ),
143 :
144 : TP_fast_assign(
145 : __entry->obj = vma->obj;
146 : __entry->vm = vma->vm;
147 : __entry->offset = vma->node.start;
148 : __entry->size = vma->node.size;
149 : __entry->flags = flags;
150 : ),
151 :
152 : TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
153 : __entry->obj, __entry->offset, __entry->size,
154 : __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
155 : __entry->vm)
156 : );
157 :
158 0 : TRACE_EVENT(i915_vma_unbind,
159 : TP_PROTO(struct i915_vma *vma),
160 : TP_ARGS(vma),
161 :
162 : TP_STRUCT__entry(
163 : __field(struct drm_i915_gem_object *, obj)
164 : __field(struct i915_address_space *, vm)
165 : __field(u64, offset)
166 : __field(u32, size)
167 : ),
168 :
169 : TP_fast_assign(
170 : __entry->obj = vma->obj;
171 : __entry->vm = vma->vm;
172 : __entry->offset = vma->node.start;
173 : __entry->size = vma->node.size;
174 : ),
175 :
176 : TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
177 : __entry->obj, __entry->offset, __entry->size, __entry->vm)
178 : );
179 :
180 : #define VM_TO_TRACE_NAME(vm) \
181 : (i915_is_ggtt(vm) ? "G" : \
182 : "P")
183 :
184 : DECLARE_EVENT_CLASS(i915_va,
185 : TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
186 : TP_ARGS(vm, start, length, name),
187 :
188 : TP_STRUCT__entry(
189 : __field(struct i915_address_space *, vm)
190 : __field(u64, start)
191 : __field(u64, end)
192 : __string(name, name)
193 : ),
194 :
195 : TP_fast_assign(
196 : __entry->vm = vm;
197 : __entry->start = start;
198 : __entry->end = start + length - 1;
199 : __assign_str(name, name);
200 : ),
201 :
202 : TP_printk("vm=%p (%s), 0x%llx-0x%llx",
203 : __entry->vm, __get_str(name), __entry->start, __entry->end)
204 : );
205 :
206 0 : DEFINE_EVENT(i915_va, i915_va_alloc,
207 : TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
208 : TP_ARGS(vm, start, length, name)
209 : );
210 :
211 : DECLARE_EVENT_CLASS(i915_px_entry,
212 : TP_PROTO(struct i915_address_space *vm, u32 px, u64 start, u64 px_shift),
213 : TP_ARGS(vm, px, start, px_shift),
214 :
215 : TP_STRUCT__entry(
216 : __field(struct i915_address_space *, vm)
217 : __field(u32, px)
218 : __field(u64, start)
219 : __field(u64, end)
220 : ),
221 :
222 : TP_fast_assign(
223 : __entry->vm = vm;
224 : __entry->px = px;
225 : __entry->start = start;
226 : __entry->end = ((start + (1ULL << px_shift)) & ~((1ULL << px_shift)-1)) - 1;
227 : ),
228 :
229 : TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
230 : __entry->vm, __entry->px, __entry->start, __entry->end)
231 : );
232 :
233 0 : DEFINE_EVENT(i915_px_entry, i915_page_table_entry_alloc,
234 : TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
235 : TP_ARGS(vm, pde, start, pde_shift)
236 : );
237 :
238 0 : DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_entry_alloc,
239 : TP_PROTO(struct i915_address_space *vm, u32 pdpe, u64 start, u64 pdpe_shift),
240 : TP_ARGS(vm, pdpe, start, pdpe_shift),
241 :
242 : TP_printk("vm=%p, pdpe=%d (0x%llx-0x%llx)",
243 : __entry->vm, __entry->px, __entry->start, __entry->end)
244 : );
245 :
246 0 : DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_pointer_entry_alloc,
247 : TP_PROTO(struct i915_address_space *vm, u32 pml4e, u64 start, u64 pml4e_shift),
248 : TP_ARGS(vm, pml4e, start, pml4e_shift),
249 :
250 : TP_printk("vm=%p, pml4e=%d (0x%llx-0x%llx)",
251 : __entry->vm, __entry->px, __entry->start, __entry->end)
252 : );
253 :
254 : /* Avoid extra math because we only support two sizes. The format is defined by
255 : * bitmap_scnprintf. Each 32 bits is printed as 8 hex digits followed by a comma. */
256 : #define TRACE_PT_SIZE(bits) \
257 : ((((bits) == 1024) ? 288 : 144) + 1)
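/*
 * Worked example (editorial note, not part of the original header):
 * following the format described above (8 hex digits plus a comma per
 * 32 bits of bitmap),
 *   1024 bits -> (1024 / 32) * 9 = 32 * 9 = 288 characters
 *    512 bits ->  (512 / 32) * 9 = 16 * 9 = 144 characters
 * so the non-1024 case, 144, corresponds to a 512-bit page-table bitmap.
 * The trailing "+ 1" reserves room for the terminating NUL.
 */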
258 :
259 : DECLARE_EVENT_CLASS(i915_page_table_entry_update,
260 : TP_PROTO(struct i915_address_space *vm, u32 pde,
261 : struct i915_page_table *pt, u32 first, u32 count, u32 bits),
262 : TP_ARGS(vm, pde, pt, first, count, bits),
263 :
264 : TP_STRUCT__entry(
265 : __field(struct i915_address_space *, vm)
266 : __field(u32, pde)
267 : __field(u32, first)
268 : __field(u32, last)
269 : __dynamic_array(char, cur_ptes, TRACE_PT_SIZE(bits))
270 : ),
271 :
272 : TP_fast_assign(
273 : __entry->vm = vm;
274 : __entry->pde = pde;
275 : __entry->first = first;
276 : __entry->last = first + count - 1;
277 : scnprintf(__get_str(cur_ptes),
278 : TRACE_PT_SIZE(bits),
279 : "%*pb",
280 : bits,
281 : pt->used_ptes);
282 : ),
283 :
284 : TP_printk("vm=%p, pde=%d, updating %u:%u\t%s",
285 : __entry->vm, __entry->pde, __entry->last, __entry->first,
286 : __get_str(cur_ptes))
287 : );
288 :
289 0 : DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
290 : TP_PROTO(struct i915_address_space *vm, u32 pde,
291 : struct i915_page_table *pt, u32 first, u32 count, u32 bits),
292 : TP_ARGS(vm, pde, pt, first, count, bits)
293 : );
294 :
295 0 : TRACE_EVENT(i915_gem_object_change_domain,
296 : TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
297 : TP_ARGS(obj, old_read, old_write),
298 :
299 : TP_STRUCT__entry(
300 : __field(struct drm_i915_gem_object *, obj)
301 : __field(u32, read_domains)
302 : __field(u32, write_domain)
303 : ),
304 :
305 : TP_fast_assign(
306 : __entry->obj = obj;
307 : __entry->read_domains = obj->base.read_domains | (old_read << 16);
308 : __entry->write_domain = obj->base.write_domain | (old_write << 16);
309 : ),
310 :
311 : TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
312 : __entry->obj,
313 : __entry->read_domains >> 16,
314 : __entry->read_domains & 0xffff,
315 : __entry->write_domain >> 16,
316 : __entry->write_domain & 0xffff)
317 : );
318 :
319 0 : TRACE_EVENT(i915_gem_object_pwrite,
320 : TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
321 : TP_ARGS(obj, offset, len),
322 :
323 : TP_STRUCT__entry(
324 : __field(struct drm_i915_gem_object *, obj)
325 : __field(u32, offset)
326 : __field(u32, len)
327 : ),
328 :
329 : TP_fast_assign(
330 : __entry->obj = obj;
331 : __entry->offset = offset;
332 : __entry->len = len;
333 : ),
334 :
335 : TP_printk("obj=%p, offset=%u, len=%u",
336 : __entry->obj, __entry->offset, __entry->len)
337 : );
338 :
339 0 : TRACE_EVENT(i915_gem_object_pread,
340 : TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
341 : TP_ARGS(obj, offset, len),
342 :
343 : TP_STRUCT__entry(
344 : __field(struct drm_i915_gem_object *, obj)
345 : __field(u32, offset)
346 : __field(u32, len)
347 : ),
348 :
349 : TP_fast_assign(
350 : __entry->obj = obj;
351 : __entry->offset = offset;
352 : __entry->len = len;
353 : ),
354 :
355 : TP_printk("obj=%p, offset=%u, len=%u",
356 : __entry->obj, __entry->offset, __entry->len)
357 : );
358 :
359 : TRACE_EVENT(i915_gem_object_fault,
360 : TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
361 : TP_ARGS(obj, index, gtt, write),
362 :
363 : TP_STRUCT__entry(
364 : __field(struct drm_i915_gem_object *, obj)
365 : __field(u32, index)
366 : __field(bool, gtt)
367 : __field(bool, write)
368 : ),
369 :
370 : TP_fast_assign(
371 : __entry->obj = obj;
372 : __entry->index = index;
373 : __entry->gtt = gtt;
374 : __entry->write = write;
375 : ),
376 :
377 : TP_printk("obj=%p, %s index=%u %s",
378 : __entry->obj,
379 : __entry->gtt ? "GTT" : "CPU",
380 : __entry->index,
381 : __entry->write ? ", writable" : "")
382 : );
383 :
384 : DECLARE_EVENT_CLASS(i915_gem_object,
385 : TP_PROTO(struct drm_i915_gem_object *obj),
386 : TP_ARGS(obj),
387 :
388 : TP_STRUCT__entry(
389 : __field(struct drm_i915_gem_object *, obj)
390 : ),
391 :
392 : TP_fast_assign(
393 : __entry->obj = obj;
394 : ),
395 :
396 : TP_printk("obj=%p", __entry->obj)
397 : );
398 :
399 0 : DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
400 : TP_PROTO(struct drm_i915_gem_object *obj),
401 : TP_ARGS(obj)
402 : );
403 :
404 0 : DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
405 : TP_PROTO(struct drm_i915_gem_object *obj),
406 : TP_ARGS(obj)
407 : );
408 :
409 0 : TRACE_EVENT(i915_gem_evict,
410 : TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
411 : TP_ARGS(dev, size, align, flags),
412 :
413 : TP_STRUCT__entry(
414 : __field(u32, dev)
415 : __field(u32, size)
416 : __field(u32, align)
417 : __field(unsigned, flags)
418 : ),
419 :
420 : TP_fast_assign(
421 : __entry->dev = dev->primary->index;
422 : __entry->size = size;
423 : __entry->align = align;
424 : __entry->flags = flags;
425 : ),
426 :
427 : TP_printk("dev=%d, size=%d, align=%d %s",
428 : __entry->dev, __entry->size, __entry->align,
429 : __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
430 : );
431 :
432 : TRACE_EVENT(i915_gem_evict_everything,
433 : TP_PROTO(struct drm_device *dev),
434 : TP_ARGS(dev),
435 :
436 : TP_STRUCT__entry(
437 : __field(u32, dev)
438 : ),
439 :
440 : TP_fast_assign(
441 : __entry->dev = dev->primary->index;
442 : ),
443 :
444 : TP_printk("dev=%d", __entry->dev)
445 : );
446 :
447 0 : TRACE_EVENT(i915_gem_evict_vm,
448 : TP_PROTO(struct i915_address_space *vm),
449 : TP_ARGS(vm),
450 :
451 : TP_STRUCT__entry(
452 : __field(u32, dev)
453 : __field(struct i915_address_space *, vm)
454 : ),
455 :
456 : TP_fast_assign(
457 : __entry->dev = vm->dev->primary->index;
458 : __entry->vm = vm;
459 : ),
460 :
461 : TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
462 : );
463 :
464 0 : TRACE_EVENT(i915_gem_ring_sync_to,
465 : TP_PROTO(struct drm_i915_gem_request *to_req,
466 : struct intel_engine_cs *from,
467 : struct drm_i915_gem_request *req),
468 : TP_ARGS(to_req, from, req),
469 :
470 : TP_STRUCT__entry(
471 : __field(u32, dev)
472 : __field(u32, sync_from)
473 : __field(u32, sync_to)
474 : __field(u32, seqno)
475 : ),
476 :
477 : TP_fast_assign(
478 : __entry->dev = from->dev->primary->index;
479 : __entry->sync_from = from->id;
480 : __entry->sync_to = to_req->ring->id;
481 : __entry->seqno = i915_gem_request_get_seqno(req);
482 : ),
483 :
484 : TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
485 : __entry->dev,
486 : __entry->sync_from, __entry->sync_to,
487 : __entry->seqno)
488 : );
489 :
490 0 : TRACE_EVENT(i915_gem_ring_dispatch,
491 : TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
492 : TP_ARGS(req, flags),
493 :
494 : TP_STRUCT__entry(
495 : __field(u32, dev)
496 : __field(u32, ring)
497 : __field(u32, seqno)
498 : __field(u32, flags)
499 : ),
500 :
501 : TP_fast_assign(
502 : struct intel_engine_cs *ring =
503 : i915_gem_request_get_ring(req);
504 : __entry->dev = ring->dev->primary->index;
505 : __entry->ring = ring->id;
506 : __entry->seqno = i915_gem_request_get_seqno(req);
507 : __entry->flags = flags;
508 : i915_trace_irq_get(ring, req);
509 : ),
510 :
511 : TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
512 : __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
513 : );
514 :
515 0 : TRACE_EVENT(i915_gem_ring_flush,
516 : TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
517 : TP_ARGS(req, invalidate, flush),
518 :
519 : TP_STRUCT__entry(
520 : __field(u32, dev)
521 : __field(u32, ring)
522 : __field(u32, invalidate)
523 : __field(u32, flush)
524 : ),
525 :
526 : TP_fast_assign(
527 : __entry->dev = req->ring->dev->primary->index;
528 : __entry->ring = req->ring->id;
529 : __entry->invalidate = invalidate;
530 : __entry->flush = flush;
531 : ),
532 :
533 : TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
534 : __entry->dev, __entry->ring,
535 : __entry->invalidate, __entry->flush)
536 : );
537 :
538 : DECLARE_EVENT_CLASS(i915_gem_request,
539 : TP_PROTO(struct drm_i915_gem_request *req),
540 : TP_ARGS(req),
541 :
542 : TP_STRUCT__entry(
543 : __field(u32, dev)
544 : __field(u32, ring)
545 : __field(u32, seqno)
546 : ),
547 :
548 : TP_fast_assign(
549 : struct intel_engine_cs *ring =
550 : i915_gem_request_get_ring(req);
551 : __entry->dev = ring->dev->primary->index;
552 : __entry->ring = ring->id;
553 : __entry->seqno = i915_gem_request_get_seqno(req);
554 : ),
555 :
556 : TP_printk("dev=%u, ring=%u, seqno=%u",
557 : __entry->dev, __entry->ring, __entry->seqno)
558 : );
559 :
560 0 : DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
561 : TP_PROTO(struct drm_i915_gem_request *req),
562 : TP_ARGS(req)
563 : );
564 :
565 0 : TRACE_EVENT(i915_gem_request_notify,
566 : TP_PROTO(struct intel_engine_cs *ring),
567 : TP_ARGS(ring),
568 :
569 : TP_STRUCT__entry(
570 : __field(u32, dev)
571 : __field(u32, ring)
572 : __field(u32, seqno)
573 : ),
574 :
575 : TP_fast_assign(
576 : __entry->dev = ring->dev->primary->index;
577 : __entry->ring = ring->id;
578 : __entry->seqno = ring->get_seqno(ring, false);
579 : ),
580 :
581 : TP_printk("dev=%u, ring=%u, seqno=%u",
582 : __entry->dev, __entry->ring, __entry->seqno)
583 : );
584 :
585 0 : DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
586 : TP_PROTO(struct drm_i915_gem_request *req),
587 : TP_ARGS(req)
588 : );
589 :
590 : DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
591 : TP_PROTO(struct drm_i915_gem_request *req),
592 : TP_ARGS(req)
593 : );
594 :
595 0 : TRACE_EVENT(i915_gem_request_wait_begin,
596 : TP_PROTO(struct drm_i915_gem_request *req),
597 : TP_ARGS(req),
598 :
599 : TP_STRUCT__entry(
600 : __field(u32, dev)
601 : __field(u32, ring)
602 : __field(u32, seqno)
603 : __field(bool, blocking)
604 : ),
605 :
606 : /* NB: the blocking information is racy since mutex_is_locked
607 : * doesn't check that the current thread holds the lock. The only
608 : * other option would be to pass a boolean saying whether or not the
609 : * caller was blocking down through the stack, which is less
610 : * desirable. See the illustrative sketch after this event.
611 : */
612 : TP_fast_assign(
613 : struct intel_engine_cs *ring =
614 : i915_gem_request_get_ring(req);
615 : __entry->dev = ring->dev->primary->index;
616 : __entry->ring = ring->id;
617 : __entry->seqno = i915_gem_request_get_seqno(req);
618 : __entry->blocking =
619 : mutex_is_locked(&ring->dev->struct_mutex);
620 : ),
621 :
622 : TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
623 : __entry->dev, __entry->ring,
624 : __entry->seqno, __entry->blocking ? "yes (NB)" : "no")
625 : );
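/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * "blocking" field above is derived from mutex_is_locked(), which only
 * reports that *some* task owns the mutex. The helper below is
 * hypothetical and merely restates that check; it can read true even
 * when the waiter itself does not hold struct_mutex.
 */
#if 0
static inline bool i915_wait_looks_blocking(struct drm_device *dev)
{
	/* True if any task holds dev->struct_mutex, not necessarily us. */
	return mutex_is_locked(&dev->struct_mutex);
}
#endif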
626 :
627 0 : DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
628 : TP_PROTO(struct drm_i915_gem_request *req),
629 : TP_ARGS(req)
630 : );
631 :
632 0 : TRACE_EVENT(i915_flip_request,
633 : TP_PROTO(int plane, struct drm_i915_gem_object *obj),
634 :
635 : TP_ARGS(plane, obj),
636 :
637 : TP_STRUCT__entry(
638 : __field(int, plane)
639 : __field(struct drm_i915_gem_object *, obj)
640 : ),
641 :
642 : TP_fast_assign(
643 : __entry->plane = plane;
644 : __entry->obj = obj;
645 : ),
646 :
647 : TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
648 : );
649 :
650 0 : TRACE_EVENT(i915_flip_complete,
651 : TP_PROTO(int plane, struct drm_i915_gem_object *obj),
652 :
653 : TP_ARGS(plane, obj),
654 :
655 : TP_STRUCT__entry(
656 : __field(int, plane)
657 : __field(struct drm_i915_gem_object *, obj)
658 : ),
659 :
660 : TP_fast_assign(
661 : __entry->plane = plane;
662 : __entry->obj = obj;
663 : ),
664 :
665 : TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
666 : );
667 :
668 0 : TRACE_EVENT_CONDITION(i915_reg_rw,
669 : TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),
670 :
671 : TP_ARGS(write, reg, val, len, trace),
672 :
673 : TP_CONDITION(trace),
674 :
675 : TP_STRUCT__entry(
676 : __field(u64, val)
677 : __field(u32, reg)
678 : __field(u16, write)
679 : __field(u16, len)
680 : ),
681 :
682 : TP_fast_assign(
683 : __entry->val = (u64)val;
684 : __entry->reg = reg;
685 : __entry->write = write;
686 : __entry->len = len;
687 : ),
688 :
689 : TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
690 : __entry->write ? "write" : "read",
691 : __entry->reg, __entry->len,
692 : (u32)(__entry->val & 0xffffffff),
693 : (u32)(__entry->val >> 32))
694 : );
695 :
696 0 : TRACE_EVENT(intel_gpu_freq_change,
697 : TP_PROTO(u32 freq),
698 : TP_ARGS(freq),
699 :
700 : TP_STRUCT__entry(
701 : __field(u32, freq)
702 : ),
703 :
704 : TP_fast_assign(
705 : __entry->freq = freq;
706 : ),
707 :
708 : TP_printk("new_freq=%u", __entry->freq)
709 : );
710 :
711 : /**
712 : * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
713 : *
714 : * With full ppgtt enabled, each process using drm allocates at least one
715 : * translation table. These traces make it possible to track the allocation
716 : * and lifetime of those tables, which is useful during testing/debug to
717 : * verify that we are not leaking ppgtts. The traces identify a ppgtt
718 : * through its vm pointer, which is also printed by the i915_vma_bind and
719 : * i915_vma_unbind tracepoints. A usage sketch follows the definitions below.
720 : */
721 : DECLARE_EVENT_CLASS(i915_ppgtt,
722 : TP_PROTO(struct i915_address_space *vm),
723 : TP_ARGS(vm),
724 :
725 : TP_STRUCT__entry(
726 : __field(struct i915_address_space *, vm)
727 : __field(u32, dev)
728 : ),
729 :
730 : TP_fast_assign(
731 : __entry->vm = vm;
732 : __entry->dev = vm->dev->primary->index;
733 : ),
734 :
735 : TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
736 : )
737 :
738 0 : DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
739 : TP_PROTO(struct i915_address_space *vm),
740 : TP_ARGS(vm)
741 : );
742 :
743 0 : DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
744 : TP_PROTO(struct i915_address_space *vm),
745 : TP_ARGS(vm)
746 : );
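/*
 * Usage sketch (editorial addition, not part of the original header): on a
 * Linux build where these tracepoints are compiled in, a simple leak check
 * is to enable both events via tracefs and pair create/release lines by
 * their vm pointer:
 *
 *   echo 1 > /sys/kernel/debug/tracing/events/i915/i915_ppgtt_create/enable
 *   echo 1 > /sys/kernel/debug/tracing/events/i915/i915_ppgtt_release/enable
 *   cat /sys/kernel/debug/tracing/trace_pipe
 *
 * Every vm reported by i915_ppgtt_create should eventually appear in a
 * matching i915_ppgtt_release event.
 */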
747 :
748 : /**
749 : * DOC: i915_context_create and i915_context_free tracepoints
750 : *
751 : * These tracepoints track the creation and deletion of contexts. If full
752 : * ppgtt is enabled, they also print the address of the vm assigned to the
753 : * context. A usage sketch follows the event definitions below.
754 : */
755 : DECLARE_EVENT_CLASS(i915_context,
756 : TP_PROTO(struct intel_context *ctx),
757 : TP_ARGS(ctx),
758 :
759 : TP_STRUCT__entry(
760 : __field(u32, dev)
761 : __field(struct intel_context *, ctx)
762 : __field(struct i915_address_space *, vm)
763 : ),
764 :
765 : TP_fast_assign(
766 : __entry->ctx = ctx;
767 : __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
768 : __entry->dev = ctx->i915->dev->primary->index;
769 : ),
770 :
771 : TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
772 : __entry->dev, __entry->ctx, __entry->vm)
773 : )
774 :
775 0 : DEFINE_EVENT(i915_context, i915_context_create,
776 : TP_PROTO(struct intel_context *ctx),
777 : TP_ARGS(ctx)
778 : );
779 :
780 0 : DEFINE_EVENT(i915_context, i915_context_free,
781 : TP_PROTO(struct intel_context *ctx),
782 : TP_ARGS(ctx)
783 : );
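/*
 * Usage sketch (editorial addition, not part of the original header): with
 * full ppgtt the ctx_vm value printed here is the same address reported by
 * the i915_ppgtt_create/release and i915_vma_bind/unbind tracepoints, so
 * the event families can be correlated on that pointer. On a Linux build
 * one way to capture them together is perf, e.g.:
 *
 *   perf record -a -e 'i915:i915_context_*' -e 'i915:i915_ppgtt_*' sleep 10
 *   perf script
 */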
784 :
785 : /**
786 : * DOC: switch_mm tracepoint
787 : *
788 : * This tracepoint tracks the mm switch, an important point in the lifetime
789 : * of the vm on the legacy submission path. It is emitted only when full
790 : * ppgtt is enabled. A usage sketch follows the event definition below.
791 : */
792 0 : TRACE_EVENT(switch_mm,
793 : TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
794 :
795 : TP_ARGS(ring, to),
796 :
797 : TP_STRUCT__entry(
798 : __field(u32, ring)
799 : __field(struct intel_context *, to)
800 : __field(struct i915_address_space *, vm)
801 : __field(u32, dev)
802 : ),
803 :
804 : TP_fast_assign(
805 : __entry->ring = ring->id;
806 : __entry->to = to;
807 : __entry->vm = to->ppgtt ? &to->ppgtt->base : NULL;
808 : __entry->dev = ring->dev->primary->index;
809 : ),
810 :
811 : TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
812 : __entry->dev, __entry->ring, __entry->to, __entry->vm)
813 : );
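/*
 * Usage sketch (editorial addition, not part of the original header):
 * because this event is emitted only on the legacy (non-execlists)
 * submission path with full ppgtt, an empty trace is expected on platforms
 * using execlists. On a Linux build the event can also be narrowed with a
 * tracefs filter on its fields, e.g. to a single ring id:
 *
 *   echo 'ring == 0' > /sys/kernel/debug/tracing/events/i915/switch_mm/filter
 *   echo 1 > /sys/kernel/debug/tracing/events/i915/switch_mm/enable
 */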
814 :
815 : #endif /* _I915_TRACE_H_ */
816 :
817 : /* This part must be outside protection */
818 : #undef TRACE_INCLUDE_PATH
819 : #define TRACE_INCLUDE_PATH .
820 : #ifdef __linux__
821 : #include <trace/define_trace.h>
822 : #endif
|