/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM io_uring

#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IO_URING_H

#include <linux/tracepoint.h>
#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <linux/io_uring.h>

struct io_wq_work;

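/*
 * Each TRACE_EVENT() below expands into a trace_io_uring_<name>() helper
 * that the io_uring code calls at the traced site (the real expansion in
 * <linux/tracepoint.h> is more involved, and the call compiles down to a
 * no-op unless the tracepoint is enabled). As a rough sketch, the first
 * event becomes callable as:
 *
 *	trace_io_uring_create(fd, ctx, sq_entries, cq_entries, flags);
 */
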
/**
 * io_uring_create - called after a new io_uring context was prepared
 *
 * @fd: corresponding file descriptor
 * @ctx: pointer to a ring context structure
 * @sq_entries: actual SQ size
 * @cq_entries: actual CQ size
 * @flags: SQ ring flags, provided to io_uring_setup(2)
 *
 * Allows tracing of io_uring creation and provides a pointer to the context
 * that can be used later to find correlated events.
 */
TRACE_EVENT(io_uring_create,

	TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),

	TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),

	TP_STRUCT__entry (
		__field( int, fd )
		__field( void *, ctx )
		__field( u32, sq_entries )
		__field( u32, cq_entries )
		__field( u32, flags )
	),

	TP_fast_assign(
		__entry->fd = fd;
		__entry->ctx = ctx;
		__entry->sq_entries = sq_entries;
		__entry->cq_entries = cq_entries;
		__entry->flags = flags;
	),

	TP_printk("ring %p, fd %d sq size %d, cq size %d, flags 0x%x",
		  __entry->ctx, __entry->fd, __entry->sq_entries,
		  __entry->cq_entries, __entry->flags)
);
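
/*
 * Illustrative call site (a sketch; the actual caller lives in the ring
 * setup path in io_uring/io_uring.c):
 *
 *	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries,
 *			      p->flags);
 */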

/**
 * io_uring_register - called after a buffer/file/eventfd was successfully
 *		       registered for a ring
 *
 * @ctx: pointer to a ring context structure
 * @opcode: describes which operation to perform
 * @nr_files: number of registered files
 * @nr_bufs: number of registered buffers
 * @ret: return code
 *
 * Allows tracing of fixed files/buffers, which can be registered to avoid
 * the overhead of taking references to them for every operation. This
 * event, together with io_uring_file_get, can provide a full picture of how
 * much overhead one can reduce via fixing.
 */
TRACE_EVENT(io_uring_register,

	TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
		 unsigned nr_bufs, long ret),

	TP_ARGS(ctx, opcode, nr_files, nr_bufs, ret),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( unsigned, opcode )
		__field( unsigned, nr_files )
		__field( unsigned, nr_bufs )
		__field( long, ret )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->opcode = opcode;
		__entry->nr_files = nr_files;
		__entry->nr_bufs = nr_bufs;
		__entry->ret = ret;
	),

	TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
		  "ret %ld",
		  __entry->ctx, __entry->opcode, __entry->nr_files,
		  __entry->nr_bufs, __entry->ret)
);
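
/*
 * Like any tracepoint, these events can be watched from userspace through
 * tracefs, e.g. (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/events/io_uring/io_uring_register/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */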

/**
 * io_uring_file_get - called before getting references to an SQE file
 *
 * @req: pointer to a submitted request
 * @fd: SQE file descriptor
 *
 * Allows tracing how often an SQE file reference is obtained, which can
 * help figure out whether it makes sense to use fixed files, or check that
 * fixed files are used correctly.
 */
TRACE_EVENT(io_uring_file_get,

	TP_PROTO(struct io_kiocb *req, int fd),

	TP_ARGS(req, fd),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( u64, user_data )
		__field( int, fd )
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->user_data = req->cqe.user_data;
		__entry->fd = fd;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, fd %d",
		__entry->ctx, __entry->req, __entry->user_data, __entry->fd)
);

/**
 * io_uring_queue_async_work - called before submitting a new async work
 *
 * @req: pointer to a submitted request
 * @rw: type of workqueue, hashed or normal
 *
 * Allows tracing of asynchronous work submission.
 */
TRACE_EVENT(io_uring_queue_async_work,

	TP_PROTO(struct io_kiocb *req, int rw),

	TP_ARGS(req, rw),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( u64, user_data )
		__field( u8, opcode )
		__field( unsigned long long, flags )
		__field( struct io_wq_work *, work )
		__field( int, rw )

		__string( op_str, io_uring_get_opcode(req->opcode) )
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->user_data = req->cqe.user_data;
		__entry->flags = (__force unsigned long long) req->flags;
		__entry->opcode = req->opcode;
		__entry->work = &req->work;
		__entry->rw = rw;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%llx, %s queue, work %p",
		__entry->ctx, __entry->req, __entry->user_data,
		__get_str(op_str), __entry->flags,
		__entry->rw ? "hashed" : "normal", __entry->work)
);
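
/*
 * Note: "hashed" refers to io-wq work serialized by a hash key (e.g.
 * buffered writes to the same file), while "normal" work may run
 * concurrently with other items.
 */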

/**
 * io_uring_defer - called when an io_uring request is deferred
 *
 * @req: pointer to a deferred request
 *
 * Allows tracking of deferred requests, to get insight into which requests
 * are not started immediately.
 */
TRACE_EVENT(io_uring_defer,

	TP_PROTO(struct io_kiocb *req),

	TP_ARGS(req),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( unsigned long long, data )
		__field( u8, opcode )

		__string( op_str, io_uring_get_opcode(req->opcode) )
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->data = req->cqe.user_data;
		__entry->opcode = req->opcode;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s",
		__entry->ctx, __entry->req, __entry->data,
		__get_str(op_str))
);

/**
 * io_uring_link - called before an io_uring request is added to the link
 *		   list of another request
 *
 * @req: pointer to a linked request
 * @target_req: pointer to the previous request, whose link list will
 *		contain @req
 *
 * Allows tracking of linked requests, to understand dependencies between
 * requests and how they influence the execution flow.
 */
TRACE_EVENT(io_uring_link,

	TP_PROTO(struct io_kiocb *req, struct io_kiocb *target_req),

	TP_ARGS(req, target_req),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( void *, target_req )
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->target_req = target_req;
	),

	TP_printk("ring %p, request %p linked after %p",
		  __entry->ctx, __entry->req, __entry->target_req)
);
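
/*
 * For context, links are created from userspace by flagging an SQE; a
 * minimal liburing-based sketch (fd, buf and len are hypothetical):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, len, 0);
 *	sqe->flags |= IOSQE_IO_LINK;	// next SQE runs after this one
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_write(sqe, fd, buf, len, 0);
 *	io_uring_submit(&ring);
 */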

/**
 * io_uring_cqring_wait - called before starting to wait for an available CQE
 *
 * @ctx: pointer to a ring context structure
 * @min_events: minimal number of events to wait for
 *
 * Allows tracking of waiting for a CQE, so that we can e.g. troubleshoot
 * situations where an application waits for an event that never comes.
 */
TRACE_EVENT(io_uring_cqring_wait,

	TP_PROTO(void *ctx, int min_events),

	TP_ARGS(ctx, min_events),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( int, min_events )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->min_events = min_events;
	),

	TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
);

/**
 * io_uring_fail_link - called before failing a linked request
 *
 * @req: request whose links were cancelled
 * @link: cancelled link
 *
 * Allows tracking of linked request cancellation, to see not only that some
 * work was cancelled, but also which request was the reason.
 */
TRACE_EVENT(io_uring_fail_link,

	TP_PROTO(struct io_kiocb *req, struct io_kiocb *link),

	TP_ARGS(req, link),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( unsigned long long, user_data )
		__field( u8, opcode )
		__field( void *, link )

		__string( op_str, io_uring_get_opcode(req->opcode) )
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->user_data = req->cqe.user_data;
		__entry->opcode = req->opcode;
		__entry->link = link;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p",
		__entry->ctx, __entry->req, __entry->user_data,
		__get_str(op_str), __entry->link)
);

/**
 * io_uring_complete - called when completing an SQE
 *
 * @ctx: pointer to a ring context structure
 * @req: pointer to a submitted request
 * @user_data: user data associated with the request
 * @res: result of the request
 * @cflags: completion flags
 * @extra1: extra 64-bit data for CQE32
 * @extra2: extra 64-bit data for CQE32
 */
TRACE_EVENT(io_uring_complete,

	TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags,
		 u64 extra1, u64 extra2),

	TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( u64, user_data )
		__field( int, res )
		__field( unsigned, cflags )
		__field( u64, extra1 )
		__field( u64, extra2 )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->user_data = user_data;
		__entry->res = res;
		__entry->cflags = cflags;
		__entry->extra1 = extra1;
		__entry->extra2 = extra2;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
		  "extra1 %llu extra2 %llu ",
		__entry->ctx, __entry->req,
		__entry->user_data,
		__entry->res, __entry->cflags,
		(unsigned long long) __entry->extra1,
		(unsigned long long) __entry->extra2)
);

/**
 * io_uring_submit_req - called before submitting a request
 *
 * @req: pointer to a submitted request
 *
 * Allows tracking of SQE submission, to understand what its source was: the
 * SQ thread or an io_uring_enter(2) call.
 */
TRACE_EVENT(io_uring_submit_req,

	TP_PROTO(struct io_kiocb *req),

	TP_ARGS(req),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( unsigned long long, user_data )
		__field( u8, opcode )
		__field( unsigned long long, flags )
		__field( bool, sq_thread )

		__string( op_str, io_uring_get_opcode(req->opcode) )
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->user_data = req->cqe.user_data;
		__entry->opcode = req->opcode;
		__entry->flags = (__force unsigned long long) req->flags;
		__entry->sq_thread = req->ctx->flags & IORING_SETUP_SQPOLL;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%llx, "
		  "sq_thread %d", __entry->ctx, __entry->req,
		  __entry->user_data, __get_str(op_str), __entry->flags,
		  __entry->sq_thread)
);
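
/*
 * Together with io_uring_complete, this event can be used to estimate
 * per-request latency by matching user_data values, e.g. a bpftrace
 * sketch:
 *
 *	bpftrace -e '
 *	  tracepoint:io_uring:io_uring_submit_req { @t[args->user_data] = nsecs; }
 *	  tracepoint:io_uring:io_uring_complete /@t[args->user_data]/ {
 *		@lat = hist(nsecs - @t[args->user_data]);
 *		delete(@t[args->user_data]);
 *	  }'
 */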

/*
 * io_uring_poll_arm - called after arming a poll wait if successful
 *
 * @req: pointer to the armed request
 * @mask: request poll events mask
 * @events: registered events of interest
 *
 * Allows tracking which fds are being waited on and what the events of
 * interest are.
 */
TRACE_EVENT(io_uring_poll_arm,

	TP_PROTO(struct io_kiocb *req, int mask, int events),

	TP_ARGS(req, mask, events),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( unsigned long long, user_data )
		__field( u8, opcode )
		__field( int, mask )
		__field( int, events )

		__string( op_str, io_uring_get_opcode(req->opcode) )
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->user_data = req->cqe.user_data;
		__entry->opcode = req->opcode;
		__entry->mask = mask;
		__entry->events = events;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x",
		__entry->ctx, __entry->req, __entry->user_data,
		__get_str(op_str),
		__entry->mask, __entry->events)
);

/*
 * io_uring_task_add - called after adding a task
 *
 * @req: pointer to request
 * @mask: request poll events mask
 */
TRACE_EVENT(io_uring_task_add,

	TP_PROTO(struct io_kiocb *req, int mask),

	TP_ARGS(req, mask),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( unsigned long long, user_data )
		__field( u8, opcode )
		__field( int, mask )

		__string( op_str, io_uring_get_opcode(req->opcode) )
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->user_data = req->cqe.user_data;
		__entry->opcode = req->opcode;
		__entry->mask = mask;

		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x",
		__entry->ctx, __entry->req, __entry->user_data,
		__get_str(op_str),
		__entry->mask)
);

/*
 * io_uring_req_failed - called when an SQE is errored during submission
 *
 * @sqe: pointer to the io_uring_sqe that failed
 * @req: pointer to request
 * @error: error it failed with
 *
 * Allows easier diagnosing of malformed requests in production systems.
 */
TRACE_EVENT(io_uring_req_failed,

	TP_PROTO(const struct io_uring_sqe *sqe, struct io_kiocb *req, int error),

	TP_ARGS(sqe, req, error),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( unsigned long long, user_data )
		__field( u8, opcode )
		__field( u8, flags )
		__field( u8, ioprio )
		__field( u64, off )
		__field( u64, addr )
		__field( u32, len )
		__field( u32, op_flags )
		__field( u16, buf_index )
		__field( u16, personality )
		__field( u32, file_index )
		__field( u64, pad1 )
		__field( u64, addr3 )
		__field( int, error )

		__string( op_str, io_uring_get_opcode(sqe->opcode) )
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->user_data = sqe->user_data;
		__entry->opcode = sqe->opcode;
		__entry->flags = sqe->flags;
		__entry->ioprio = sqe->ioprio;
		__entry->off = sqe->off;
		__entry->addr = sqe->addr;
		__entry->len = sqe->len;
		__entry->op_flags = sqe->poll32_events;
		__entry->buf_index = sqe->buf_index;
		__entry->personality = sqe->personality;
		__entry->file_index = sqe->file_index;
		__entry->pad1 = sqe->__pad2[0];
		__entry->addr3 = sqe->addr3;
		__entry->error = error;

		__assign_str(op_str, io_uring_get_opcode(sqe->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, "
		  "opcode %s, flags 0x%x, prio=%d, off=%llu, addr=%llu, "
		  "len=%u, rw_flags=0x%x, buf_index=%d, "
		  "personality=%d, file_index=%d, pad=0x%llx, addr3=%llx, "
		  "error=%d",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str),
		  __entry->flags, __entry->ioprio,
		  (unsigned long long)__entry->off,
		  (unsigned long long) __entry->addr, __entry->len,
		  __entry->op_flags,
		  __entry->buf_index, __entry->personality, __entry->file_index,
		  (unsigned long long) __entry->pad1,
		  (unsigned long long) __entry->addr3, __entry->error)
);
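
/*
 * Illustrative trigger: an SQE carrying invalid flags or an unsupported
 * opcode fails validation and fires this event with e.g. error=-EINVAL,
 * before a CQE with the same error is posted.
 */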

/*
 * io_uring_cqe_overflow - a CQE overflowed
 *
 * @ctx: pointer to a ring context structure
 * @user_data: user data associated with the request
 * @res: CQE result
 * @cflags: CQE flags
 * @ocqe: pointer to the overflow cqe (if available)
 */
TRACE_EVENT(io_uring_cqe_overflow,

	TP_PROTO(void *ctx, unsigned long long user_data, s32 res, u32 cflags,
		 void *ocqe),

	TP_ARGS(ctx, user_data, res, cflags, ocqe),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( unsigned long long, user_data )
		__field( s32, res )
		__field( u32, cflags )
		__field( void *, ocqe )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->user_data = user_data;
		__entry->res = res;
		__entry->cflags = cflags;
		__entry->ocqe = ocqe;
	),

	TP_printk("ring %p, user_data 0x%llx, res %d, cflags 0x%x, "
		  "overflow_cqe %p",
		  __entry->ctx, __entry->user_data, __entry->res,
		  __entry->cflags, __entry->ocqe)
);
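
/*
 * Seeing this event means the CQ ring was full, so the completion had to
 * be buffered out of line (a NULL ocqe suggests the overflow entry could
 * not even be allocated); a quick way to count occurrences (sketch):
 *
 *	bpftrace -e 'tracepoint:io_uring:io_uring_cqe_overflow { @n = count(); }'
 */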

/*
 * io_uring_task_work_run - ran task work
 *
 * @tctx: pointer to an io_uring_task
 * @count: how many functions it ran
 */
TRACE_EVENT(io_uring_task_work_run,

	TP_PROTO(void *tctx, unsigned int count),

	TP_ARGS(tctx, count),

	TP_STRUCT__entry (
		__field( void *, tctx )
		__field( unsigned int, count )
	),

	TP_fast_assign(
		__entry->tctx = tctx;
		__entry->count = count;
	),

	TP_printk("tctx %p, count %u", __entry->tctx, __entry->count)
);

TRACE_EVENT(io_uring_short_write,

	TP_PROTO(void *ctx, u64 fpos, u64 wanted, u64 got),

	TP_ARGS(ctx, fpos, wanted, got),

	TP_STRUCT__entry(
		__field(void *, ctx)
		__field(u64, fpos)
		__field(u64, wanted)
		__field(u64, got)
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->fpos = fpos;
		__entry->wanted = wanted;
		__entry->got = got;
	),

	TP_printk("ring %p, fpos %lld, wanted %lld, got %lld",
		  __entry->ctx, __entry->fpos,
		  __entry->wanted, __entry->got)
);

/*
 * io_uring_local_work_run - ran ring local task work
 *
 * @ctx: pointer to an io_ring_ctx
 * @count: how many functions it ran
 * @loops: how many loops it ran
 */
TRACE_EVENT(io_uring_local_work_run,

	TP_PROTO(void *ctx, int count, unsigned int loops),

	TP_ARGS(ctx, count, loops),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( int, count )
		__field( unsigned int, loops )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->count = count;
		__entry->loops = loops;
	),

	TP_printk("ring %p, count %d, loops %u", __entry->ctx, __entry->count, __entry->loops)
);

#endif /* _TRACE_IO_URING_H */

/* This part must be outside protection */
#include <trace/define_trace.h>