/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H

#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/poll.h>

#include <uapi/linux/trace_mmap.h>

struct trace_buffer;
struct ring_buffer_iter;

/*
 * Don't refer to this struct directly, use functions below.
 */
struct ring_buffer_event {
	u32		type_len:5, time_delta:27;

	u32		array[];
};

/**
 * enum ring_buffer_type - internal ring buffer types
 *
 * @RINGBUF_TYPE_PADDING:	Left over page padding or discarded event
 *				 If time_delta is 0:
 *				  array is ignored
 *				  size is variable depending on how much
 *				  padding is needed
 *				 If time_delta is non zero:
 *				  array[0] holds the actual length
 *				  size = 4 + length (bytes)
 *
 * @RINGBUF_TYPE_TIME_EXTEND:	Extend the time delta
 *				 array[0] = time delta (28 .. 59)
 *				 size = 8 bytes
 *
 * @RINGBUF_TYPE_TIME_STAMP:	Absolute timestamp
 *				 Same format as TIME_EXTEND except that the
 *				 value is an absolute timestamp, not a delta
 *				 event.time_delta contains bottom 27 bits
 *				 array[0] = top (28 .. 59) bits
 *				 size = 8 bytes
 *
 * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
 *				Data record
 *				 If type_len is zero:
 *				  array[0] holds the actual length
 *				  array[1..(length+3)/4] holds data
 *				  size = 4 + length (bytes)
 *				 else
 *				  length = type_len << 2
 *				  array[0..(length+3)/4-1] holds data
 *				  size = 4 + length (bytes)
 */
enum ring_buffer_type {
	RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
	RINGBUF_TYPE_PADDING,
	RINGBUF_TYPE_TIME_EXTEND,
	RINGBUF_TYPE_TIME_STAMP,
};

unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event);
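
/*
 * Example (a minimal sketch): the payload of an event should only be
 * reached through the accessors above, never by poking at type_len or
 * array[] directly. process() is a hypothetical consumer:
 *
 *	void *data = ring_buffer_event_data(event);
 *	unsigned len = ring_buffer_event_length(event);
 *
 *	process(data, len);
 */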

/*
 * ring_buffer_discard_commit will remove an event that has not
 * been committed yet. If this is used, then ring_buffer_unlock_commit
 * must not be called on the discarded event. This function
 * will try to remove the event from the ring buffer completely
 * if another event has not been written after it.
 *
 * Example use:
 *
 *  if (some_condition)
 *    ring_buffer_discard_commit(buffer, event);
 *  else
 *    ring_buffer_unlock_commit(buffer);
 */
void ring_buffer_discard_commit(struct trace_buffer *buffer,
				struct ring_buffer_event *event);

/*
 * size is in bytes for each per CPU buffer.
 */
struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);

struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
					       int order, unsigned long start,
					       unsigned long range_size,
					       unsigned long scratch_size,
					       struct lock_class_key *key);

void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size);

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc(size, flags)			\
({							\
	static struct lock_class_key __key;		\
	__ring_buffer_alloc((size), (flags), &__key);	\
})
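
/*
 * Example use (a sketch): 1 MB per CPU, overwriting the oldest data
 * when a buffer fills up:
 *
 *	struct trace_buffer *buf;
 *
 *	buf = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buf)
 *		return -ENOMEM;
 */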

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc_range(size, flags, order, start, range_size, s_size)	\
({										\
	static struct lock_class_key __key;					\
	__ring_buffer_alloc_range((size), (flags), (order), (start),		\
				  (range_size), (s_size), &__key);		\
})

typedef bool (*ring_buffer_cond_fn)(void *data);
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
		     ring_buffer_cond_fn cond, void *data);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			       struct file *filp, poll_table *poll_table, int full);
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
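
/*
 * Example (a sketch): the optional ring_buffer_cond_fn lets a waiter
 * bail out early. Here it watches a hypothetical "stop" flag passed in
 * through @data, returning true when waiting should end:
 *
 *	static bool my_wait_cond(void *data)	// hypothetical helper
 *	{
 *		return READ_ONCE(*(bool *)data);
 *	}
 *
 *	ret = ring_buffer_wait(buffer, cpu, 0, my_wait_cond, &stop);
 */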

#define RING_BUFFER_ALL_CPUS -1

void ring_buffer_free(struct trace_buffer *buffer);

int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);

void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);

struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
						   unsigned long length);
int ring_buffer_unlock_commit(struct trace_buffer *buffer);
int ring_buffer_write(struct trace_buffer *buffer,
		      unsigned long length, void *data);
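
/*
 * Typical write path (a sketch): reserve space, fill in the payload,
 * then commit. Note ring_buffer_unlock_commit() takes only the buffer;
 * it commits the event last reserved on the current CPU. struct
 * my_entry is a hypothetical payload type:
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(struct my_entry));
 *	if (event) {
 *		struct my_entry *entry = ring_buffer_event_data(event);
 *
 *		entry->value = value;
 *		ring_buffer_unlock_commit(buffer);
 *	}
 *
 * ring_buffer_write() performs the same reserve/copy/commit in one call.
 */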

void ring_buffer_nest_start(struct trace_buffer *buffer);
void ring_buffer_nest_end(struct trace_buffer *buffer);

struct ring_buffer_event *
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events);
struct ring_buffer_event *
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events);
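
/*
 * Example consumer loop (a sketch): ring_buffer_consume() returns the
 * next event on @cpu and advances past it, or NULL when the buffer is
 * empty; ring_buffer_peek() returns the same event without consuming
 * it. process() is hypothetical:
 *
 *	u64 ts;
 *	unsigned long lost;
 *	struct ring_buffer_event *event;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process(ring_buffer_event_data(event), ts, lost);
 */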

struct ring_buffer_iter *
ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);

struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
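
/*
 * Example iteration (a sketch): a non-consuming read of one CPU's
 * buffer. process() is hypothetical:
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	if (!iter)
 *		return;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *		process(event, ts);
 *		ring_buffer_iter_advance(iter);
 *	}
 *	ring_buffer_read_finish(iter);
 */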

unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer);

void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
void ring_buffer_reset(struct trace_buffer *buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
			 struct trace_buffer *buffer_b, int cpu);
#else
static inline int
ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
		     struct trace_buffer *buffer_b, int cpu)
{
	return -ENODEV;
}
#endif

bool ring_buffer_empty(struct trace_buffer *buffer);
bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);

void ring_buffer_record_disable(struct trace_buffer *buffer);
void ring_buffer_record_enable(struct trace_buffer *buffer);
void ring_buffer_record_off(struct trace_buffer *buffer);
void ring_buffer_record_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_on_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct trace_buffer *buffer);
unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_time_stamp(struct trace_buffer *buffer);
void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts);
void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void));
void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);

size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);

struct buffer_data_read_page;
struct buffer_data_read_page *
ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
				struct buffer_data_read_page *page);
int ring_buffer_read_page(struct trace_buffer *buffer,
			  struct buffer_data_read_page *data_page,
			  size_t len, int cpu, int full);
void *ring_buffer_read_page_data(struct buffer_data_read_page *page);
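
/*
 * Example page reader (a sketch): copy a page's worth of events out of
 * the buffer in one go. consume() is hypothetical;
 * ring_buffer_read_page() returns a negative value when no data was
 * transferred:
 *
 *	struct buffer_data_read_page *page;
 *
 *	page = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	if (ring_buffer_read_page(buffer, page, PAGE_SIZE, cpu, 0) >= 0)
 *		consume(ring_buffer_read_page_data(page));
 *	ring_buffer_free_read_page(buffer, cpu, page);
 */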

struct trace_seq;

int ring_buffer_print_entry_header(struct trace_seq *s);
int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s);

int ring_buffer_subbuf_order_get(struct trace_buffer *buffer);
int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order);
int ring_buffer_subbuf_size_get(struct trace_buffer *buffer);

enum ring_buffer_flags {
	RB_FL_OVERWRITE		= 1 << 0,
};

#ifdef CONFIG_RING_BUFFER
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#else
#define trace_rb_cpu_prepare	NULL
#endif

int ring_buffer_map(struct trace_buffer *buffer, int cpu,
		    struct vm_area_struct *vma);
int ring_buffer_unmap(struct trace_buffer *buffer, int cpu);
int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu);
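
/*
 * Example (a sketch): ring_buffer_map() is meant to back an mmap file
 * operation so user space can map a CPU buffer directly. my_mmap() and
 * the way buffer/cpu are looked up are hypothetical:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return ring_buffer_map(buffer, cpu, vma);
 *	}
 */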
#endif /* _LINUX_RING_BUFFER_H */