// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>

#include "adf_accel_devices.h"
#include "adf_admin.h"
#include "adf_common_drv.h"
#include "adf_cnv_dbgfs.h"
#include "qat_compression.h"

#define CNV_DEBUGFS_FILENAME		"cnv_errors"
#define CNV_MIN_PADDING			16

#define CNV_ERR_INFO_MASK		GENMASK(11, 0)
#define CNV_ERR_TYPE_MASK		GENMASK(15, 12)
#define CNV_SLICE_ERR_SIGN_BIT_INDEX	7
#define CNV_DELTA_ERR_SIGN_BIT_INDEX	11

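/*
 * A CNV error is reported as a 16-bit word: bits 15:12 hold the error type
 * and bits 11:0 the error information. Depending on the type, the info
 * field is interpreted as a signed delta (sign bit 11), a signed slice
 * error code (sign bit 7) or a plain 12-bit value; see get_err_info().
 */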
enum cnv_error_type {
	CNV_ERR_TYPE_NONE,
	CNV_ERR_TYPE_CHECKSUM,
	CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH,
	CNV_ERR_TYPE_DECOMPRESSION,
	CNV_ERR_TYPE_TRANSLATION,
	CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH,
	CNV_ERR_TYPE_UNKNOWN,
	CNV_ERR_TYPES_COUNT
};

#define CNV_ERROR_TYPE_GET(latest_err) \
	min_t(u16, u16_get_bits(latest_err, CNV_ERR_TYPE_MASK), CNV_ERR_TYPE_UNKNOWN)

#define CNV_GET_DELTA_ERR_INFO(latest_error) \
	sign_extend32(latest_error, CNV_DELTA_ERR_SIGN_BIT_INDEX)

#define CNV_GET_SLICE_ERR_INFO(latest_error) \
	sign_extend32(latest_error, CNV_SLICE_ERR_SIGN_BIT_INDEX)

#define CNV_GET_DEFAULT_ERR_INFO(latest_error) \
	u16_get_bits(latest_error, CNV_ERR_INFO_MASK)

enum cnv_fields {
	CNV_ERR_COUNT,
	CNV_LATEST_ERR,
	CNV_FIELDS_COUNT
};

static const char * const cnv_field_names[CNV_FIELDS_COUNT] = {
	[CNV_ERR_COUNT] = "Total Errors",
	[CNV_LATEST_ERR] = "Last Error",
};

static const char * const cnv_error_names[CNV_ERR_TYPES_COUNT] = {
	[CNV_ERR_TYPE_NONE] = "No Error",
	[CNV_ERR_TYPE_CHECKSUM] = "Checksum Error",
	[CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH] = "Length Error-P",
	[CNV_ERR_TYPE_DECOMPRESSION] = "Decomp Error",
	[CNV_ERR_TYPE_TRANSLATION] = "Xlat Error",
	[CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH] = "Length Error-C",
	[CNV_ERR_TYPE_UNKNOWN] = "Unknown Error",
};

struct ae_cnv_errors {
	u16 ae;
	u16 err_cnt;
	u16 latest_err;
	bool is_comp_ae;
};

struct cnv_err_stats {
	u16 ae_count;
	struct ae_cnv_errors ae_cnv_errors[];
};

static s16 get_err_info(u8 error_type, u16 latest)
{
	switch (error_type) {
	case CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH:
	case CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH:
		return CNV_GET_DELTA_ERR_INFO(latest);
	case CNV_ERR_TYPE_DECOMPRESSION:
	case CNV_ERR_TYPE_TRANSLATION:
		return CNV_GET_SLICE_ERR_INFO(latest);
	default:
		return CNV_GET_DEFAULT_ERR_INFO(latest);
	}
}

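/*
 * seq_file iteration: position 0 yields SEQ_START_TOKEN so that ->show()
 * prints the header row; positions 1..ae_count map to
 * ae_cnv_errors[pos - 1], one entry per non-admin AE.
 */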
static void *qat_cnv_errors_seq_start(struct seq_file *sfile, loff_t *pos)
{
	struct cnv_err_stats *err_stats = sfile->private;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	if (*pos > err_stats->ae_count)
		return NULL;

	return &err_stats->ae_cnv_errors[*pos - 1];
}

static void *qat_cnv_errors_seq_next(struct seq_file *sfile, void *v,
				     loff_t *pos)
{
	struct cnv_err_stats *err_stats = sfile->private;

	(*pos)++;

	if (*pos > err_stats->ae_count)
		return NULL;

	return &err_stats->ae_cnv_errors[*pos - 1];
}

static void qat_cnv_errors_seq_stop(struct seq_file *sfile, void *v)
{
}

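/*
 * The report is a header row ("AE", "Total Errors", "Last Error") followed
 * by one row per compression-capable AE: the AE number, the error count
 * and the decoded name of the last error with its info value in brackets,
 * e.g. "Checksum Error [0]" (values illustrative). Columns are padded to
 * CNV_MIN_PADDING characters.
 */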
static int qat_cnv_errors_seq_show(struct seq_file *sfile, void *v)
{
	struct ae_cnv_errors *ae_errors;
	unsigned int i;
	s16 err_info;
	u8 err_type;

	if (v == SEQ_START_TOKEN) {
		seq_puts(sfile, "AE ");
		for (i = 0; i < CNV_FIELDS_COUNT; ++i)
			seq_printf(sfile, " %*s", CNV_MIN_PADDING,
				   cnv_field_names[i]);
	} else {
		ae_errors = v;

		if (!ae_errors->is_comp_ae)
			return 0;

		err_type = CNV_ERROR_TYPE_GET(ae_errors->latest_err);
		err_info = get_err_info(err_type, ae_errors->latest_err);

		seq_printf(sfile, "%d:", ae_errors->ae);
		seq_printf(sfile, " %*d", CNV_MIN_PADDING, ae_errors->err_cnt);
		seq_printf(sfile, "%*s [%d]", CNV_MIN_PADDING,
			   cnv_error_names[err_type], err_info);
	}
	seq_putc(sfile, '\n');

	return 0;
}

static const struct seq_operations qat_cnv_errors_sops = {
	.start = qat_cnv_errors_seq_start,
	.next = qat_cnv_errors_seq_next,
	.stop = qat_cnv_errors_seq_stop,
	.show = qat_cnv_errors_seq_show,
};

/**
 * cnv_err_stats_alloc() - Get CNV stats for the provided device.
 * @accel_dev: Pointer to a QAT acceleration device
 *
 * Allocates and populates a table of CNV error statistics for each non-admin
 * AE available through the supplied acceleration device. The caller becomes
 * the owner of this memory and is responsible for freeing it with kfree().
 *
 * Return: a pointer to a dynamically allocated struct cnv_err_stats on
 * success, or an ERR_PTR() encoded error value on failure.
 */
static struct cnv_err_stats *cnv_err_stats_alloc(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	struct cnv_err_stats *err_stats;
	unsigned long ae_count;
	unsigned long ae_mask;
	size_t err_stats_size;
	unsigned long ae;
	unsigned int i;
	u16 latest_err;
	u16 err_cnt;
	int ret;

	if (!adf_dev_started(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "QAT Device not started\n");
		return ERR_PTR(-EBUSY);
	}

	/* Ignore the admin AEs */
	ae_mask = hw_data->ae_mask & ~hw_data->admin_ae_mask;
	ae_count = hweight_long(ae_mask);
	if (unlikely(!ae_count))
		return ERR_PTR(-EINVAL);

	err_stats_size = struct_size(err_stats, ae_cnv_errors, ae_count);
	err_stats = kmalloc(err_stats_size, GFP_KERNEL);
	if (!err_stats)
		return ERR_PTR(-ENOMEM);

	err_stats->ae_count = ae_count;

	i = 0;
	for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
		ret = adf_get_cnv_stats(accel_dev, ae, &err_cnt, &latest_err);
		if (ret) {
			dev_dbg(&GET_DEV(accel_dev),
				"Failed to get CNV stats for ae %ld, [%d].\n",
				ae, ret);
			err_stats->ae_cnv_errors[i++].is_comp_ae = false;
			continue;
		}
		err_stats->ae_cnv_errors[i].is_comp_ae = true;
		err_stats->ae_cnv_errors[i].latest_err = latest_err;
		err_stats->ae_cnv_errors[i].err_cnt = err_cnt;
		err_stats->ae_cnv_errors[i].ae = ae;
		i++;
	}

	return err_stats;
}

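/*
 * The error table is snapshotted once per open(): cnv_err_stats_alloc()
 * runs here and the result is attached to seq_file->private, then freed
 * in the release handler, so reads reflect the state at open time.
 */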
static int qat_cnv_errors_file_open(struct inode *inode, struct file *file)
{
	struct adf_accel_dev *accel_dev = inode->i_private;
	struct seq_file *cnv_errors_seq_file;
	struct cnv_err_stats *cnv_err_stats;
	int ret;

	cnv_err_stats = cnv_err_stats_alloc(accel_dev);
	if (IS_ERR(cnv_err_stats))
		return PTR_ERR(cnv_err_stats);

	ret = seq_open(file, &qat_cnv_errors_sops);
	if (unlikely(ret)) {
		kfree(cnv_err_stats);
		return ret;
	}

	cnv_errors_seq_file = file->private_data;
	cnv_errors_seq_file->private = cnv_err_stats;
	return ret;
}

static int qat_cnv_errors_file_release(struct inode *inode, struct file *file)
{
	struct seq_file *cnv_errors_seq_file = file->private_data;

	kfree(cnv_errors_seq_file->private);
	cnv_errors_seq_file->private = NULL;

	return seq_release(inode, file);
}

static const struct file_operations qat_cnv_fops = {
	.owner = THIS_MODULE,
	.open = qat_cnv_errors_file_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = qat_cnv_errors_file_release,
};

static ssize_t no_comp_file_read(struct file *f, char __user *buf, size_t count,
				 loff_t *pos)
{
	char *file_msg = "No engine configured for comp\n";

	return simple_read_from_buffer(buf, count, pos, file_msg,
				       strlen(file_msg));
}

static const struct file_operations qat_cnv_no_comp_fops = {
	.owner = THIS_MODULE,
	.read = no_comp_file_read,
};

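/*
 * Expose the statistics as a read-only "cnv_errors" file (mode 0400) under
 * the device's debugfs directory, typically below /sys/kernel/debug/; the
 * exact parent path is whatever was registered in accel_dev->debugfs_dir.
 */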
void adf_cnv_dbgfs_add(struct adf_accel_dev *accel_dev)
{
	const struct file_operations *fops;
	void *data;

	if (adf_hw_dev_has_compression(accel_dev)) {
		fops = &qat_cnv_fops;
		data = accel_dev;
	} else {
		fops = &qat_cnv_no_comp_fops;
		data = NULL;
	}

	accel_dev->cnv_dbgfile = debugfs_create_file(CNV_DEBUGFS_FILENAME, 0400,
						     accel_dev->debugfs_dir,
						     data, fops);
}

void adf_cnv_dbgfs_rm(struct adf_accel_dev *accel_dev)
{
	debugfs_remove(accel_dev->cnv_dbgfile);
	accel_dev->cnv_dbgfile = NULL;
}