// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_devcoredump.h"
#include "xe_devcoredump_types.h"

#include <linux/ascii85.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>

#include <drm/drm_managed.h>

#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_guc_log.h"
#include "xe_guc_submit.h"
#include "xe_hw_engine.h"
#include "xe_module.h"
#include "xe_pm.h"
#include "xe_sched_job.h"
#include "xe_vm.h"

/**
 * DOC: Xe device coredump
 *
 * Xe uses the dev_coredump infrastructure to expose crash errors in a
 * standardized way. Once a crash occurs, devcoredump exposes a temporary
 * node under ``/sys/class/devcoredump/devcd<m>/``. The same node is also
 * accessible in ``/sys/class/drm/card<n>/device/devcoredump/``. The
 * ``failing_device`` symlink points to the device that crashed and created the
 * coredump.
 *
 * The following characteristics are observed by xe when creating a device
 * coredump:
 *
 * **Snapshot at hang**:
 * The 'data' file contains a snapshot of the HW and driver states at the time
 * the hang happened. Because the driver recovers from resets/crashes, this
 * snapshot may not correspond to the state of the system when the file is
 * read by userspace.
 *
 * **Coredump release**:
 * After a coredump is generated, it stays in kernel memory until released by
 * userspace by writing anything to it, or after an internal timer expires. The
 * exact timeout may vary and should not be relied upon. Example to release
 * a coredump:
 *
 * .. code-block:: shell
 *
 *	$ > /sys/class/drm/card0/device/devcoredump/data
 *
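 * The contents can be saved for later inspection before the dump is released,
 * for example (assuming the dump is held by card0):
 *
 * .. code-block:: shell
 *
 *	$ cat /sys/class/drm/card0/device/devcoredump/data > /tmp/xe_devcoredump
 *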
 * **First failure only**:
 * In general, the first hang is the most critical one since the following
 * hangs can be a consequence of the initial hang. For this reason a snapshot
 * is taken only for the first failure. Until the devcoredump is released by
 * userspace or the kernel, all subsequent hangs do not overwrite the snapshot
 * nor create new ones. Devcoredump has a delayed work queue that will
 * eventually delete the file node and free all the dump information.
 */

#ifdef CONFIG_DEV_COREDUMP

/* 1 hour timeout */
#define XE_COREDUMP_TIMEOUT_JIFFIES (60 * 60 * HZ)

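/* The devcoredump is embedded in struct xe_device, so recover it via container_of() */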
static struct xe_device *coredump_to_xe(const struct xe_devcoredump *coredump)
{
	return container_of(coredump, struct xe_device, devcoredump);
}

static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
{
	return &q->gt->uc.guc;
}

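/*
 * Format the snapshot into @buffer via a drm_coredump_printer. Passing a NULL
 * @buffer only counts the bytes that would be written, which is how the total
 * dump size is calculated.
 */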
static ssize_t __xe_devcoredump_read(char *buffer, ssize_t count,
				     ssize_t start,
				     struct xe_devcoredump *coredump)
{
	struct xe_device *xe;
	struct xe_devcoredump_snapshot *ss;
	struct drm_printer p;
	struct drm_print_iterator iter;
	struct timespec64 ts;
	int i;

	xe = coredump_to_xe(coredump);
	ss = &coredump->snapshot;

	iter.data = buffer;
	iter.start = start;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_puts(&p, "**** Xe Device Coredump ****\n");
	drm_printf(&p, "Reason: %s\n", ss->reason);
	drm_puts(&p, "kernel: " UTS_RELEASE "\n");
	drm_puts(&p, "module: " KBUILD_MODNAME "\n");

	ts = ktime_to_timespec64(ss->snapshot_time);
	drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
	ts = ktime_to_timespec64(ss->boot_time);
	drm_printf(&p, "Uptime: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
	drm_printf(&p, "Process: %s [%d]\n", ss->process_name, ss->pid);
	xe_device_snapshot_print(xe, &p);

	drm_printf(&p, "\n**** GT #%d ****\n", ss->gt->info.id);
	drm_printf(&p, "\tTile: %d\n", ss->gt->tile->id);

	drm_puts(&p, "\n**** GuC Log ****\n");
	xe_guc_log_snapshot_print(ss->guc.log, &p);
	drm_puts(&p, "\n**** GuC CT ****\n");
	xe_guc_ct_snapshot_print(ss->guc.ct, &p);

	drm_puts(&p, "\n**** Contexts ****\n");
	xe_guc_exec_queue_snapshot_print(ss->ge, &p);

	drm_puts(&p, "\n**** Job ****\n");
	xe_sched_job_snapshot_print(ss->job, &p);

	drm_puts(&p, "\n**** HW Engines ****\n");
	for (i = 0; i < XE_NUM_HW_ENGINES; i++)
		if (ss->hwe[i])
			xe_engine_snapshot_print(ss->hwe[i], &p);

	drm_puts(&p, "\n**** VM state ****\n");
	xe_vm_snapshot_print(ss->vm, &p);

	return count - iter.remain;
}

static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
{
	int i;

	kfree(ss->reason);
	ss->reason = NULL;

	xe_guc_log_snapshot_free(ss->guc.log);
	ss->guc.log = NULL;

	xe_guc_ct_snapshot_free(ss->guc.ct);
	ss->guc.ct = NULL;

	xe_guc_capture_put_matched_nodes(&ss->gt->uc.guc);
	ss->matched_node = NULL;

	xe_guc_exec_queue_snapshot_free(ss->ge);
	ss->ge = NULL;

	xe_sched_job_snapshot_free(ss->job);
	ss->job = NULL;

	for (i = 0; i < XE_NUM_HW_ENGINES; i++)
		if (ss->hwe[i]) {
			xe_hw_engine_snapshot_free(ss->hwe[i]);
			ss->hwe[i] = NULL;
		}

	xe_vm_snapshot_free(ss->vm);
	ss->vm = NULL;
}

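/* Maximum chunk of the formatted dump kept in memory at a time: 1.5 GiB */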
#define XE_DEVCOREDUMP_CHUNK_MAX	(SZ_512M + SZ_1G)

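/*
 * Read callback registered with devcoredump. Dumps larger than
 * XE_DEVCOREDUMP_CHUNK_MAX are re-formatted one chunk at a time as userspace
 * seeks through them; smaller dumps are served straight from the read buffer.
 */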
static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
				   size_t count, void *data, size_t datalen)
{
	struct xe_devcoredump *coredump = data;
	struct xe_devcoredump_snapshot *ss;
	ssize_t byte_copied;
	u32 chunk_offset;
	ssize_t new_chunk_position;

	if (!coredump)
		return -ENODEV;

	ss = &coredump->snapshot;

	/* Ensure delayed work is captured before continuing */
	flush_work(&ss->work);

	if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX)
		xe_pm_runtime_get(gt_to_xe(ss->gt));

	mutex_lock(&coredump->lock);

	if (!ss->read.buffer) {
		mutex_unlock(&coredump->lock);
		return -ENODEV;
	}

	if (offset >= ss->read.size) {
		mutex_unlock(&coredump->lock);
		return 0;
	}

	new_chunk_position = div_u64_rem(offset,
					 XE_DEVCOREDUMP_CHUNK_MAX,
					 &chunk_offset);

	if (offset >= ss->read.chunk_position + XE_DEVCOREDUMP_CHUNK_MAX ||
	    offset < ss->read.chunk_position) {
		ss->read.chunk_position = new_chunk_position *
			XE_DEVCOREDUMP_CHUNK_MAX;

		__xe_devcoredump_read(ss->read.buffer,
				      XE_DEVCOREDUMP_CHUNK_MAX,
				      ss->read.chunk_position, coredump);
	}

	byte_copied = count < ss->read.size - offset ? count :
		ss->read.size - offset;
	memcpy(buffer, ss->read.buffer + chunk_offset, byte_copied);

	mutex_unlock(&coredump->lock);

	if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX)
		xe_pm_runtime_put(gt_to_xe(ss->gt));

	return byte_copied;
}

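/* Free callback: runs when userspace releases the dump or the timeout expires */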
static void xe_devcoredump_free(void *data)
{
	struct xe_devcoredump *coredump = data;

	/* Our device is gone. Nothing to do... */
	if (!data || !coredump_to_xe(coredump))
		return;

	cancel_work_sync(&coredump->snapshot.work);

	mutex_lock(&coredump->lock);

	xe_devcoredump_snapshot_free(&coredump->snapshot);
	kvfree(coredump->snapshot.read.buffer);

	/* To prevent stale data on next snapshot, clear everything */
	memset(&coredump->snapshot, 0, sizeof(coredump->snapshot));
	coredump->captured = false;
	drm_info(&coredump_to_xe(coredump)->drm,
		 "Xe device coredump has been deleted.\n");

	mutex_unlock(&coredump->lock);
}

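/*
 * Worker for the deferred part of the capture: registers the dump with the
 * devcoredump core, captures the state that may sleep or allocate with
 * GFP_KERNEL, then pre-formats the read buffer.
 */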
static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
{
	struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
	struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
	struct xe_device *xe = coredump_to_xe(coredump);
	unsigned int fw_ref;

	/*
	 * NB: Despite passing a GFP_ flags parameter here, more allocations are done
	 * internally using GFP_KERNEL explicitly. Hence this call must be in the worker
	 * thread and not in the initial capture call.
	 */
	dev_coredumpm_timeout(gt_to_xe(ss->gt)->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
			      xe_devcoredump_read, xe_devcoredump_free,
			      XE_COREDUMP_TIMEOUT_JIFFIES);

	xe_pm_runtime_get(xe);

	/* keep going if fw fails as we still want to save the memory and SW data */
	fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
		xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
	xe_vm_snapshot_capture_delayed(ss->vm);
	xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
	xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);

	ss->read.chunk_position = 0;

	/* Calculate devcoredump size */
	ss->read.size = __xe_devcoredump_read(NULL, LONG_MAX, 0, coredump);

	if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) {
		ss->read.buffer = kvmalloc(XE_DEVCOREDUMP_CHUNK_MAX,
					   GFP_USER);
		if (!ss->read.buffer)
			goto put_pm;

		__xe_devcoredump_read(ss->read.buffer,
				      XE_DEVCOREDUMP_CHUNK_MAX,
				      0, coredump);
	} else {
		ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
		if (!ss->read.buffer)
			goto put_pm;

		__xe_devcoredump_read(ss->read.buffer, ss->read.size, 0,
				      coredump);
		xe_devcoredump_snapshot_free(ss);
	}

put_pm:
	xe_pm_runtime_put(xe);
}

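/*
 * Capture the state that must be sampled at hang time. This runs in
 * dma-fence signalling context, so anything that may sleep is deferred to
 * the snapshot worker.
 */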
static void devcoredump_snapshot(struct xe_devcoredump *coredump,
				 struct xe_exec_queue *q,
				 struct xe_sched_job *job)
{
	struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
	struct xe_guc *guc = exec_queue_to_guc(q);
	u32 adj_logical_mask = q->logical_mask;
	u32 width_mask = (0x1 << q->width) - 1;
	const char *process_name = "no process";

	unsigned int fw_ref;
	bool cookie;
	int i;

	ss->snapshot_time = ktime_get_real();
	ss->boot_time = ktime_get_boottime();

	if (q->vm && q->vm->xef) {
		process_name = q->vm->xef->process_name;
		ss->pid = q->vm->xef->pid;
	}

	strscpy(ss->process_name, process_name);

	ss->gt = q->gt;
	INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work);

	cookie = dma_fence_begin_signalling();
	for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
		if (adj_logical_mask & BIT(i)) {
			adj_logical_mask |= width_mask << i;
			i += q->width;
		} else {
			++i;
		}
	}

	/* keep going if fw fails as we still want to save the memory and SW data */
	fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);

	ss->guc.log = xe_guc_log_snapshot_capture(&guc->log, true);
	ss->guc.ct = xe_guc_ct_snapshot_capture(&guc->ct);
	ss->ge = xe_guc_exec_queue_snapshot_capture(q);
	if (job)
		ss->job = xe_sched_job_snapshot_capture(job);
	ss->vm = xe_vm_snapshot_capture(q->vm);

	xe_engine_snapshot_capture_for_queue(q);

	queue_work(system_unbound_wq, &ss->work);

	xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
	dma_fence_end_signalling(cookie);
}

/**
 * xe_devcoredump - Take the required snapshots and initialize coredump device.
 * @q: The faulty xe_exec_queue, where the issue was detected.
 * @job: The faulty xe_sched_job, where the issue was detected.
 * @fmt: Printf format + args to describe the reason for the core dump
 *
 * This function should be called at the crash time within the serialized
 * gt_reset. It is skipped if we still have the core dump device available
 * with the information of the 'first' snapshot.
 */
__printf(3, 4)
void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job, const char *fmt, ...)
{
	struct xe_device *xe = gt_to_xe(q->gt);
	struct xe_devcoredump *coredump = &xe->devcoredump;
	va_list varg;

	mutex_lock(&coredump->lock);

	if (coredump->captured) {
		drm_dbg(&xe->drm, "Multiple hangs are occurring, but only the first snapshot was taken\n");
		mutex_unlock(&coredump->lock);
		return;
	}

	coredump->captured = true;

	va_start(varg, fmt);
	coredump->snapshot.reason = kvasprintf(GFP_ATOMIC, fmt, varg);
	va_end(varg);

	devcoredump_snapshot(coredump, q, job);

	drm_info(&xe->drm, "Xe device coredump has been created\n");
	drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
		 xe->drm.primary->index);

	mutex_unlock(&coredump->lock);
}

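/* devm action: drop a pending coredump when the driver is unbound */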
static void xe_driver_devcoredump_fini(void *arg)
{
	struct drm_device *drm = arg;

	dev_coredump_put(drm->dev);
}

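/**
 * xe_devcoredump_init - Initialize xe_devcoredump for xe device
 * @xe: Xe device instance
 *
 * Return: 0 on success, negative error code on failure.
 */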
int xe_devcoredump_init(struct xe_device *xe)
{
	int err;

	err = drmm_mutex_init(&xe->drm, &xe->devcoredump.lock);
	if (err)
		return err;

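	/*
	 * Prime lockdep: record that the coredump lock can be taken under
	 * fs_reclaim, so allocating while holding it is flagged as a
	 * potential deadlock.
	 */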
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&xe->devcoredump.lock);
		fs_reclaim_release(GFP_KERNEL);
	}

	return devm_add_action_or_reset(xe->drm.dev, xe_driver_devcoredump_fini, &xe->drm);
}

#endif

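/*
 * Note: xe_print_blob_ascii85() sits outside the CONFIG_DEV_COREDUMP guard
 * above so that it remains available even when coredump support is compiled
 * out.
 */
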
/**
 * xe_print_blob_ascii85 - print a BLOB to some useful location in ASCII85
 * @p: the printer object to output to
 * @prefix: optional prefix to add to output string
 * @suffix: optional suffix to add at the end. 0 disables it and is
 *          not added to the output, which is useful when using multiple calls
 *          to dump data to @p
 * @blob: the Binary Large OBject to dump out
 * @offset: offset in bytes to skip from the front of the BLOB, must be a multiple of sizeof(u32)
 * @size: the size in bytes of the BLOB, must be a multiple of sizeof(u32)
 *
 * The output is split into multiple calls to drm_puts() because some print
 * targets, e.g. dmesg, cannot handle arbitrarily long lines. These targets may
 * add newlines, as is the case with dmesg: each drm_puts() call creates a
 * separate line.
 *
 * There is also a scheduler yield call to prevent the 'task has been stuck for
 * 120s' kernel hang check feature from firing when printing to a slow target
 * such as dmesg over a serial port.
 */
void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
			   const void *blob, size_t offset, size_t size)
{
	const u32 *blob32 = (const u32 *)blob;
	char buff[ASCII85_BUFSZ], *line_buff;
	size_t line_pos = 0;

#define DMESG_MAX_LINE_LEN	800
/* Always leave space for the suffix char and the \0 */
#define MIN_SPACE		(ASCII85_BUFSZ + 2)	/* 85 + "<suffix>\0" */

	if (size & 3)
		drm_printf(p, "Size not word aligned: %zu", size);
	if (offset & 3)
		drm_printf(p, "Offset not word aligned: %zu", offset);

	line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_ATOMIC);
	if (!line_buff) {
		drm_printf(p, "Failed to allocate line buffer\n");
		return;
	}

	blob32 += offset / sizeof(*blob32);
	size /= sizeof(*blob32);

	if (prefix) {
		strscpy(line_buff, prefix, DMESG_MAX_LINE_LEN - MIN_SPACE - 2);
		line_pos = strlen(line_buff);

		line_buff[line_pos++] = ':';
		line_buff[line_pos++] = ' ';
	}

	while (size--) {
		u32 val = *(blob32++);

		strscpy(line_buff + line_pos, ascii85_encode(val, buff),
			DMESG_MAX_LINE_LEN - line_pos);
		line_pos += strlen(line_buff + line_pos);

		if ((line_pos + MIN_SPACE) >= DMESG_MAX_LINE_LEN) {
			line_buff[line_pos++] = 0;

			drm_puts(p, line_buff);

			line_pos = 0;

			/* Prevent 'stuck thread' time out errors */
			cond_resched();
		}
	}

	if (suffix)
		line_buff[line_pos++] = suffix;

	if (line_pos) {
		line_buff[line_pos++] = 0;
		drm_puts(p, line_buff);
	}

	kfree(line_buff);

#undef MIN_SPACE
#undef DMESG_MAX_LINE_LEN
}