1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Provide a pstore intermediate backend, organized into kernel memory |
4 | * allocated zones that are then mapped and flushed into a single |
5 | * contiguous region on a storage backend of some kind (block, mtd, etc). |
6 | */ |
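/*
 * Illustrative only (hypothetical names, not part of this file's logic):
 * a storage backend describes its region and I/O callbacks in a
 * struct pstore_zone_info and hands it to register_pstore_zone(). The field
 * and callback names below follow their use in this file; the authoritative
 * layout is in <linux/pstore_zone.h>.
 *
 *	static struct pstore_zone_info sample_info = {
 *		.name         = "sample",
 *		.total_size   = SZ_1M,
 *		.kmsg_size    = SZ_64K,
 *		.pmsg_size    = SZ_64K,
 *		.console_size = SZ_64K,
 *		.ftrace_size  = SZ_64K,
 *		.max_reason   = KMSG_DUMP_OOPS,
 *		.read         = sample_read,   (ssize_t (*)(char *, size_t, loff_t))
 *		.write        = sample_write,  (ssize_t (*)(const char *, size_t, loff_t))
 *	};
 *
 *	err = register_pstore_zone(&sample_info);
 */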
7 | |
8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
9 | |
10 | #include <linux/kernel.h> |
11 | #include <linux/module.h> |
12 | #include <linux/slab.h> |
13 | #include <linux/mount.h> |
14 | #include <linux/printk.h> |
15 | #include <linux/fs.h> |
16 | #include <linux/pstore_zone.h> |
17 | #include <linux/kdev_t.h> |
18 | #include <linux/device.h> |
19 | #include <linux/namei.h> |
20 | #include <linux/fcntl.h> |
21 | #include <linux/uio.h> |
22 | #include <linux/writeback.h> |
23 | #include "internal.h" |
24 | |
25 | /** |
26 | * struct psz_buffer - header of zone to flush to storage |
27 | * |
28 | * @sig: signature to indicate header (PSZ_SIG xor PSZONE-type value) |
29 | * @datalen: length of data in @data |
30 | * @start: offset into @data where the beginning of the stored bytes begin |
31 | * @data: zone data. |
32 | */ |
33 | struct psz_buffer { |
34 | #define PSZ_SIG (0x43474244) /* DBGC */ |
35 | uint32_t sig; |
36 | atomic_t datalen; |
37 | atomic_t start; |
38 | uint8_t data[]; |
39 | }; |
40 | |
41 | /** |
42 | * struct psz_kmsg_header - kmsg dump-specific header to flush to storage |
43 | * |
44 | * @magic: magic num for kmsg dump header |
45 | * @time: kmsg dump trigger time |
 * @compressed: whether compressed
47 | * @counter: kmsg dump counter |
48 | * @reason: the kmsg dump reason (e.g. oops, panic, etc) |
49 | * @data: pointer to log data |
50 | * |
51 | * This is a sub-header for a kmsg dump, trailing after &psz_buffer. |
52 | */ |
struct psz_kmsg_header {
#define PSTORE_KMSG_HEADER_MAGIC 0x4dfc3ae5 /* Just a random number */
        uint32_t magic;
        struct timespec64 time;
        bool compressed;
        uint32_t counter;
        enum kmsg_dump_reason reason;
        uint8_t data[];
};
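
/*
 * Resulting on-storage layout of a kmsg dump zone (see psz_write_kmsg_hdr()
 * and psz_kmsg_write_record()):
 *
 *	zone->off + 0:                  struct psz_buffer (sig, datalen, start)
 *	buffer->data + 0:               struct psz_kmsg_header
 *	buffer->data + sizeof(hdr):     kmsg dump text, at most
 *	                                buffer_size - sizeof(hdr) bytes
 */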
62 | |
63 | /** |
64 | * struct pstore_zone - single stored buffer |
65 | * |
66 | * @off: zone offset of storage |
67 | * @type: front-end type for this zone |
68 | * @name: front-end name for this zone |
69 | * @buffer: pointer to data buffer managed by this zone |
70 | * @oldbuf: pointer to old data buffer |
71 | * @buffer_size: bytes in @buffer->data |
72 | * @should_recover: whether this zone should recover from storage |
 * @dirty: whether the data in @buffer are dirty
74 | * |
75 | * zone structure in memory. |
76 | */ |
77 | struct pstore_zone { |
78 | loff_t off; |
79 | const char *name; |
80 | enum pstore_type_id type; |
81 | |
82 | struct psz_buffer *buffer; |
83 | struct psz_buffer *oldbuf; |
84 | size_t buffer_size; |
85 | bool should_recover; |
86 | atomic_t dirty; |
87 | }; |
88 | |
89 | /** |
90 | * struct psz_context - all about running state of pstore/zone |
91 | * |
92 | * @kpszs: kmsg dump storage zones |
93 | * @ppsz: pmsg storage zone |
94 | * @cpsz: console storage zone |
95 | * @fpszs: ftrace storage zones |
96 | * @kmsg_max_cnt: max count of @kpszs |
97 | * @kmsg_read_cnt: counter of total read kmsg dumps |
98 | * @kmsg_write_cnt: counter of total kmsg dump writes |
99 | * @pmsg_read_cnt: counter of total read pmsg zone |
100 | * @console_read_cnt: counter of total read console zone |
101 | * @ftrace_max_cnt: max count of @fpszs |
102 | * @ftrace_read_cnt: counter of max read ftrace zone |
103 | * @oops_counter: counter of oops dumps |
104 | * @panic_counter: counter of panic dumps |
105 | * @recovered: whether finished recovering data from storage |
106 | * @on_panic: whether panic is happening |
107 | * @pstore_zone_info_lock: lock to @pstore_zone_info |
108 | * @pstore_zone_info: information from backend |
109 | * @pstore: structure for pstore |
110 | */ |
111 | struct psz_context { |
112 | struct pstore_zone **kpszs; |
113 | struct pstore_zone *ppsz; |
114 | struct pstore_zone *cpsz; |
115 | struct pstore_zone **fpszs; |
116 | unsigned int kmsg_max_cnt; |
117 | unsigned int kmsg_read_cnt; |
118 | unsigned int kmsg_write_cnt; |
119 | unsigned int pmsg_read_cnt; |
120 | unsigned int console_read_cnt; |
121 | unsigned int ftrace_max_cnt; |
122 | unsigned int ftrace_read_cnt; |
123 | /* |
124 | * These counters should be calculated during recovery. |
125 | * It records the oops/panic times after crashes rather than boots. |
126 | */ |
127 | unsigned int oops_counter; |
128 | unsigned int panic_counter; |
129 | atomic_t recovered; |
130 | atomic_t on_panic; |
131 | |
132 | /* |
133 | * pstore_zone_info_lock protects this entire structure during calls |
134 | * to register_pstore_zone()/unregister_pstore_zone(). |
135 | */ |
136 | struct mutex pstore_zone_info_lock; |
137 | struct pstore_zone_info *pstore_zone_info; |
138 | struct pstore_info pstore; |
139 | }; |
140 | static struct psz_context pstore_zone_cxt; |
141 | |
142 | static void psz_flush_all_dirty_zones(struct work_struct *); |
143 | static DECLARE_DELAYED_WORK(psz_cleaner, psz_flush_all_dirty_zones); |
144 | |
145 | /** |
146 | * enum psz_flush_mode - flush mode for psz_zone_write() |
147 | * |
148 | * @FLUSH_NONE: do not flush to storage but update data on memory |
149 | * @FLUSH_PART: just flush part of data including meta data to storage |
150 | * @FLUSH_META: just flush meta data of zone to storage |
151 | * @FLUSH_ALL: flush all of zone |
152 | */ |
153 | enum psz_flush_mode { |
154 | FLUSH_NONE = 0, |
155 | FLUSH_PART, |
156 | FLUSH_META, |
157 | FLUSH_ALL, |
158 | }; |
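
/*
 * In this file, FLUSH_PART is used by psz_record_write() when appending to
 * console/pmsg/ftrace zones, FLUSH_META by the erase paths and when a record
 * zone wraps around, and FLUSH_ALL when writing a whole kmsg dump zone or
 * flushing a dirty zone after recovery.
 */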
159 | |
160 | static inline int buffer_datalen(struct pstore_zone *zone) |
161 | { |
        return atomic_read(&zone->buffer->datalen);
163 | } |
164 | |
165 | static inline int buffer_start(struct pstore_zone *zone) |
166 | { |
        return atomic_read(&zone->buffer->start);
168 | } |
169 | |
170 | static inline bool is_on_panic(void) |
171 | { |
        return atomic_read(&pstore_zone_cxt.on_panic);
173 | } |
174 | |
175 | static ssize_t psz_zone_read_buffer(struct pstore_zone *zone, char *buf, |
176 | size_t len, unsigned long off) |
177 | { |
178 | if (!buf || !zone || !zone->buffer) |
179 | return -EINVAL; |
180 | if (off > zone->buffer_size) |
181 | return -EINVAL; |
182 | len = min_t(size_t, len, zone->buffer_size - off); |
183 | memcpy(buf, zone->buffer->data + off, len); |
184 | return len; |
185 | } |
186 | |
187 | static int psz_zone_read_oldbuf(struct pstore_zone *zone, char *buf, |
188 | size_t len, unsigned long off) |
189 | { |
190 | if (!buf || !zone || !zone->oldbuf) |
191 | return -EINVAL; |
192 | if (off > zone->buffer_size) |
193 | return -EINVAL; |
194 | len = min_t(size_t, len, zone->buffer_size - off); |
195 | memcpy(buf, zone->oldbuf->data + off, len); |
196 | return 0; |
197 | } |
198 | |
199 | static int psz_zone_write(struct pstore_zone *zone, |
200 | enum psz_flush_mode flush_mode, const char *buf, |
201 | size_t len, unsigned long off) |
202 | { |
203 | struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info; |
204 | ssize_t wcnt = 0; |
205 | ssize_t (*writeop)(const char *buf, size_t bytes, loff_t pos); |
206 | size_t wlen; |
207 | |
208 | if (off > zone->buffer_size) |
209 | return -EINVAL; |
210 | |
211 | wlen = min_t(size_t, len, zone->buffer_size - off); |
212 | if (buf && wlen) { |
213 | memcpy(zone->buffer->data + off, buf, wlen); |
                atomic_set(&zone->buffer->datalen, wlen + off);
215 | } |
216 | |
        /* avoid damaging old records */
        if (!is_on_panic() && !atomic_read(&pstore_zone_cxt.recovered))
219 | goto dirty; |
220 | |
221 | writeop = is_on_panic() ? info->panic_write : info->write; |
222 | if (!writeop) |
223 | goto dirty; |
224 | |
225 | switch (flush_mode) { |
226 | case FLUSH_NONE: |
227 | if (unlikely(buf && wlen)) |
228 | goto dirty; |
229 | return 0; |
230 | case FLUSH_PART: |
231 | wcnt = writeop((const char *)zone->buffer->data + off, wlen, |
232 | zone->off + sizeof(*zone->buffer) + off); |
233 | if (wcnt != wlen) |
234 | goto dirty; |
235 | fallthrough; |
236 | case FLUSH_META: |
237 | wlen = sizeof(struct psz_buffer); |
238 | wcnt = writeop((const char *)zone->buffer, wlen, zone->off); |
239 | if (wcnt != wlen) |
240 | goto dirty; |
241 | break; |
242 | case FLUSH_ALL: |
243 | wlen = zone->buffer_size + sizeof(*zone->buffer); |
244 | wcnt = writeop((const char *)zone->buffer, wlen, zone->off); |
245 | if (wcnt != wlen) |
246 | goto dirty; |
247 | break; |
248 | } |
249 | |
250 | return 0; |
251 | dirty: |
252 | /* no need to mark dirty if going to try next zone */ |
253 | if (wcnt == -ENOMSG) |
254 | return -ENOMSG; |
        atomic_set(&zone->dirty, true);
        /* flush dirty zones nicely */
        if (wcnt == -EBUSY && !is_on_panic())
                schedule_delayed_work(&psz_cleaner, msecs_to_jiffies(500));
259 | return -EBUSY; |
260 | } |
261 | |
262 | static int psz_flush_dirty_zone(struct pstore_zone *zone) |
263 | { |
264 | int ret; |
265 | |
266 | if (unlikely(!zone)) |
267 | return -EINVAL; |
268 | |
269 | if (unlikely(!atomic_read(&pstore_zone_cxt.recovered))) |
270 | return -EBUSY; |
271 | |
        if (!atomic_xchg(&zone->dirty, false))
                return 0;

        ret = psz_zone_write(zone, FLUSH_ALL, NULL, 0, 0);
        if (ret)
                atomic_set(&zone->dirty, true);
278 | return ret; |
279 | } |
280 | |
281 | static int psz_flush_dirty_zones(struct pstore_zone **zones, unsigned int cnt) |
282 | { |
283 | int i, ret; |
284 | struct pstore_zone *zone; |
285 | |
286 | if (!zones) |
287 | return -EINVAL; |
288 | |
289 | for (i = 0; i < cnt; i++) { |
290 | zone = zones[i]; |
291 | if (!zone) |
292 | return -EINVAL; |
293 | ret = psz_flush_dirty_zone(zone); |
294 | if (ret) |
295 | return ret; |
296 | } |
297 | return 0; |
298 | } |
299 | |
300 | static int psz_move_zone(struct pstore_zone *old, struct pstore_zone *new) |
301 | { |
302 | const char *data = (const char *)old->buffer->data; |
303 | int ret; |
304 | |
        ret = psz_zone_write(new, FLUSH_ALL, data, buffer_datalen(old), 0);
        if (ret) {
                atomic_set(&new->buffer->datalen, 0);
                atomic_set(&new->dirty, false);
                return ret;
        }
        atomic_set(&old->buffer->datalen, 0);
312 | return 0; |
313 | } |
314 | |
315 | static void psz_flush_all_dirty_zones(struct work_struct *work) |
316 | { |
317 | struct psz_context *cxt = &pstore_zone_cxt; |
318 | int ret = 0; |
319 | |
320 | if (cxt->ppsz) |
                ret |= psz_flush_dirty_zone(cxt->ppsz);
        if (cxt->cpsz)
                ret |= psz_flush_dirty_zone(cxt->cpsz);
        if (cxt->kpszs)
                ret |= psz_flush_dirty_zones(cxt->kpszs, cxt->kmsg_max_cnt);
        if (cxt->fpszs)
                ret |= psz_flush_dirty_zones(cxt->fpszs, cxt->ftrace_max_cnt);
        if (ret && cxt->pstore_zone_info)
                schedule_delayed_work(&psz_cleaner, msecs_to_jiffies(1000));
330 | } |
331 | |
332 | static int psz_kmsg_recover_data(struct psz_context *cxt) |
333 | { |
334 | struct pstore_zone_info *info = cxt->pstore_zone_info; |
335 | struct pstore_zone *zone = NULL; |
336 | struct psz_buffer *buf; |
337 | unsigned long i; |
338 | ssize_t rcnt; |
339 | |
340 | if (!info->read) |
341 | return -EINVAL; |
342 | |
343 | for (i = 0; i < cxt->kmsg_max_cnt; i++) { |
344 | zone = cxt->kpszs[i]; |
345 | if (unlikely(!zone)) |
346 | return -EINVAL; |
                if (atomic_read(&zone->dirty)) {
348 | unsigned int wcnt = cxt->kmsg_write_cnt; |
349 | struct pstore_zone *new = cxt->kpszs[wcnt]; |
350 | int ret; |
351 | |
                        ret = psz_move_zone(zone, new);
                        if (ret) {
                                pr_err("move zone from %lu to %d failed\n",
                                        i, wcnt);
356 | return ret; |
357 | } |
358 | cxt->kmsg_write_cnt = (wcnt + 1) % cxt->kmsg_max_cnt; |
359 | } |
360 | if (!zone->should_recover) |
361 | continue; |
362 | buf = zone->buffer; |
363 | rcnt = info->read((char *)buf, zone->buffer_size + sizeof(*buf), |
364 | zone->off); |
365 | if (rcnt != zone->buffer_size + sizeof(*buf)) |
366 | return rcnt < 0 ? rcnt : -EIO; |
367 | } |
368 | return 0; |
369 | } |
370 | |
371 | static int psz_kmsg_recover_meta(struct psz_context *cxt) |
372 | { |
373 | struct pstore_zone_info *info = cxt->pstore_zone_info; |
374 | struct pstore_zone *zone; |
375 | ssize_t rcnt, len; |
376 | struct psz_buffer *buf; |
377 | struct psz_kmsg_header *hdr; |
378 | struct timespec64 time = { }; |
379 | unsigned long i; |
        /*
         * Recovery may happen while panicking, when memory cannot be
         * allocated with kmalloc, so use a local array instead.
         */
        char buffer_header[sizeof(*buf) + sizeof(*hdr)] = {0};
385 | |
386 | if (!info->read) |
387 | return -EINVAL; |
388 | |
389 | len = sizeof(*buf) + sizeof(*hdr); |
390 | buf = (struct psz_buffer *)buffer_header; |
391 | for (i = 0; i < cxt->kmsg_max_cnt; i++) { |
392 | zone = cxt->kpszs[i]; |
393 | if (unlikely(!zone)) |
394 | return -EINVAL; |
395 | |
396 | rcnt = info->read((char *)buf, len, zone->off); |
397 | if (rcnt == -ENOMSG) { |
398 | pr_debug("%s with id %lu may be broken, skip\n" , |
399 | zone->name, i); |
400 | continue; |
401 | } else if (rcnt != len) { |
402 | pr_err("read %s with id %lu failed\n" , zone->name, i); |
403 | return rcnt < 0 ? rcnt : -EIO; |
404 | } |
405 | |
406 | if (buf->sig != zone->buffer->sig) { |
407 | pr_debug("no valid data in kmsg dump zone %lu\n" , i); |
408 | continue; |
409 | } |
410 | |
                if (zone->buffer_size < atomic_read(&buf->datalen)) {
412 | pr_info("found overtop zone: %s: id %lu, off %lld, size %zu\n" , |
413 | zone->name, i, zone->off, |
414 | zone->buffer_size); |
415 | continue; |
416 | } |
417 | |
418 | hdr = (struct psz_kmsg_header *)buf->data; |
419 | if (hdr->magic != PSTORE_KMSG_HEADER_MAGIC) { |
420 | pr_info("found invalid zone: %s: id %lu, off %lld, size %zu\n" , |
421 | zone->name, i, zone->off, |
422 | zone->buffer_size); |
423 | continue; |
424 | } |
425 | |
                /*
                 * Track the newest zone seen so far; the zone after it must
                 * be the oldest or an unused one, because zones are written
                 * one by one in a circular fashion.
                 */
430 | if (hdr->time.tv_sec >= time.tv_sec) { |
431 | time.tv_sec = hdr->time.tv_sec; |
432 | cxt->kmsg_write_cnt = (i + 1) % cxt->kmsg_max_cnt; |
433 | } |
434 | |
435 | if (hdr->reason == KMSG_DUMP_OOPS) |
436 | cxt->oops_counter = |
437 | max(cxt->oops_counter, hdr->counter); |
438 | else if (hdr->reason == KMSG_DUMP_PANIC) |
439 | cxt->panic_counter = |
440 | max(cxt->panic_counter, hdr->counter); |
441 | |
                if (!atomic_read(&buf->datalen)) {
443 | pr_debug("found erased zone: %s: id %lu, off %lld, size %zu, datalen %d\n" , |
444 | zone->name, i, zone->off, |
445 | zone->buffer_size, |
446 | atomic_read(&buf->datalen)); |
447 | continue; |
448 | } |
449 | |
450 | if (!is_on_panic()) |
451 | zone->should_recover = true; |
452 | pr_debug("found nice zone: %s: id %lu, off %lld, size %zu, datalen %d\n" , |
453 | zone->name, i, zone->off, |
454 | zone->buffer_size, atomic_read(&buf->datalen)); |
455 | } |
456 | |
457 | return 0; |
458 | } |
459 | |
460 | static int psz_kmsg_recover(struct psz_context *cxt) |
461 | { |
462 | int ret; |
463 | |
464 | if (!cxt->kpszs) |
465 | return 0; |
466 | |
467 | ret = psz_kmsg_recover_meta(cxt); |
468 | if (ret) |
469 | goto recover_fail; |
470 | |
471 | ret = psz_kmsg_recover_data(cxt); |
472 | if (ret) |
473 | goto recover_fail; |
474 | |
475 | return 0; |
476 | recover_fail: |
477 | pr_debug("psz_recover_kmsg failed\n" ); |
478 | return ret; |
479 | } |
480 | |
481 | static int psz_recover_zone(struct psz_context *cxt, struct pstore_zone *zone) |
482 | { |
483 | struct pstore_zone_info *info = cxt->pstore_zone_info; |
484 | struct psz_buffer *oldbuf, tmpbuf; |
485 | int ret = 0; |
486 | char *buf; |
487 | ssize_t rcnt, len, start, off; |
488 | |
489 | if (!zone || zone->oldbuf) |
490 | return 0; |
491 | |
492 | if (is_on_panic()) { |
                /* save as much data as possible */
494 | psz_flush_dirty_zone(zone); |
495 | return 0; |
496 | } |
497 | |
498 | if (unlikely(!info->read)) |
499 | return -EINVAL; |
500 | |
501 | len = sizeof(struct psz_buffer); |
502 | rcnt = info->read((char *)&tmpbuf, len, zone->off); |
503 | if (rcnt != len) { |
504 | pr_debug("read zone %s failed\n" , zone->name); |
505 | return rcnt < 0 ? rcnt : -EIO; |
506 | } |
507 | |
508 | if (tmpbuf.sig != zone->buffer->sig) { |
509 | pr_debug("no valid data in zone %s\n" , zone->name); |
510 | return 0; |
511 | } |
512 | |
        if (zone->buffer_size < atomic_read(&tmpbuf.datalen) ||
            zone->buffer_size < atomic_read(&tmpbuf.start)) {
515 | pr_info("found overtop zone: %s: off %lld, size %zu\n" , |
516 | zone->name, zone->off, zone->buffer_size); |
517 | /* just keep going */ |
518 | return 0; |
519 | } |
520 | |
        if (!atomic_read(&tmpbuf.datalen)) {
522 | pr_debug("found erased zone: %s: off %lld, size %zu, datalen %d\n" , |
523 | zone->name, zone->off, zone->buffer_size, |
524 | atomic_read(&tmpbuf.datalen)); |
525 | return 0; |
526 | } |
527 | |
528 | pr_debug("found nice zone: %s: off %lld, size %zu, datalen %d\n" , |
529 | zone->name, zone->off, zone->buffer_size, |
530 | atomic_read(&tmpbuf.datalen)); |
531 | |
        len = atomic_read(&tmpbuf.datalen) + sizeof(*oldbuf);
        oldbuf = kzalloc(len, GFP_KERNEL);
534 | if (!oldbuf) |
535 | return -ENOMEM; |
536 | |
537 | memcpy(oldbuf, &tmpbuf, sizeof(*oldbuf)); |
538 | buf = (char *)oldbuf + sizeof(*oldbuf); |
        len = atomic_read(&oldbuf->datalen);
        start = atomic_read(&oldbuf->start);
541 | off = zone->off + sizeof(*oldbuf); |
542 | |
543 | /* get part of data */ |
544 | rcnt = info->read(buf, len - start, off + start); |
545 | if (rcnt != len - start) { |
546 | pr_err("read zone %s failed\n" , zone->name); |
547 | ret = rcnt < 0 ? rcnt : -EIO; |
548 | goto free_oldbuf; |
549 | } |
550 | |
551 | /* get the rest of data */ |
552 | rcnt = info->read(buf + len - start, start, off); |
553 | if (rcnt != start) { |
554 | pr_err("read zone %s failed\n" , zone->name); |
555 | ret = rcnt < 0 ? rcnt : -EIO; |
556 | goto free_oldbuf; |
557 | } |
558 | |
559 | zone->oldbuf = oldbuf; |
560 | psz_flush_dirty_zone(zone); |
561 | return 0; |
562 | |
563 | free_oldbuf: |
        kfree(oldbuf);
565 | return ret; |
566 | } |
567 | |
568 | static int psz_recover_zones(struct psz_context *cxt, |
569 | struct pstore_zone **zones, unsigned int cnt) |
570 | { |
571 | int ret; |
572 | unsigned int i; |
573 | struct pstore_zone *zone; |
574 | |
575 | if (!zones) |
576 | return 0; |
577 | |
578 | for (i = 0; i < cnt; i++) { |
579 | zone = zones[i]; |
580 | if (unlikely(!zone)) |
581 | continue; |
582 | ret = psz_recover_zone(cxt, zone); |
583 | if (ret) |
584 | goto recover_fail; |
585 | } |
586 | |
587 | return 0; |
588 | recover_fail: |
589 | pr_debug("recover %s[%u] failed\n" , zone->name, i); |
590 | return ret; |
591 | } |
592 | |
593 | /** |
594 | * psz_recovery() - recover data from storage |
595 | * @cxt: the context of pstore/zone |
596 | * |
597 | * recovery means reading data back from storage after rebooting |
598 | * |
599 | * Return: 0 on success, others on failure. |
600 | */ |
601 | static inline int psz_recovery(struct psz_context *cxt) |
602 | { |
603 | int ret; |
604 | |
        if (atomic_read(&cxt->recovered))
606 | return 0; |
607 | |
608 | ret = psz_kmsg_recover(cxt); |
609 | if (ret) |
610 | goto out; |
611 | |
        ret = psz_recover_zone(cxt, cxt->ppsz);
        if (ret)
                goto out;

        ret = psz_recover_zone(cxt, cxt->cpsz);
        if (ret)
                goto out;

        ret = psz_recover_zones(cxt, cxt->fpszs, cxt->ftrace_max_cnt);
621 | |
622 | out: |
623 | if (unlikely(ret)) |
624 | pr_err("recover failed\n" ); |
625 | else { |
626 | pr_debug("recover end!\n" ); |
                atomic_set(&cxt->recovered, 1);
628 | } |
629 | return ret; |
630 | } |
631 | |
632 | static int psz_pstore_open(struct pstore_info *psi) |
633 | { |
634 | struct psz_context *cxt = psi->data; |
635 | |
636 | cxt->kmsg_read_cnt = 0; |
637 | cxt->pmsg_read_cnt = 0; |
638 | cxt->console_read_cnt = 0; |
639 | cxt->ftrace_read_cnt = 0; |
640 | return 0; |
641 | } |
642 | |
643 | static inline bool psz_old_ok(struct pstore_zone *zone) |
644 | { |
        if (zone && zone->oldbuf && atomic_read(&zone->oldbuf->datalen))
646 | return true; |
647 | return false; |
648 | } |
649 | |
650 | static inline bool psz_ok(struct pstore_zone *zone) |
651 | { |
652 | if (zone && zone->buffer && buffer_datalen(zone)) |
653 | return true; |
654 | return false; |
655 | } |
656 | |
657 | static inline int psz_kmsg_erase(struct psz_context *cxt, |
658 | struct pstore_zone *zone, struct pstore_record *record) |
659 | { |
660 | struct psz_buffer *buffer = zone->buffer; |
661 | struct psz_kmsg_header *hdr = |
662 | (struct psz_kmsg_header *)buffer->data; |
663 | size_t size; |
664 | |
665 | if (unlikely(!psz_ok(zone))) |
666 | return 0; |
667 | |
668 | /* this zone is already updated, no need to erase */ |
669 | if (record->count != hdr->counter) |
670 | return 0; |
671 | |
672 | size = buffer_datalen(zone) + sizeof(*zone->buffer); |
        atomic_set(&zone->buffer->datalen, 0);
        if (cxt->pstore_zone_info->erase)
                return cxt->pstore_zone_info->erase(size, zone->off);
        else
                return psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
678 | } |
679 | |
680 | static inline int psz_record_erase(struct psz_context *cxt, |
681 | struct pstore_zone *zone) |
682 | { |
683 | if (unlikely(!psz_old_ok(zone))) |
684 | return 0; |
685 | |
        kfree(zone->oldbuf);
        zone->oldbuf = NULL;
        /*
         * If there is new data in the zone buffer, the old data is already
         * invalid; there is no need to flush zeroes (an erase) out to the
         * block device.
         */
        if (!buffer_datalen(zone))
                return psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
695 | psz_flush_dirty_zone(zone); |
696 | return 0; |
697 | } |
698 | |
699 | static int psz_pstore_erase(struct pstore_record *record) |
700 | { |
701 | struct psz_context *cxt = record->psi->data; |
702 | |
703 | switch (record->type) { |
704 | case PSTORE_TYPE_DMESG: |
705 | if (record->id >= cxt->kmsg_max_cnt) |
706 | return -EINVAL; |
                return psz_kmsg_erase(cxt, cxt->kpszs[record->id], record);
        case PSTORE_TYPE_PMSG:
                return psz_record_erase(cxt, cxt->ppsz);
        case PSTORE_TYPE_CONSOLE:
                return psz_record_erase(cxt, cxt->cpsz);
        case PSTORE_TYPE_FTRACE:
                if (record->id >= cxt->ftrace_max_cnt)
                        return -EINVAL;
                return psz_record_erase(cxt, cxt->fpszs[record->id]);
716 | default: return -EINVAL; |
717 | } |
718 | } |
719 | |
720 | static void psz_write_kmsg_hdr(struct pstore_zone *zone, |
721 | struct pstore_record *record) |
722 | { |
723 | struct psz_context *cxt = record->psi->data; |
724 | struct psz_buffer *buffer = zone->buffer; |
725 | struct psz_kmsg_header *hdr = |
726 | (struct psz_kmsg_header *)buffer->data; |
727 | |
728 | hdr->magic = PSTORE_KMSG_HEADER_MAGIC; |
729 | hdr->compressed = record->compressed; |
730 | hdr->time.tv_sec = record->time.tv_sec; |
731 | hdr->time.tv_nsec = record->time.tv_nsec; |
732 | hdr->reason = record->reason; |
733 | if (hdr->reason == KMSG_DUMP_OOPS) |
734 | hdr->counter = ++cxt->oops_counter; |
735 | else if (hdr->reason == KMSG_DUMP_PANIC) |
736 | hdr->counter = ++cxt->panic_counter; |
737 | else |
738 | hdr->counter = 0; |
739 | } |
740 | |
/*
 * In case a zone is broken, which may happen on an MTD device, try each
 * zone in turn, starting at cxt->kmsg_write_cnt.
 */
745 | static inline int notrace psz_kmsg_write_record(struct psz_context *cxt, |
746 | struct pstore_record *record) |
747 | { |
748 | size_t size, hlen; |
749 | struct pstore_zone *zone; |
750 | unsigned int i; |
751 | |
752 | for (i = 0; i < cxt->kmsg_max_cnt; i++) { |
753 | unsigned int zonenum, len; |
754 | int ret; |
755 | |
756 | zonenum = (cxt->kmsg_write_cnt + i) % cxt->kmsg_max_cnt; |
757 | zone = cxt->kpszs[zonenum]; |
758 | if (unlikely(!zone)) |
759 | return -ENOSPC; |
760 | |
761 | /* avoid destroying old data, allocate a new one */ |
762 | len = zone->buffer_size + sizeof(*zone->buffer); |
763 | zone->oldbuf = zone->buffer; |
                zone->buffer = kzalloc(len, GFP_ATOMIC);
765 | if (!zone->buffer) { |
766 | zone->buffer = zone->oldbuf; |
767 | return -ENOMEM; |
768 | } |
769 | zone->buffer->sig = zone->oldbuf->sig; |
770 | |
771 | pr_debug("write %s to zone id %d\n" , zone->name, zonenum); |
772 | psz_write_kmsg_hdr(zone, record); |
773 | hlen = sizeof(struct psz_kmsg_header); |
774 | size = min_t(size_t, record->size, zone->buffer_size - hlen); |
                ret = psz_zone_write(zone, FLUSH_ALL, record->buf, size, hlen);
776 | if (likely(!ret || ret != -ENOMSG)) { |
777 | cxt->kmsg_write_cnt = zonenum + 1; |
778 | cxt->kmsg_write_cnt %= cxt->kmsg_max_cnt; |
779 | /* no need to try next zone, free last zone buffer */ |
                        kfree(zone->oldbuf);
781 | zone->oldbuf = NULL; |
782 | return ret; |
783 | } |
784 | |
785 | pr_debug("zone %u may be broken, try next dmesg zone\n" , |
786 | zonenum); |
                kfree(zone->buffer);
788 | zone->buffer = zone->oldbuf; |
789 | zone->oldbuf = NULL; |
790 | } |
791 | |
792 | return -EBUSY; |
793 | } |
794 | |
795 | static int notrace psz_kmsg_write(struct psz_context *cxt, |
796 | struct pstore_record *record) |
797 | { |
798 | int ret; |
799 | |
800 | /* |
801 | * Explicitly only take the first part of any new crash. |
802 | * If our buffer is larger than kmsg_bytes, this can never happen, |
803 | * and if our buffer is smaller than kmsg_bytes, we don't want the |
804 | * report split across multiple records. |
805 | */ |
806 | if (record->part != 1) |
807 | return -ENOSPC; |
808 | |
809 | if (!cxt->kpszs) |
810 | return -ENOSPC; |
811 | |
812 | ret = psz_kmsg_write_record(cxt, record); |
813 | if (!ret && is_on_panic()) { |
814 | /* ensure all data are flushed to storage when panic */ |
815 | pr_debug("try to flush other dirty zones\n" ); |
816 | psz_flush_all_dirty_zones(NULL); |
817 | } |
818 | |
        /* always return 0: the record has already been handled in the buffer */
820 | return 0; |
821 | } |
822 | |
823 | static int notrace psz_record_write(struct pstore_zone *zone, |
824 | struct pstore_record *record) |
825 | { |
826 | size_t start, rem; |
827 | bool is_full_data = false; |
828 | char *buf; |
829 | int cnt; |
830 | |
831 | if (!zone || !record) |
832 | return -ENOSPC; |
833 | |
        if (atomic_read(&zone->buffer->datalen) >= zone->buffer_size)
835 | is_full_data = true; |
836 | |
837 | cnt = record->size; |
838 | buf = record->buf; |
839 | if (unlikely(cnt > zone->buffer_size)) { |
840 | buf += cnt - zone->buffer_size; |
841 | cnt = zone->buffer_size; |
842 | } |
843 | |
844 | start = buffer_start(zone); |
845 | rem = zone->buffer_size - start; |
846 | if (unlikely(rem < cnt)) { |
                psz_zone_write(zone, FLUSH_PART, buf, rem, start);
848 | buf += rem; |
849 | cnt -= rem; |
850 | start = 0; |
851 | is_full_data = true; |
852 | } |
853 | |
        atomic_set(&zone->buffer->start, cnt + start);
        psz_zone_write(zone, FLUSH_PART, buf, cnt, start);
856 | |
        /*
         * psz_zone_write() sets datalen to start + cnt, which is correct as
         * long as the data fits within the buffer. Once the data wraps back
         * to the beginning of the zone, that value would be wrong, so pin
         * datalen to the buffer size whenever the zone has been filled.
         */
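        /*
         * For example, with buffer_size = 1000, start = 900 and an incoming
         * record of cnt = 300 bytes: rem = 100, so 100 bytes are written at
         * offset 900, the remaining 200 bytes wrap to offset 0, buffer->start
         * becomes 200, and datalen is pinned to buffer_size below.
         */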
865 | if (is_full_data) { |
                atomic_set(&zone->buffer->datalen, zone->buffer_size);
                psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
868 | } |
869 | return 0; |
870 | } |
871 | |
872 | static int notrace psz_pstore_write(struct pstore_record *record) |
873 | { |
874 | struct psz_context *cxt = record->psi->data; |
875 | |
876 | if (record->type == PSTORE_TYPE_DMESG && |
877 | record->reason == KMSG_DUMP_PANIC) |
                atomic_set(&cxt->on_panic, 1);
879 | |
        /*
         * While panicking, write nothing but panic records. This avoids the
         * case where panic_write() prints a log line that in turn wakes up
         * the console backend.
         */
884 | if (is_on_panic() && record->type != PSTORE_TYPE_DMESG) |
885 | return -EBUSY; |
886 | |
887 | switch (record->type) { |
888 | case PSTORE_TYPE_DMESG: |
889 | return psz_kmsg_write(cxt, record); |
890 | case PSTORE_TYPE_CONSOLE: |
                return psz_record_write(cxt->cpsz, record);
        case PSTORE_TYPE_PMSG:
                return psz_record_write(cxt->ppsz, record);
        case PSTORE_TYPE_FTRACE: {
                int zonenum = smp_processor_id();

                if (!cxt->fpszs)
                        return -ENOSPC;
                return psz_record_write(cxt->fpszs[zonenum], record);
900 | } |
901 | default: |
902 | return -EINVAL; |
903 | } |
904 | } |
905 | |
906 | static struct pstore_zone *psz_read_next_zone(struct psz_context *cxt) |
907 | { |
908 | struct pstore_zone *zone = NULL; |
909 | |
910 | while (cxt->kmsg_read_cnt < cxt->kmsg_max_cnt) { |
911 | zone = cxt->kpszs[cxt->kmsg_read_cnt++]; |
912 | if (psz_ok(zone)) |
913 | return zone; |
914 | } |
915 | |
916 | if (cxt->ftrace_read_cnt < cxt->ftrace_max_cnt) |
917 | /* |
918 | * No need psz_old_ok(). Let psz_ftrace_read() do so for |
919 | * combination. psz_ftrace_read() should traverse over |
920 | * all zones in case of some zone without data. |
921 | */ |
922 | return cxt->fpszs[cxt->ftrace_read_cnt++]; |
923 | |
924 | if (cxt->pmsg_read_cnt == 0) { |
925 | cxt->pmsg_read_cnt++; |
926 | zone = cxt->ppsz; |
927 | if (psz_old_ok(zone)) |
928 | return zone; |
929 | } |
930 | |
931 | if (cxt->console_read_cnt == 0) { |
932 | cxt->console_read_cnt++; |
933 | zone = cxt->cpsz; |
934 | if (psz_old_ok(zone)) |
935 | return zone; |
936 | } |
937 | |
938 | return NULL; |
939 | } |
940 | |
941 | static int psz_kmsg_read_hdr(struct pstore_zone *zone, |
942 | struct pstore_record *record) |
943 | { |
944 | struct psz_buffer *buffer = zone->buffer; |
945 | struct psz_kmsg_header *hdr = |
946 | (struct psz_kmsg_header *)buffer->data; |
947 | |
948 | if (hdr->magic != PSTORE_KMSG_HEADER_MAGIC) |
949 | return -EINVAL; |
950 | record->compressed = hdr->compressed; |
951 | record->time.tv_sec = hdr->time.tv_sec; |
952 | record->time.tv_nsec = hdr->time.tv_nsec; |
953 | record->reason = hdr->reason; |
954 | record->count = hdr->counter; |
955 | return 0; |
956 | } |
957 | |
958 | static ssize_t psz_kmsg_read(struct pstore_zone *zone, |
959 | struct pstore_record *record) |
960 | { |
961 | ssize_t size, hlen = 0; |
962 | |
963 | size = buffer_datalen(zone); |
964 | /* Clear and skip this kmsg dump record if it has no valid header */ |
965 | if (psz_kmsg_read_hdr(zone, record)) { |
                atomic_set(&zone->buffer->datalen, 0);
                atomic_set(&zone->dirty, 0);
968 | return -ENOMSG; |
969 | } |
970 | size -= sizeof(struct psz_kmsg_header); |
971 | |
972 | if (!record->compressed) { |
                char *buf = kasprintf(GFP_KERNEL, "%s: Total %d times\n",
                                      kmsg_dump_reason_str(record->reason),
                                      record->count);
976 | if (!buf) |
977 | return -ENOMEM; |
978 | hlen = strlen(buf); |
                record->buf = krealloc(buf, hlen + size, GFP_KERNEL);
980 | if (!record->buf) { |
                        kfree(buf);
982 | return -ENOMEM; |
983 | } |
984 | } else { |
985 | record->buf = kmalloc(size, GFP_KERNEL); |
986 | if (!record->buf) |
987 | return -ENOMEM; |
988 | } |
989 | |
        size = psz_zone_read_buffer(zone, record->buf + hlen, size,
                        sizeof(struct psz_kmsg_header));
992 | if (unlikely(size < 0)) { |
                kfree(record->buf);
994 | return -ENOMSG; |
995 | } |
996 | |
997 | return size + hlen; |
998 | } |
999 | |
1000 | /* try to combine all ftrace zones */ |
1001 | static ssize_t psz_ftrace_read(struct pstore_zone *zone, |
1002 | struct pstore_record *record) |
1003 | { |
1004 | struct psz_context *cxt; |
1005 | struct psz_buffer *buf; |
1006 | int ret; |
1007 | |
1008 | if (!zone || !record) |
1009 | return -ENOSPC; |
1010 | |
1011 | if (!psz_old_ok(zone)) |
1012 | goto out; |
1013 | |
1014 | buf = (struct psz_buffer *)zone->oldbuf; |
1015 | if (!buf) |
1016 | return -ENOMSG; |
1017 | |
        ret = pstore_ftrace_combine_log(&record->buf, &record->size,
                        (char *)buf->data, atomic_read(&buf->datalen));
1020 | if (unlikely(ret)) |
1021 | return ret; |
1022 | |
1023 | out: |
1024 | cxt = record->psi->data; |
1025 | if (cxt->ftrace_read_cnt < cxt->ftrace_max_cnt) |
1026 | /* then, read next ftrace zone */ |
1027 | return -ENOMSG; |
1028 | record->id = 0; |
1029 | return record->size ? record->size : -ENOMSG; |
1030 | } |
1031 | |
1032 | static ssize_t psz_record_read(struct pstore_zone *zone, |
1033 | struct pstore_record *record) |
1034 | { |
1035 | size_t len; |
1036 | struct psz_buffer *buf; |
1037 | |
1038 | if (!zone || !record) |
1039 | return -ENOSPC; |
1040 | |
1041 | buf = (struct psz_buffer *)zone->oldbuf; |
1042 | if (!buf) |
1043 | return -ENOMSG; |
1044 | |
        len = atomic_read(&buf->datalen);
        record->buf = kmalloc(len, GFP_KERNEL);
1047 | if (!record->buf) |
1048 | return -ENOMEM; |
1049 | |
1050 | if (unlikely(psz_zone_read_oldbuf(zone, record->buf, len, 0))) { |
                kfree(record->buf);
1052 | return -ENOMSG; |
1053 | } |
1054 | |
1055 | return len; |
1056 | } |
1057 | |
1058 | static ssize_t psz_pstore_read(struct pstore_record *record) |
1059 | { |
1060 | struct psz_context *cxt = record->psi->data; |
1061 | ssize_t (*readop)(struct pstore_zone *zone, |
1062 | struct pstore_record *record); |
1063 | struct pstore_zone *zone; |
1064 | ssize_t ret; |
1065 | |
1066 | /* before read, we must recover from storage */ |
1067 | ret = psz_recovery(cxt); |
1068 | if (ret) |
1069 | return ret; |
1070 | |
1071 | next_zone: |
1072 | zone = psz_read_next_zone(cxt); |
1073 | if (!zone) |
1074 | return 0; |
1075 | |
1076 | record->type = zone->type; |
1077 | switch (record->type) { |
1078 | case PSTORE_TYPE_DMESG: |
1079 | readop = psz_kmsg_read; |
1080 | record->id = cxt->kmsg_read_cnt - 1; |
1081 | break; |
1082 | case PSTORE_TYPE_FTRACE: |
1083 | readop = psz_ftrace_read; |
1084 | break; |
1085 | case PSTORE_TYPE_CONSOLE: |
1086 | case PSTORE_TYPE_PMSG: |
1087 | readop = psz_record_read; |
1088 | break; |
1089 | default: |
1090 | goto next_zone; |
1091 | } |
1092 | |
1093 | ret = readop(zone, record); |
1094 | if (ret == -ENOMSG) |
1095 | goto next_zone; |
1096 | return ret; |
1097 | } |
1098 | |
1099 | static struct psz_context pstore_zone_cxt = { |
1100 | .pstore_zone_info_lock = |
1101 | __MUTEX_INITIALIZER(pstore_zone_cxt.pstore_zone_info_lock), |
1102 | .recovered = ATOMIC_INIT(0), |
1103 | .on_panic = ATOMIC_INIT(0), |
1104 | .pstore = { |
1105 | .owner = THIS_MODULE, |
1106 | .open = psz_pstore_open, |
1107 | .read = psz_pstore_read, |
1108 | .write = psz_pstore_write, |
1109 | .erase = psz_pstore_erase, |
1110 | }, |
1111 | }; |
1112 | |
1113 | static void psz_free_zone(struct pstore_zone **pszone) |
1114 | { |
1115 | struct pstore_zone *zone = *pszone; |
1116 | |
1117 | if (!zone) |
1118 | return; |
1119 | |
        kfree(zone->buffer);
        kfree(zone);
1122 | *pszone = NULL; |
1123 | } |
1124 | |
1125 | static void psz_free_zones(struct pstore_zone ***pszones, unsigned int *cnt) |
1126 | { |
1127 | struct pstore_zone **zones = *pszones; |
1128 | |
1129 | if (!zones) |
1130 | return; |
1131 | |
1132 | while (*cnt > 0) { |
1133 | (*cnt)--; |
                psz_free_zone(&(zones[*cnt]));
        }
        kfree(zones);
1137 | *pszones = NULL; |
1138 | } |
1139 | |
1140 | static void psz_free_all_zones(struct psz_context *cxt) |
1141 | { |
1142 | if (cxt->kpszs) |
                psz_free_zones(&cxt->kpszs, &cxt->kmsg_max_cnt);
        if (cxt->ppsz)
                psz_free_zone(&cxt->ppsz);
        if (cxt->cpsz)
                psz_free_zone(&cxt->cpsz);
        if (cxt->fpszs)
                psz_free_zones(&cxt->fpszs, &cxt->ftrace_max_cnt);
1150 | } |
1151 | |
1152 | static struct pstore_zone *psz_init_zone(enum pstore_type_id type, |
1153 | loff_t *off, size_t size) |
1154 | { |
1155 | struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info; |
1156 | struct pstore_zone *zone; |
1157 | const char *name = pstore_type_to_name(type); |
1158 | |
1159 | if (!size) |
1160 | return NULL; |
1161 | |
1162 | if (*off + size > info->total_size) { |
1163 | pr_err("no room for %s (0x%zx@0x%llx over 0x%lx)\n" , |
1164 | name, size, *off, info->total_size); |
                return ERR_PTR(-ENOMEM);
1166 | } |
1167 | |
        zone = kzalloc(sizeof(struct pstore_zone), GFP_KERNEL);
        if (!zone)
                return ERR_PTR(-ENOMEM);

        zone->buffer = kmalloc(size, GFP_KERNEL);
        if (!zone->buffer) {
                kfree(zone);
                return ERR_PTR(-ENOMEM);
1176 | } |
1177 | memset(zone->buffer, 0xFF, size); |
1178 | zone->off = *off; |
1179 | zone->name = name; |
1180 | zone->type = type; |
1181 | zone->buffer_size = size - sizeof(struct psz_buffer); |
1182 | zone->buffer->sig = type ^ PSZ_SIG; |
1183 | zone->oldbuf = NULL; |
        atomic_set(&zone->dirty, 0);
        atomic_set(&zone->buffer->datalen, 0);
        atomic_set(&zone->buffer->start, 0);
1187 | |
1188 | *off += size; |
1189 | |
1190 | pr_debug("pszone %s: off 0x%llx, %zu header, %zu data\n" , zone->name, |
1191 | zone->off, sizeof(*zone->buffer), zone->buffer_size); |
1192 | return zone; |
1193 | } |
1194 | |
1195 | static struct pstore_zone **psz_init_zones(enum pstore_type_id type, |
1196 | loff_t *off, size_t total_size, ssize_t record_size, |
1197 | unsigned int *cnt) |
1198 | { |
1199 | struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info; |
1200 | struct pstore_zone **zones, *zone; |
1201 | const char *name = pstore_type_to_name(type); |
1202 | int c, i; |
1203 | |
1204 | *cnt = 0; |
1205 | if (!total_size || !record_size) |
1206 | return NULL; |
1207 | |
1208 | if (*off + total_size > info->total_size) { |
1209 | pr_err("no room for zones %s (0x%zx@0x%llx over 0x%lx)\n" , |
1210 | name, total_size, *off, info->total_size); |
                return ERR_PTR(-ENOMEM);
1212 | } |
1213 | |
1214 | c = total_size / record_size; |
        zones = kcalloc(c, sizeof(*zones), GFP_KERNEL);
        if (!zones) {
                pr_err("allocate for zones %s failed\n", name);
                return ERR_PTR(-ENOMEM);
1219 | } |
1220 | |
1221 | for (i = 0; i < c; i++) { |
                zone = psz_init_zone(type, off, record_size);
                if (!zone || IS_ERR(zone)) {
                        pr_err("initialize zones %s failed\n", name);
                        psz_free_zones(&zones, &i);
                        return (void *)zone;
1227 | } |
1228 | zones[i] = zone; |
1229 | } |
1230 | |
1231 | *cnt = c; |
1232 | return zones; |
1233 | } |
1234 | |
1235 | static int psz_alloc_zones(struct psz_context *cxt) |
1236 | { |
1237 | struct pstore_zone_info *info = cxt->pstore_zone_info; |
1238 | loff_t off = 0; |
1239 | int err; |
1240 | size_t off_size = 0; |
1241 | |
1242 | off_size += info->pmsg_size; |
        cxt->ppsz = psz_init_zone(PSTORE_TYPE_PMSG, &off, info->pmsg_size);
        if (IS_ERR(cxt->ppsz)) {
                err = PTR_ERR(cxt->ppsz);
1246 | cxt->ppsz = NULL; |
1247 | goto free_out; |
1248 | } |
1249 | |
1250 | off_size += info->console_size; |
        cxt->cpsz = psz_init_zone(PSTORE_TYPE_CONSOLE, &off,
                                  info->console_size);
        if (IS_ERR(cxt->cpsz)) {
                err = PTR_ERR(cxt->cpsz);
1255 | cxt->cpsz = NULL; |
1256 | goto free_out; |
1257 | } |
1258 | |
1259 | off_size += info->ftrace_size; |
        cxt->fpszs = psz_init_zones(PSTORE_TYPE_FTRACE, &off,
                        info->ftrace_size,
                        info->ftrace_size / nr_cpu_ids,
                        &cxt->ftrace_max_cnt);
        if (IS_ERR(cxt->fpszs)) {
                err = PTR_ERR(cxt->fpszs);
1266 | cxt->fpszs = NULL; |
1267 | goto free_out; |
1268 | } |
1269 | |
        cxt->kpszs = psz_init_zones(PSTORE_TYPE_DMESG, &off,
                        info->total_size - off_size,
                        info->kmsg_size, &cxt->kmsg_max_cnt);
        if (IS_ERR(cxt->kpszs)) {
                err = PTR_ERR(cxt->kpszs);
1275 | cxt->kpszs = NULL; |
1276 | goto free_out; |
1277 | } |
1278 | |
1279 | return 0; |
1280 | free_out: |
1281 | psz_free_all_zones(cxt); |
1282 | return err; |
1283 | } |
1284 | |
1285 | /** |
1286 | * register_pstore_zone() - register to pstore/zone |
1287 | * |
1288 | * @info: back-end driver information. See &struct pstore_zone_info. |
1289 | * |
 * Only one back-end can be registered at a time.
1291 | * |
1292 | * Return: 0 on success, others on failure. |
1293 | */ |
1294 | int register_pstore_zone(struct pstore_zone_info *info) |
1295 | { |
1296 | int err = -EINVAL; |
1297 | struct psz_context *cxt = &pstore_zone_cxt; |
1298 | |
1299 | if (info->total_size < 4096) { |
1300 | pr_warn("total_size must be >= 4096\n" ); |
1301 | return -EINVAL; |
1302 | } |
1303 | if (info->total_size > SZ_128M) { |
1304 | pr_warn("capping size to 128MiB\n" ); |
1305 | info->total_size = SZ_128M; |
1306 | } |
1307 | |
1308 | if (!info->kmsg_size && !info->pmsg_size && !info->console_size && |
1309 | !info->ftrace_size) { |
1310 | pr_warn("at least one record size must be non-zero\n" ); |
1311 | return -EINVAL; |
1312 | } |
1313 | |
1314 | if (!info->name || !info->name[0]) |
1315 | return -EINVAL; |
1316 | |
1317 | #define check_size(name, size) { \ |
1318 | if (info->name > 0 && info->name < (size)) { \ |
1319 | pr_err(#name " must be over %d\n", (size)); \ |
1320 | return -EINVAL; \ |
1321 | } \ |
1322 | if (info->name & (size - 1)) { \ |
1323 | pr_err(#name " must be a multiple of %d\n", \ |
1324 | (size)); \ |
1325 | return -EINVAL; \ |
1326 | } \ |
1327 | } |
1328 | |
1329 | check_size(total_size, 4096); |
1330 | check_size(kmsg_size, SECTOR_SIZE); |
1331 | check_size(pmsg_size, SECTOR_SIZE); |
1332 | check_size(console_size, SECTOR_SIZE); |
1333 | check_size(ftrace_size, SECTOR_SIZE); |
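        /*
         * For example, a kmsg_size of 64 KiB passes both checks above, while
         * a kmsg_size of 3000 bytes would be rejected for not being a
         * multiple of SECTOR_SIZE (512 bytes).
         */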
1334 | |
1335 | #undef check_size |
1336 | |
        /*
         * Both @read and @write must be provided: without @read, mounting
         * pstore may fail; without @write, pstore cannot remove record files.
         */
1342 | if (!info->read || !info->write) { |
1343 | pr_err("no valid general read/write interface\n" ); |
1344 | return -EINVAL; |
1345 | } |
1346 | |
1347 | mutex_lock(&cxt->pstore_zone_info_lock); |
1348 | if (cxt->pstore_zone_info) { |
1349 | pr_warn("'%s' already loaded: ignoring '%s'\n" , |
1350 | cxt->pstore_zone_info->name, info->name); |
                mutex_unlock(&cxt->pstore_zone_info_lock);
1352 | return -EBUSY; |
1353 | } |
1354 | cxt->pstore_zone_info = info; |
1355 | |
1356 | pr_debug("register %s with properties:\n" , info->name); |
1357 | pr_debug("\ttotal size : %ld Bytes\n" , info->total_size); |
1358 | pr_debug("\tkmsg size : %ld Bytes\n" , info->kmsg_size); |
1359 | pr_debug("\tpmsg size : %ld Bytes\n" , info->pmsg_size); |
1360 | pr_debug("\tconsole size : %ld Bytes\n" , info->console_size); |
1361 | pr_debug("\tftrace size : %ld Bytes\n" , info->ftrace_size); |
1362 | |
1363 | err = psz_alloc_zones(cxt); |
1364 | if (err) { |
1365 | pr_err("alloc zones failed\n" ); |
1366 | goto fail_out; |
1367 | } |
1368 | |
1369 | if (info->kmsg_size) { |
1370 | cxt->pstore.bufsize = cxt->kpszs[0]->buffer_size - |
1371 | sizeof(struct psz_kmsg_header); |
                cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
1373 | if (!cxt->pstore.buf) { |
1374 | err = -ENOMEM; |
1375 | goto fail_free; |
1376 | } |
1377 | } |
1378 | cxt->pstore.data = cxt; |
1379 | |
1380 | pr_info("registered %s as backend for" , info->name); |
1381 | cxt->pstore.max_reason = info->max_reason; |
1382 | cxt->pstore.name = info->name; |
1383 | if (info->kmsg_size) { |
1384 | cxt->pstore.flags |= PSTORE_FLAGS_DMESG; |
1385 | pr_cont(" kmsg(%s" , |
1386 | kmsg_dump_reason_str(cxt->pstore.max_reason)); |
1387 | if (cxt->pstore_zone_info->panic_write) |
1388 | pr_cont(",panic_write" ); |
1389 | pr_cont(")" ); |
1390 | } |
1391 | if (info->pmsg_size) { |
1392 | cxt->pstore.flags |= PSTORE_FLAGS_PMSG; |
1393 | pr_cont(" pmsg" ); |
1394 | } |
1395 | if (info->console_size) { |
1396 | cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE; |
1397 | pr_cont(" console" ); |
1398 | } |
1399 | if (info->ftrace_size) { |
1400 | cxt->pstore.flags |= PSTORE_FLAGS_FTRACE; |
1401 | pr_cont(" ftrace" ); |
1402 | } |
1403 | pr_cont("\n" ); |
1404 | |
1405 | err = pstore_register(&cxt->pstore); |
1406 | if (err) { |
1407 | pr_err("registering with pstore failed\n" ); |
1408 | goto fail_free; |
1409 | } |
        mutex_unlock(&pstore_zone_cxt.pstore_zone_info_lock);
1411 | |
1412 | return 0; |
1413 | |
1414 | fail_free: |
        kfree(cxt->pstore.buf);
1416 | cxt->pstore.buf = NULL; |
1417 | cxt->pstore.bufsize = 0; |
1418 | psz_free_all_zones(cxt); |
1419 | fail_out: |
1420 | pstore_zone_cxt.pstore_zone_info = NULL; |
        mutex_unlock(&pstore_zone_cxt.pstore_zone_info_lock);
1422 | return err; |
1423 | } |
1424 | EXPORT_SYMBOL_GPL(register_pstore_zone); |
1425 | |
1426 | /** |
1427 | * unregister_pstore_zone() - unregister to pstore/zone |
1428 | * |
1429 | * @info: back-end driver information. See struct pstore_zone_info. |
1430 | */ |
1431 | void unregister_pstore_zone(struct pstore_zone_info *info) |
1432 | { |
1433 | struct psz_context *cxt = &pstore_zone_cxt; |
1434 | |
1435 | mutex_lock(&cxt->pstore_zone_info_lock); |
1436 | if (!cxt->pstore_zone_info) { |
                mutex_unlock(&cxt->pstore_zone_info_lock);
1438 | return; |
1439 | } |
1440 | |
1441 | /* Stop incoming writes from pstore. */ |
1442 | pstore_unregister(&cxt->pstore); |
1443 | |
1444 | /* Flush any pending writes. */ |
1445 | psz_flush_all_dirty_zones(NULL); |
        flush_delayed_work(&psz_cleaner);
1447 | |
1448 | /* Clean up allocations. */ |
        kfree(cxt->pstore.buf);
1450 | cxt->pstore.buf = NULL; |
1451 | cxt->pstore.bufsize = 0; |
1452 | cxt->pstore_zone_info = NULL; |
1453 | |
1454 | psz_free_all_zones(cxt); |
1455 | |
1456 | /* Clear counters and zone state. */ |
1457 | cxt->oops_counter = 0; |
1458 | cxt->panic_counter = 0; |
        atomic_set(&cxt->recovered, 0);
        atomic_set(&cxt->on_panic, 0);
1461 | |
        mutex_unlock(&cxt->pstore_zone_info_lock);
1463 | } |
1464 | EXPORT_SYMBOL_GPL(unregister_pstore_zone); |
1465 | |
1466 | MODULE_LICENSE("GPL" ); |
1467 | MODULE_AUTHOR("WeiXiong Liao <liaoweixiong@allwinnertech.com>" ); |
1468 | MODULE_AUTHOR("Kees Cook <keescook@chromium.org>" ); |
1469 | MODULE_DESCRIPTION("Storage Manager for pstore/blk" ); |
1470 | |