// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/journal.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * Generic filesystem journal-writing code; part of the ext2fs
 * journaling system.
 *
 * This file manages journals: areas of disk reserved for logging
 * transactional updates. This includes the kernel journaling thread
 * which is responsible for scheduling updates to the log.
 *
 * We do not actually manage the physical storage of the journal in this
 * file: that is left to a per-journal policy function, which allows us
 * to store the journal within a filesystem-specified area for ext2
 * journaling (ext2 can use a reserved inode for storing the log).
 */

#include <linux/module.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/freezer.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/poison.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/math64.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/vmalloc.h>
#include <linux/backing-dev.h>
#include <linux/bitops.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>

#define CREATE_TRACE_POINTS
#include <trace/events/jbd2.h>

#include <linux/uaccess.h>
#include <asm/page.h>

#ifdef CONFIG_JBD2_DEBUG
static ushort jbd2_journal_enable_debug __read_mostly;

module_param_named(jbd2_debug, jbd2_journal_enable_debug, ushort, 0644);
MODULE_PARM_DESC(jbd2_debug, "Debugging level for jbd2");
#endif

EXPORT_SYMBOL(jbd2_journal_extend);
EXPORT_SYMBOL(jbd2_journal_stop);
EXPORT_SYMBOL(jbd2_journal_lock_updates);
EXPORT_SYMBOL(jbd2_journal_unlock_updates);
EXPORT_SYMBOL(jbd2_journal_get_write_access);
EXPORT_SYMBOL(jbd2_journal_get_create_access);
EXPORT_SYMBOL(jbd2_journal_get_undo_access);
EXPORT_SYMBOL(jbd2_journal_set_triggers);
EXPORT_SYMBOL(jbd2_journal_dirty_metadata);
EXPORT_SYMBOL(jbd2_journal_forget);
EXPORT_SYMBOL(jbd2_journal_flush);
EXPORT_SYMBOL(jbd2_journal_revoke);

EXPORT_SYMBOL(jbd2_journal_init_dev);
EXPORT_SYMBOL(jbd2_journal_init_inode);
EXPORT_SYMBOL(jbd2_journal_check_used_features);
EXPORT_SYMBOL(jbd2_journal_check_available_features);
EXPORT_SYMBOL(jbd2_journal_set_features);
EXPORT_SYMBOL(jbd2_journal_load);
EXPORT_SYMBOL(jbd2_journal_destroy);
EXPORT_SYMBOL(jbd2_journal_abort);
EXPORT_SYMBOL(jbd2_journal_errno);
EXPORT_SYMBOL(jbd2_journal_ack_err);
EXPORT_SYMBOL(jbd2_journal_clear_err);
EXPORT_SYMBOL(jbd2_log_wait_commit);
EXPORT_SYMBOL(jbd2_journal_start_commit);
EXPORT_SYMBOL(jbd2_journal_force_commit_nested);
EXPORT_SYMBOL(jbd2_journal_wipe);
EXPORT_SYMBOL(jbd2_journal_blocks_per_folio);
EXPORT_SYMBOL(jbd2_journal_invalidate_folio);
EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
EXPORT_SYMBOL(jbd2_journal_force_commit);
EXPORT_SYMBOL(jbd2_journal_inode_ranged_write);
EXPORT_SYMBOL(jbd2_journal_inode_ranged_wait);
EXPORT_SYMBOL(jbd2_journal_finish_inode_data_buffers);
EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
EXPORT_SYMBOL(jbd2_inode_cache);

static int jbd2_journal_create_slab(size_t slab_size);

#ifdef CONFIG_JBD2_DEBUG
void __jbd2_debug(int level, const char *file, const char *func,
		  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (level > jbd2_journal_enable_debug)
		return;
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_DEBUG "%s: (%s, %u): %pV", file, func, line, &vaf);
	va_end(args);
}
#endif

/* Checksumming functions */
static __be32 jbd2_superblock_csum(journal_superblock_t *sb)
{
	__u32 csum;
	__be32 old_csum;

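	/* Compute the checksum with s_checksum zeroed out, then restore it. */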
	old_csum = sb->s_checksum;
	sb->s_checksum = 0;
	csum = jbd2_chksum(~0, (char *)sb, sizeof(journal_superblock_t));
	sb->s_checksum = old_csum;

	return cpu_to_be32(csum);
}

/*
 * Helper function used to manage commit timeouts
 */

static void commit_timeout(struct timer_list *t)
{
	journal_t *journal = timer_container_of(journal, t, j_commit_timer);

	wake_up_process(journal->j_task);
}
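
/*
 * Illustrative sketch (an assumption; the arming happens in the
 * transaction start path, not in this file): a new running transaction
 * arms this timer so that commit_timeout() fires once the commit
 * interval elapses and wakes kjournald2:
 *
 *	journal->j_commit_timer.expires =
 *		jiffies + journal->j_commit_interval;
 *	add_timer(&journal->j_commit_timer);
 */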

/*
 * kjournald2: The main thread function used to manage a logging device
 * journal.
 *
 * This kernel thread is responsible for two things:
 *
 * 1) COMMIT: Every so often we need to commit the current state of the
 *    filesystem to disk. The journal thread is responsible for writing
 *    all of the metadata buffers to disk. If a fast commit is ongoing,
 *    the journal thread waits until it's done and then continues from
 *    there on.
 *
 * 2) CHECKPOINT: We cannot reuse a used section of the log file until all
 *    of the data in that part of the log has been rewritten elsewhere on
 *    the disk. Flushing these old buffers to reclaim space in the log is
 *    known as checkpointing, and this thread is responsible for that job.
 */

static int kjournald2(void *arg)
{
	journal_t *journal = arg;
	transaction_t *transaction;

	/*
	 * Set up an interval timer which can be used to trigger a commit wakeup
	 * after the commit interval expires
	 */
	timer_setup(&journal->j_commit_timer, commit_timeout, 0);

	set_freezable();

	/* Record that the journal thread is running */
	journal->j_task = current;
	wake_up(&journal->j_wait_done_commit);

	/*
	 * Make sure that no allocations from this kernel thread will ever
	 * recurse to the fs layer because we are responsible for the
	 * transaction commit and any fs involvement might get stuck waiting
	 * for the transaction commit.
	 */
	memalloc_nofs_save();

	/*
	 * And now, wait forever for commit wakeup events.
	 */
	write_lock(&journal->j_state_lock);

loop:
	if (journal->j_flags & JBD2_UNMOUNT)
		goto end_loop;

	jbd2_debug(1, "commit_sequence=%u, commit_request=%u\n",
		   journal->j_commit_sequence, journal->j_commit_request);

	if (journal->j_commit_sequence != journal->j_commit_request) {
		jbd2_debug(1, "OK, requests differ\n");
		write_unlock(&journal->j_state_lock);
		timer_delete_sync(&journal->j_commit_timer);
		jbd2_journal_commit_transaction(journal);
		write_lock(&journal->j_state_lock);
		goto loop;
	}

	wake_up(&journal->j_wait_done_commit);
	if (freezing(current)) {
		/*
		 * The simpler the better. Flushing journal isn't a
		 * good idea, because that depends on threads that may
		 * be already stopped.
		 */
		jbd2_debug(1, "Now suspending kjournald2\n");
		write_unlock(&journal->j_state_lock);
		try_to_freeze();
		write_lock(&journal->j_state_lock);
	} else {
		/*
		 * We assume on resume that commits are already there,
		 * so we don't sleep
		 */
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_commit, &wait,
				TASK_INTERRUPTIBLE);
		transaction = journal->j_running_transaction;
		if (transaction == NULL ||
		    time_before(jiffies, transaction->t_expires)) {
			write_unlock(&journal->j_state_lock);
			schedule();
			write_lock(&journal->j_state_lock);
		}
		finish_wait(&journal->j_wait_commit, &wait);
	}

	jbd2_debug(1, "kjournald2 wakes\n");

	/*
	 * Were we woken up by a commit wakeup event?
	 */
	transaction = journal->j_running_transaction;
	if (transaction && time_after_eq(jiffies, transaction->t_expires)) {
		journal->j_commit_request = transaction->t_tid;
		jbd2_debug(1, "woke because of timeout\n");
	}
	goto loop;

end_loop:
	timer_delete_sync(&journal->j_commit_timer);
	journal->j_task = NULL;
	wake_up(&journal->j_wait_done_commit);
	jbd2_debug(1, "Journal thread exiting.\n");
	write_unlock(&journal->j_state_lock);
	return 0;
}

static int jbd2_journal_start_thread(journal_t *journal)
{
	struct task_struct *t;

	t = kthread_run(kjournald2, journal, "jbd2/%s",
			journal->j_devname);
	if (IS_ERR(t))
		return PTR_ERR(t);

	wait_event(journal->j_wait_done_commit, journal->j_task != NULL);
	return 0;
}

static void journal_kill_thread(journal_t *journal)
{
	write_lock(&journal->j_state_lock);
	journal->j_flags |= JBD2_UNMOUNT;

	while (journal->j_task) {
		write_unlock(&journal->j_state_lock);
		wake_up(&journal->j_wait_commit);
		wait_event(journal->j_wait_done_commit, journal->j_task == NULL);
		write_lock(&journal->j_state_lock);
	}
	write_unlock(&journal->j_state_lock);
}

static inline bool jbd2_data_needs_escaping(char *data)
{
	return *((__be32 *)data) == cpu_to_be32(JBD2_MAGIC_NUMBER);
}

static inline void jbd2_data_do_escape(char *data)
{
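	/* Blank out the magic word; recovery restores it when the block is replayed. */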
	*((unsigned int *)data) = 0;
}

/*
 * jbd2_journal_write_metadata_buffer: write a metadata buffer to the journal.
 *
 * Writes a metadata buffer to a given disk block. The actual IO is not
 * performed but a new buffer_head is constructed which labels the data
 * to be written with the correct destination disk block.
 *
 * Any magic-number escaping which needs to be done will cause a
 * copy-out here. If the buffer happens to start with the
 * JBD2_MAGIC_NUMBER, then we can't write it to the log directly: the
 * magic number is only written to the log for descriptor blocks. In
 * this case, we copy the data and replace the first word with 0, and we
 * return a result code which indicates that this buffer needs to be
 * marked as an escaped buffer in the corresponding log descriptor
 * block. The missing word can then be restored when the block is read
 * during recovery.
 *
 * If the source buffer has already been modified by a new transaction
 * since we took the last commit snapshot, we use the frozen copy of
 * that data for IO. If we end up using the existing buffer_head's data
 * for the write, then we have to make sure nobody modifies it while the
 * IO is in progress. do_get_write_access() handles this.
 *
 * The function returns a pointer to the buffer_head to be used for IO.
 *
 *
 * Return value:
 *  =0: Finished OK without escape
 *  =1: Finished OK with escape
 */

int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
				       struct journal_head *jh_in,
				       struct buffer_head **bh_out,
				       sector_t blocknr)
{
	int do_escape = 0;
	struct buffer_head *new_bh;
	struct folio *new_folio;
	unsigned int new_offset;
	struct buffer_head *bh_in = jh2bh(jh_in);
	journal_t *journal = transaction->t_journal;

	/*
	 * The buffer really shouldn't be locked: only the current committing
	 * transaction is allowed to write it, so nobody else is allowed
	 * to do any IO.
	 *
	 * akpm: except if we're journalling data, and write() output is
	 * also part of a shared mapping, and another thread has
	 * decided to launch a writepage() against this buffer.
	 */
	J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));

	new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);

	/* keep subsequent assertions sane */
	atomic_set(&new_bh->b_count, 1);

	spin_lock(&jh_in->b_state_lock);
	/*
	 * If a new transaction has already done a buffer copy-out, then
	 * we use that version of the data for the commit.
	 */
	if (jh_in->b_frozen_data) {
		new_folio = virt_to_folio(jh_in->b_frozen_data);
		new_offset = offset_in_folio(new_folio, jh_in->b_frozen_data);
		do_escape = jbd2_data_needs_escaping(jh_in->b_frozen_data);
		if (do_escape)
			jbd2_data_do_escape(jh_in->b_frozen_data);
	} else {
		char *tmp;
		char *mapped_data;

		new_folio = bh_in->b_folio;
		new_offset = offset_in_folio(new_folio, bh_in->b_data);
		mapped_data = kmap_local_folio(new_folio, new_offset);
		/*
		 * Fire data frozen trigger if data already wasn't frozen. Do
		 * this before checking for escaping, as the trigger may modify
		 * the magic offset. If a copy-out happens afterwards, it will
		 * have the correct data in the buffer.
		 */
		jbd2_buffer_frozen_trigger(jh_in, mapped_data,
					   jh_in->b_triggers);
		do_escape = jbd2_data_needs_escaping(mapped_data);
		kunmap_local(mapped_data);
		/*
		 * Do we need to do a data copy?
		 */
		if (!do_escape)
			goto escape_done;

		spin_unlock(&jh_in->b_state_lock);
		tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS | __GFP_NOFAIL);
		spin_lock(&jh_in->b_state_lock);
		if (jh_in->b_frozen_data) {
			jbd2_free(tmp, bh_in->b_size);
			goto copy_done;
		}

		jh_in->b_frozen_data = tmp;
		memcpy_from_folio(tmp, new_folio, new_offset, bh_in->b_size);
		/*
		 * This isn't strictly necessary, as we're using frozen
		 * data for the escaping, but it keeps consistency with
		 * b_frozen_data usage.
		 */
		jh_in->b_frozen_triggers = jh_in->b_triggers;

copy_done:
		new_folio = virt_to_folio(jh_in->b_frozen_data);
		new_offset = offset_in_folio(new_folio, jh_in->b_frozen_data);
		jbd2_data_do_escape(jh_in->b_frozen_data);
	}

escape_done:
	folio_set_bh(new_bh, new_folio, new_offset);
	new_bh->b_size = bh_in->b_size;
	new_bh->b_bdev = journal->j_dev;
	new_bh->b_blocknr = blocknr;
	new_bh->b_private = bh_in;
	set_buffer_mapped(new_bh);
	set_buffer_dirty(new_bh);

	*bh_out = new_bh;

	/*
	 * The to-be-written buffer needs to get moved to the io queue,
	 * and the original buffer whose contents we are shadowing or
	 * copying is moved to the transaction's shadow queue.
	 */
	JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
	spin_unlock(&journal->j_list_lock);
	set_buffer_shadow(bh_in);
	spin_unlock(&jh_in->b_state_lock);

	return do_escape;
}
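
/*
 * Illustrative sketch (an assumption; the real consumer lives in the
 * commit path, not in this file): a positive return value is recorded
 * in the descriptor tag so recovery knows to restore the magic word:
 *
 *	if (jbd2_journal_write_metadata_buffer(commit_transaction, jh,
 *					       &wbuf[bufs], blocknr) > 0)
 *		tag_flag |= JBD2_FLAG_ESCAPE;
 */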

/*
 * Allocation code for the journal file. Manage the space left in the
 * journal, so that we can begin checkpointing when appropriate.
 */

/*
 * Called with j_state_lock locked for writing.
 * Returns true if a transaction commit was started.
 */
static int __jbd2_log_start_commit(journal_t *journal, tid_t target)
{
	/* Return if the txn has already requested to be committed */
	if (journal->j_commit_request == target)
		return 0;

	/*
	 * The only transaction we can possibly wait upon is the
	 * currently running transaction (if it exists). Otherwise,
	 * the target tid must be an old one.
	 */
	if (journal->j_running_transaction &&
	    journal->j_running_transaction->t_tid == target) {
		/*
		 * We want a new commit: OK, mark the request and wakeup the
		 * commit thread. We do _not_ do the commit ourselves.
		 */

		journal->j_commit_request = target;
		jbd2_debug(1, "JBD2: requesting commit %u/%u\n",
			   journal->j_commit_request,
			   journal->j_commit_sequence);
		journal->j_running_transaction->t_requested = jiffies;
		wake_up(&journal->j_wait_commit);
		return 1;
	} else if (!tid_geq(journal->j_commit_request, target))
		/* This should never happen, but if it does, preserve
		   the evidence before kjournald goes into a loop and
		   increments j_commit_sequence beyond all recognition. */
		WARN_ONCE(1, "JBD2: bad log_start_commit: %u %u %u %u\n",
			  journal->j_commit_request,
			  journal->j_commit_sequence,
			  target, journal->j_running_transaction ?
			  journal->j_running_transaction->t_tid : 0);
	return 0;
}

int jbd2_log_start_commit(journal_t *journal, tid_t tid)
{
	int ret;

	write_lock(&journal->j_state_lock);
	ret = __jbd2_log_start_commit(journal, tid);
	write_unlock(&journal->j_state_lock);
	return ret;
}

/*
 * Force and wait any uncommitted transactions. We can only force the running
 * transaction if we don't have an active handle, otherwise, we will deadlock.
 * Returns: <0 in case of error,
 *           0 if nothing to commit,
 *           1 if transaction was successfully committed.
 */
static int __jbd2_journal_force_commit(journal_t *journal)
{
	transaction_t *transaction = NULL;
	tid_t tid;
	int need_to_start = 0, ret = 0;

	read_lock(&journal->j_state_lock);
	if (journal->j_running_transaction && !current->journal_info) {
		transaction = journal->j_running_transaction;
		if (!tid_geq(journal->j_commit_request, transaction->t_tid))
			need_to_start = 1;
	} else if (journal->j_committing_transaction)
		transaction = journal->j_committing_transaction;

	if (!transaction) {
		/* Nothing to commit */
		read_unlock(&journal->j_state_lock);
		return 0;
	}
	tid = transaction->t_tid;
	read_unlock(&journal->j_state_lock);
	if (need_to_start)
		jbd2_log_start_commit(journal, tid);
	ret = jbd2_log_wait_commit(journal, tid);
	if (!ret)
		ret = 1;

	return ret;
}

/**
 * jbd2_journal_force_commit_nested - Force and wait upon a commit if the
 * calling process is not within a transaction.
 *
 * @journal: journal to force
 * Returns true if progress was made.
 *
 * This is used for forcing out undo-protected data which contains
 * bitmaps, when the fs is running out of space.
 */
int jbd2_journal_force_commit_nested(journal_t *journal)
{
	int ret;

	ret = __jbd2_journal_force_commit(journal);
	return ret > 0;
}

/**
 * jbd2_journal_force_commit() - force any uncommitted transactions
 * @journal: journal to force
 *
 * Caller wants unconditional commit. We can only force the running
 * transaction if we don't have an active handle, otherwise, we will deadlock.
 */
int jbd2_journal_force_commit(journal_t *journal)
{
	int ret;

	J_ASSERT(!current->journal_info);
	ret = __jbd2_journal_force_commit(journal);
	if (ret > 0)
		ret = 0;
	return ret;
}

/*
 * Start a commit of the current running transaction (if any). Returns true
 * if a transaction is going to be committed (or is currently already
 * committing), and fills its tid in at *ptid
 */
int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
{
	int ret = 0;

	write_lock(&journal->j_state_lock);
	if (journal->j_running_transaction) {
		tid_t tid = journal->j_running_transaction->t_tid;

		__jbd2_log_start_commit(journal, tid);
		/* There's a running transaction and we've just made sure
		 * its commit has been scheduled. */
		if (ptid)
			*ptid = tid;
		ret = 1;
	} else if (journal->j_committing_transaction) {
		/*
		 * If commit has been started, then we have to wait for
		 * completion of that transaction.
		 */
		if (ptid)
			*ptid = journal->j_committing_transaction->t_tid;
		ret = 1;
	}
	write_unlock(&journal->j_state_lock);
	return ret;
}

/*
 * Return 1 if a given transaction has not yet sent barrier request
 * connected with a transaction commit. If 0 is returned, transaction
 * may or may not have sent the barrier. Used to avoid sending barrier
 * twice in common cases.
 */
int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
{
	int ret = 0;
	transaction_t *commit_trans, *running_trans;

	if (!(journal->j_flags & JBD2_BARRIER))
		return 0;
	read_lock(&journal->j_state_lock);
	/* Transaction already committed? */
	if (tid_geq(journal->j_commit_sequence, tid))
		goto out;
	commit_trans = journal->j_committing_transaction;
	if (!commit_trans || commit_trans->t_tid != tid) {
		running_trans = journal->j_running_transaction;
		/*
		 * The query transaction hasn't started committing,
		 * it must still be running.
		 */
		if (WARN_ON_ONCE(!running_trans ||
				 running_trans->t_tid != tid))
			goto out;

		running_trans->t_need_data_flush = 1;
		ret = 1;
		goto out;
	}
	/*
	 * Transaction is being committed and we already proceeded to
	 * submitting a flush to fs partition?
	 */
	if (journal->j_fs_dev != journal->j_dev) {
		if (!commit_trans->t_need_data_flush ||
		    commit_trans->t_state >= T_COMMIT_DFLUSH)
			goto out;
	} else {
		if (commit_trans->t_state >= T_COMMIT_JFLUSH)
			goto out;
	}
	ret = 1;
out:
	read_unlock(&journal->j_state_lock);
	return ret;
}
EXPORT_SYMBOL(jbd2_trans_will_send_data_barrier);

/*
 * Wait for a specified commit to complete.
 * The caller may not hold the journal lock.
 */
int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
{
	int err = 0;

	read_lock(&journal->j_state_lock);
#ifdef CONFIG_PROVE_LOCKING
	/*
	 * Some callers make sure transaction is already committing and in that
	 * case we cannot block on open handles anymore. So don't warn in that
	 * case.
	 */
	if (tid_gt(tid, journal->j_commit_sequence) &&
	    (!journal->j_committing_transaction ||
	     journal->j_committing_transaction->t_tid != tid)) {
		read_unlock(&journal->j_state_lock);
		jbd2_might_wait_for_commit(journal);
		read_lock(&journal->j_state_lock);
	}
#endif
#ifdef CONFIG_JBD2_DEBUG
	if (!tid_geq(journal->j_commit_request, tid)) {
		printk(KERN_ERR
		       "%s: error: j_commit_request=%u, tid=%u\n",
		       __func__, journal->j_commit_request, tid);
	}
#endif
	while (tid_gt(tid, journal->j_commit_sequence)) {
		jbd2_debug(1, "JBD2: want %u, j_commit_sequence=%u\n",
			   tid, journal->j_commit_sequence);
		read_unlock(&journal->j_state_lock);
		wake_up(&journal->j_wait_commit);
		wait_event(journal->j_wait_done_commit,
			   !tid_gt(tid, journal->j_commit_sequence));
		read_lock(&journal->j_state_lock);
	}
	read_unlock(&journal->j_state_lock);

	if (unlikely(is_journal_aborted(journal)))
		err = -EIO;
	return err;
}

/*
 * Start a fast commit. If there's an ongoing fast or full commit wait for
 * it to complete. Returns 0 if a new fast commit was started. Returns -EALREADY
 * if a fast commit is not needed, either because there's already a commit
 * going on or this tid has already been committed. Returns -EINVAL if no jbd2
 * commit has yet been performed.
 */
int jbd2_fc_begin_commit(journal_t *journal, tid_t tid)
{
	if (unlikely(is_journal_aborted(journal)))
		return -EIO;
	/*
	 * Fast commits only allowed if at least one full commit has
	 * been processed.
	 */
	if (!journal->j_stats.ts_tid)
		return -EINVAL;

	write_lock(&journal->j_state_lock);
	if (tid_geq(journal->j_commit_sequence, tid)) {
		write_unlock(&journal->j_state_lock);
		return -EALREADY;
	}

	if (journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
	    (journal->j_flags & JBD2_FAST_COMMIT_ONGOING)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_fc_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		write_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_fc_wait, &wait);
		return -EALREADY;
	}
	journal->j_flags |= JBD2_FAST_COMMIT_ONGOING;
	write_unlock(&journal->j_state_lock);

	return 0;
}
EXPORT_SYMBOL(jbd2_fc_begin_commit);

/*
 * Stop a fast commit. If fallback is set, this function starts commit of
 * TID tid before any other fast commit can start.
 */
static int __jbd2_fc_end_commit(journal_t *journal, tid_t tid, bool fallback)
{
	if (journal->j_fc_cleanup_callback)
		journal->j_fc_cleanup_callback(journal, 0, tid);
	write_lock(&journal->j_state_lock);
	journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
	if (fallback)
		journal->j_flags |= JBD2_FULL_COMMIT_ONGOING;
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_fc_wait);
	if (fallback)
		return jbd2_complete_transaction(journal, tid);
	return 0;
}

int jbd2_fc_end_commit(journal_t *journal)
{
	return __jbd2_fc_end_commit(journal, 0, false);
}
EXPORT_SYMBOL(jbd2_fc_end_commit);

int jbd2_fc_end_commit_fallback(journal_t *journal)
{
	tid_t tid;

	read_lock(&journal->j_state_lock);
	tid = journal->j_running_transaction ?
		journal->j_running_transaction->t_tid : 0;
	read_unlock(&journal->j_state_lock);
	return __jbd2_fc_end_commit(journal, tid, true);
}
EXPORT_SYMBOL(jbd2_fc_end_commit_fallback);

/* Return 1 when transaction with given tid has already committed. */
int jbd2_transaction_committed(journal_t *journal, tid_t tid)
{
	return tid_geq(READ_ONCE(journal->j_commit_sequence), tid);
}
EXPORT_SYMBOL(jbd2_transaction_committed);

/*
 * When this function returns the transaction corresponding to tid
 * will be completed. If the transaction is currently running, start
 * committing that transaction before waiting for it to complete. If
 * the transaction id is stale, it is by definition already completed,
 * so just return SUCCESS.
 */
int jbd2_complete_transaction(journal_t *journal, tid_t tid)
{
	int need_to_wait = 1;

	read_lock(&journal->j_state_lock);
	if (journal->j_running_transaction &&
	    journal->j_running_transaction->t_tid == tid) {
		if (journal->j_commit_request != tid) {
			/* transaction not yet started, so request it */
			read_unlock(&journal->j_state_lock);
			jbd2_log_start_commit(journal, tid);
			goto wait_commit;
		}
	} else if (!(journal->j_committing_transaction &&
		     journal->j_committing_transaction->t_tid == tid))
		need_to_wait = 0;
	read_unlock(&journal->j_state_lock);
	if (!need_to_wait)
		return 0;
wait_commit:
	return jbd2_log_wait_commit(journal, tid);
}
EXPORT_SYMBOL(jbd2_complete_transaction);

/*
 * Log buffer allocation routines:
 */

int jbd2_journal_next_log_block(journal_t *journal, unsigned long long *retp)
{
	unsigned long blocknr;

	write_lock(&journal->j_state_lock);
	J_ASSERT(journal->j_free > 1);

	blocknr = journal->j_head;
	journal->j_head++;
	journal->j_free--;
	if (journal->j_head == journal->j_last)
		journal->j_head = journal->j_first;
	write_unlock(&journal->j_state_lock);
	return jbd2_journal_bmap(journal, blocknr, retp);
}

/* Map one fast commit buffer for use by the file system */
int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out)
{
	unsigned long long pblock;
	unsigned long blocknr;
	int ret = 0;
	struct buffer_head *bh;
	int fc_off;

	*bh_out = NULL;

	if (journal->j_fc_off + journal->j_fc_first >= journal->j_fc_last)
		return -EINVAL;

	fc_off = journal->j_fc_off;
	blocknr = journal->j_fc_first + fc_off;
	journal->j_fc_off++;
	ret = jbd2_journal_bmap(journal, blocknr, &pblock);
	if (ret)
		return ret;

	bh = __getblk(journal->j_dev, pblock, journal->j_blocksize);
	if (!bh)
		return -ENOMEM;

	journal->j_fc_wbuf[fc_off] = bh;

	*bh_out = bh;

	return 0;
}
EXPORT_SYMBOL(jbd2_fc_get_buf);

/*
 * Wait on fast commit buffers that were allocated by jbd2_fc_get_buf
 * for completion.
 */
int jbd2_fc_wait_bufs(journal_t *journal, int num_blks)
{
	struct buffer_head *bh;
	int i, j_fc_off;

	j_fc_off = journal->j_fc_off;

	/*
	 * Wait in reverse order to minimize chances of us being woken up before
	 * all IOs have completed
	 */
	for (i = j_fc_off - 1; i >= j_fc_off - num_blks; i--) {
		bh = journal->j_fc_wbuf[i];
		wait_on_buffer(bh);
		/*
		 * Update j_fc_off so jbd2_fc_release_bufs can release the
		 * remaining buffer heads.
		 */
		if (unlikely(!buffer_uptodate(bh))) {
			journal->j_fc_off = i + 1;
			return -EIO;
		}
		put_bh(bh);
		journal->j_fc_wbuf[i] = NULL;
	}

	return 0;
}
EXPORT_SYMBOL(jbd2_fc_wait_bufs);

void jbd2_fc_release_bufs(journal_t *journal)
{
	struct buffer_head *bh;
	int i, j_fc_off;

	j_fc_off = journal->j_fc_off;

	for (i = j_fc_off - 1; i >= 0; i--) {
		bh = journal->j_fc_wbuf[i];
		if (!bh)
			break;
		put_bh(bh);
		journal->j_fc_wbuf[i] = NULL;
	}
}
EXPORT_SYMBOL(jbd2_fc_release_bufs);
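
/*
 * Illustrative sketch of how a filesystem might drive the fast commit
 * helpers above (an assumption; ext4's real fast commit path differs in
 * detail):
 *
 *	if (jbd2_fc_begin_commit(journal, tid) == 0) {
 *		struct buffer_head *bh;
 *
 *		if (jbd2_fc_get_buf(journal, &bh) == 0) {
 *			// fill bh->b_data and submit it, then ...
 *			jbd2_fc_wait_bufs(journal, 1);
 *		}
 *		jbd2_fc_end_commit(journal);
 *	}
 */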

/*
 * Conversion of logical to physical block numbers for the journal
 *
 * On external journals the journal blocks are identity-mapped, so
 * this is a no-op. If needed, we can use j_blk_offset - everything is
 * ready.
 */
int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
		      unsigned long long *retp)
{
	int err = 0;
	unsigned long long ret;
	sector_t block = blocknr;

	if (journal->j_bmap) {
		err = journal->j_bmap(journal, &block);
		if (err == 0)
			*retp = block;
	} else if (journal->j_inode) {
		ret = bmap(journal->j_inode, &block);

		if (ret || !block) {
			printk(KERN_ALERT "%s: journal block not found "
			       "at offset %lu on %s\n",
			       __func__, blocknr, journal->j_devname);
			err = -EIO;
			jbd2_journal_abort(journal, err);
		} else {
			*retp = block;
		}

	} else {
		*retp = blocknr; /* +journal->j_blk_offset */
	}
	return err;
}

/*
 * We play buffer_head aliasing tricks to write data/metadata blocks to
 * the journal without copying their contents, but for journal
 * descriptor blocks we do need to generate bona fide buffers.
 *
 * After the caller of jbd2_journal_get_descriptor_buffer() has finished
 * modifying the buffer's contents they really should run
 * flush_dcache_folio(bh->b_folio). But we don't bother doing that, so there
 * will be coherency problems with mmaps of blockdevs which hold live
 * JBD-controlled filesystems.
 */
struct buffer_head *
jbd2_journal_get_descriptor_buffer(transaction_t *transaction, int type)
{
	journal_t *journal = transaction->t_journal;
	struct buffer_head *bh;
	unsigned long long blocknr;
	journal_header_t *header;
	int err;

	err = jbd2_journal_next_log_block(journal, &blocknr);

	if (err)
		return NULL;

	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
	if (!bh)
		return NULL;
	atomic_dec(&transaction->t_outstanding_credits);
	lock_buffer(bh);
	memset(bh->b_data, 0, journal->j_blocksize);
	header = (journal_header_t *)bh->b_data;
	header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
	header->h_blocktype = cpu_to_be32(type);
	header->h_sequence = cpu_to_be32(transaction->t_tid);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	BUFFER_TRACE(bh, "return this buffer");
	return bh;
}

void jbd2_descriptor_block_csum_set(journal_t *j, struct buffer_head *bh)
{
	struct jbd2_journal_block_tail *tail;
	__u32 csum;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize -
			sizeof(struct jbd2_journal_block_tail));
	tail->t_checksum = 0;
	csum = jbd2_chksum(j->j_csum_seed, bh->b_data, j->j_blocksize);
	tail->t_checksum = cpu_to_be32(csum);
}

/*
 * Return tid of the oldest transaction in the journal and block in the journal
 * where the transaction starts.
 *
 * If the journal is now empty, return which will be the next transaction ID
 * we will write and where will that transaction start.
 *
 * The return value is 0 if journal tail cannot be pushed any further, 1 if
 * it can.
 */
int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
			      unsigned long *block)
{
	transaction_t *transaction;
	int ret;

	read_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	transaction = journal->j_checkpoint_transactions;
	if (transaction) {
		*tid = transaction->t_tid;
		*block = transaction->t_log_start;
	} else if ((transaction = journal->j_committing_transaction) != NULL) {
		*tid = transaction->t_tid;
		*block = transaction->t_log_start;
	} else if ((transaction = journal->j_running_transaction) != NULL) {
		*tid = transaction->t_tid;
		*block = journal->j_head;
	} else {
		*tid = journal->j_transaction_sequence;
		*block = journal->j_head;
	}
	ret = tid_gt(*tid, journal->j_tail_sequence);
	spin_unlock(&journal->j_list_lock);
	read_unlock(&journal->j_state_lock);

	return ret;
}

/*
 * Update information in journal structure and in on disk journal superblock
 * about log tail. This function does not check whether information passed in
 * really pushes log tail further. It is the responsibility of the caller to
 * make sure provided log tail information is valid (e.g. by holding
 * j_checkpoint_mutex all the time between computing log tail and calling this
 * function as is the case with jbd2_cleanup_journal_tail()).
 *
 * Requires j_checkpoint_mutex
 */
int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
{
	unsigned long freed;
	int ret;

	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));

	/*
	 * We cannot afford for write to remain in drive's caches since as
	 * soon as we update j_tail, next transaction can start reusing journal
	 * space and if we lose sb update during power failure we'd replay
	 * old transaction with possibly newly overwritten data.
	 */
	ret = jbd2_journal_update_sb_log_tail(journal, tid, block, REQ_FUA);
	if (ret)
		goto out;

	write_lock(&journal->j_state_lock);
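	/*
	 * The log is circular: if the new tail block is numerically below
	 * the old one, the tail has wrapped, so account for the wrap when
	 * computing how many blocks were freed.
	 */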
	freed = block - journal->j_tail;
	if (block < journal->j_tail)
		freed += journal->j_last - journal->j_first;

	trace_jbd2_update_log_tail(journal, tid, block, freed);
	jbd2_debug(1,
		   "Cleaning journal tail from %u to %u (offset %lu), "
		   "freeing %lu\n",
		   journal->j_tail_sequence, tid, block, freed);

	journal->j_free += freed;
	journal->j_tail_sequence = tid;
	journal->j_tail = block;
	write_unlock(&journal->j_state_lock);

out:
	return ret;
}

/*
 * This is a variation of __jbd2_update_log_tail which checks for validity of
 * provided log tail and locks j_checkpoint_mutex. So it is safe against races
 * with other threads updating log tail.
 */
void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
{
	mutex_lock_io(&journal->j_checkpoint_mutex);
	if (tid_gt(tid, journal->j_tail_sequence))
		__jbd2_update_log_tail(journal, tid, block);
	mutex_unlock(&journal->j_checkpoint_mutex);
}

struct jbd2_stats_proc_session {
	journal_t *journal;
	struct transaction_stats_s *stats;
	int start;
	int max;
};

static void *jbd2_seq_info_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? NULL : SEQ_START_TOKEN;
}

static void *jbd2_seq_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return NULL;
}

static int jbd2_seq_info_show(struct seq_file *seq, void *v)
{
	struct jbd2_stats_proc_session *s = seq->private;

	if (v != SEQ_START_TOKEN)
		return 0;
	seq_printf(seq, "%lu transactions (%lu requested), "
		   "each up to %u blocks\n",
		   s->stats->ts_tid, s->stats->ts_requested,
		   s->journal->j_max_transaction_buffers);
	if (s->stats->ts_tid == 0)
		return 0;
	seq_printf(seq, "average: \n %ums waiting for transaction\n",
	    jiffies_to_msecs(s->stats->run.rs_wait / s->stats->ts_tid));
	seq_printf(seq, " %ums request delay\n",
	    (s->stats->ts_requested == 0) ? 0 :
	    jiffies_to_msecs(s->stats->run.rs_request_delay /
			     s->stats->ts_requested));
	seq_printf(seq, " %ums running transaction\n",
	    jiffies_to_msecs(s->stats->run.rs_running / s->stats->ts_tid));
	seq_printf(seq, " %ums transaction was being locked\n",
	    jiffies_to_msecs(s->stats->run.rs_locked / s->stats->ts_tid));
	seq_printf(seq, " %ums flushing data (in ordered mode)\n",
	    jiffies_to_msecs(s->stats->run.rs_flushing / s->stats->ts_tid));
	seq_printf(seq, " %ums logging transaction\n",
	    jiffies_to_msecs(s->stats->run.rs_logging / s->stats->ts_tid));
	seq_printf(seq, " %lluus average transaction commit time\n",
		   div_u64(s->journal->j_average_commit_time, 1000));
	seq_printf(seq, " %lu handles per transaction\n",
	    s->stats->run.rs_handle_count / s->stats->ts_tid);
	seq_printf(seq, " %lu blocks per transaction\n",
	    s->stats->run.rs_blocks / s->stats->ts_tid);
	seq_printf(seq, " %lu logged blocks per transaction\n",
	    s->stats->run.rs_blocks_logged / s->stats->ts_tid);
	return 0;
}

static void jbd2_seq_info_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations jbd2_seq_info_ops = {
	.start	= jbd2_seq_info_start,
	.next	= jbd2_seq_info_next,
	.stop	= jbd2_seq_info_stop,
	.show	= jbd2_seq_info_show,
};

static int jbd2_seq_info_open(struct inode *inode, struct file *file)
{
	journal_t *journal = pde_data(inode);
	struct jbd2_stats_proc_session *s;
	int rc, size;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL)
		return -ENOMEM;
	size = sizeof(struct transaction_stats_s);
	s->stats = kmalloc(size, GFP_KERNEL);
	if (s->stats == NULL) {
		kfree(s);
		return -ENOMEM;
	}
	spin_lock(&journal->j_history_lock);
	memcpy(s->stats, &journal->j_stats, size);
	s->journal = journal;
	spin_unlock(&journal->j_history_lock);

	rc = seq_open(file, &jbd2_seq_info_ops);
	if (rc == 0) {
		struct seq_file *m = file->private_data;
		m->private = s;
	} else {
		kfree(s->stats);
		kfree(s);
	}
	return rc;

}

static int jbd2_seq_info_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct jbd2_stats_proc_session *s = seq->private;
	kfree(s->stats);
	kfree(s);
	return seq_release(inode, file);
}

static const struct proc_ops jbd2_info_proc_ops = {
	.proc_open	= jbd2_seq_info_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= jbd2_seq_info_release,
};

static struct proc_dir_entry *proc_jbd2_stats;

static void jbd2_stats_proc_init(journal_t *journal)
{
	journal->j_proc_entry = proc_mkdir(journal->j_devname, proc_jbd2_stats);
	if (journal->j_proc_entry) {
		proc_create_data("info", S_IRUGO, journal->j_proc_entry,
				 &jbd2_info_proc_ops, journal);
	}
}

static void jbd2_stats_proc_exit(journal_t *journal)
{
	remove_proc_entry("info", journal->j_proc_entry);
	remove_proc_entry(journal->j_devname, proc_jbd2_stats);
}

/* Minimum size of descriptor tag */
static int jbd2_min_tag_size(void)
{
	/*
	 * Tag with 32-bit block numbers does not use last four bytes of the
	 * structure
	 */
	return sizeof(journal_block_tag_t) - 4;
}

/**
 * jbd2_journal_shrink_scan()
 * @shrink: shrinker to work on
 * @sc: reclaim request to process
 *
 * Scan the checkpointed buffer on the checkpoint list and release the
 * journal_head.
 */
static unsigned long jbd2_journal_shrink_scan(struct shrinker *shrink,
					      struct shrink_control *sc)
{
	journal_t *journal = shrink->private_data;
	unsigned long nr_to_scan = sc->nr_to_scan;
	unsigned long nr_shrunk;
	unsigned long count;

	count = percpu_counter_read_positive(&journal->j_checkpoint_jh_count);
	trace_jbd2_shrink_scan_enter(journal, sc->nr_to_scan, count);

	nr_shrunk = jbd2_journal_shrink_checkpoint_list(journal, &nr_to_scan);

	count = percpu_counter_read_positive(&journal->j_checkpoint_jh_count);
	trace_jbd2_shrink_scan_exit(journal, nr_to_scan, nr_shrunk, count);

	return nr_shrunk;
}

/**
 * jbd2_journal_shrink_count()
 * @shrink: shrinker to work on
 * @sc: reclaim request to process
 *
 * Count the number of checkpoint buffers on the checkpoint list.
 */
static unsigned long jbd2_journal_shrink_count(struct shrinker *shrink,
					       struct shrink_control *sc)
{
	journal_t *journal = shrink->private_data;
	unsigned long count;

	count = percpu_counter_read_positive(&journal->j_checkpoint_jh_count);
	trace_jbd2_shrink_count(journal, sc->nr_to_scan, count);

	return count;
}

/*
 * If the journal init or create aborts, we need to mark the journal
 * superblock as being NULL to prevent the journal destroy from writing
 * back a bogus superblock.
 */
static void journal_fail_superblock(journal_t *journal)
{
	struct buffer_head *bh = journal->j_sb_buffer;
	brelse(bh);
	journal->j_sb_buffer = NULL;
}

/*
 * Check the superblock for a given journal, performing initial
 * validation of the format.
 */
static int journal_check_superblock(journal_t *journal)
{
	journal_superblock_t *sb = journal->j_superblock;
	int num_fc_blks;
	int err = -EINVAL;

	if (sb->s_header.h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER) ||
	    sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) {
		printk(KERN_WARNING "JBD2: no valid journal superblock found\n");
		return err;
	}

	if (be32_to_cpu(sb->s_header.h_blocktype) != JBD2_SUPERBLOCK_V1 &&
	    be32_to_cpu(sb->s_header.h_blocktype) != JBD2_SUPERBLOCK_V2) {
		printk(KERN_WARNING "JBD2: unrecognised superblock format ID\n");
		return err;
	}

	if (be32_to_cpu(sb->s_maxlen) > journal->j_total_len) {
		printk(KERN_WARNING "JBD2: journal file too short\n");
		return err;
	}

	if (be32_to_cpu(sb->s_first) == 0 ||
	    be32_to_cpu(sb->s_first) >= journal->j_total_len) {
		printk(KERN_WARNING
			"JBD2: Invalid start block of journal: %u\n",
			be32_to_cpu(sb->s_first));
		return err;
	}

	/*
	 * If this is a V2 superblock, then we have to check the
	 * features flags on it.
	 */
	if (!jbd2_format_support_feature(journal))
		return 0;

	if ((sb->s_feature_ro_compat &
			~cpu_to_be32(JBD2_KNOWN_ROCOMPAT_FEATURES)) ||
	    (sb->s_feature_incompat &
			~cpu_to_be32(JBD2_KNOWN_INCOMPAT_FEATURES))) {
		printk(KERN_WARNING "JBD2: Unrecognised features on journal\n");
		return err;
	}

	num_fc_blks = jbd2_has_feature_fast_commit(journal) ?
				jbd2_journal_get_num_fc_blks(sb) : 0;
	if (be32_to_cpu(sb->s_maxlen) < JBD2_MIN_JOURNAL_BLOCKS ||
	    be32_to_cpu(sb->s_maxlen) - JBD2_MIN_JOURNAL_BLOCKS < num_fc_blks) {
		printk(KERN_ERR "JBD2: journal file too short %u,%d\n",
		       be32_to_cpu(sb->s_maxlen), num_fc_blks);
		return err;
	}

	if (jbd2_has_feature_csum2(journal) &&
	    jbd2_has_feature_csum3(journal)) {
		/* Can't have checksum v2 and v3 at the same time! */
		printk(KERN_ERR "JBD2: Can't enable checksumming v2 and v3 "
		       "at the same time!\n");
		return err;
	}

	if (jbd2_journal_has_csum_v2or3(journal) &&
	    jbd2_has_feature_checksum(journal)) {
		/* Can't have checksum v1 and v2 on at the same time! */
		printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2/3 "
		       "at the same time!\n");
		return err;
	}

	if (jbd2_journal_has_csum_v2or3(journal)) {
		if (sb->s_checksum_type != JBD2_CRC32C_CHKSUM) {
			printk(KERN_ERR "JBD2: Unknown checksum type\n");
			return err;
		}

		/* Check superblock checksum */
		if (sb->s_checksum != jbd2_superblock_csum(sb)) {
			printk(KERN_ERR "JBD2: journal checksum error\n");
			err = -EFSBADCRC;
			return err;
		}
	}

	return 0;
}

static int journal_revoke_records_per_block(journal_t *journal)
{
	int record_size;
	int space = journal->j_blocksize - sizeof(jbd2_journal_revoke_header_t);

	if (jbd2_has_feature_64bit(journal))
		record_size = 8;
	else
		record_size = 4;

	if (jbd2_journal_has_csum_v2or3(journal))
		space -= sizeof(struct jbd2_journal_block_tail);
	return space / record_size;
}
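
/*
 * Worked example (illustrative; assumes the usual on-disk layouts of a
 * 16-byte revoke header and a 4-byte block tail): with a 4096-byte block,
 * checksums enabled and 64-bit block numbers, that leaves
 * (4096 - 16 - 4) / 8 = 509 revoke records per descriptor block.
 */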
1411 | |
1412 | static int jbd2_journal_get_max_txn_bufs(journal_t *journal) |
1413 | { |
1414 | return (journal->j_total_len - journal->j_fc_wbufsize) / 3; |
1415 | } |
1416 | |
1417 | /* |
1418 | * Base amount of descriptor blocks we reserve for each transaction. |
1419 | */ |
1420 | static int jbd2_descriptor_blocks_per_trans(journal_t *journal) |
1421 | { |
1422 | int tag_space = journal->j_blocksize - sizeof(journal_header_t); |
1423 | int tags_per_block; |
1424 | |
1425 | /* Subtract UUID */ |
1426 | tag_space -= 16; |
1427 | if (jbd2_journal_has_csum_v2or3(journal)) |
1428 | tag_space -= sizeof(struct jbd2_journal_block_tail); |
1429 | /* Commit code leaves a slack space of 16 bytes at the end of block */ |
1430 | tags_per_block = (tag_space - 16) / journal_tag_bytes(journal); |
1431 | /* |
1432 | * Revoke descriptors are accounted separately so we need to reserve |
1433 | * space for commit block and normal transaction descriptor blocks. |
1434 | */ |
1435 | return 1 + DIV_ROUND_UP(jbd2_journal_get_max_txn_bufs(journal), |
1436 | tags_per_block); |
1437 | } |
1438 | |
1439 | /* |
1440 | * Initialize number of blocks each transaction reserves for its bookkeeping |
1441 | * and maximum number of blocks a transaction can use. This needs to be called |
1442 | * after the journal size and the fastcommit area size are initialized. |
1443 | */ |
1444 | static void jbd2_journal_init_transaction_limits(journal_t *journal) |
1445 | { |
1446 | journal->j_revoke_records_per_block = |
1447 | journal_revoke_records_per_block(journal); |
1448 | journal->j_transaction_overhead_buffers = |
1449 | jbd2_descriptor_blocks_per_trans(journal); |
1450 | journal->j_max_transaction_buffers = |
1451 | jbd2_journal_get_max_txn_bufs(journal); |
1452 | } |
1453 | |
1454 | /* |
1455 | * Load the on-disk journal superblock and read the key fields into the |
1456 | * journal_t. |
1457 | */ |
1458 | static int journal_load_superblock(journal_t *journal) |
1459 | { |
1460 | int err; |
1461 | struct buffer_head *bh; |
1462 | journal_superblock_t *sb; |
1463 | |
1464 | bh = getblk_unmovable(bdev: journal->j_dev, block: journal->j_blk_offset, |
1465 | size: journal->j_blocksize); |
1466 | if (bh) |
1467 | err = bh_read(bh, op_flags: 0); |
1468 | if (!bh || err < 0) { |
1469 | pr_err("%s: Cannot read journal superblock\n", __func__); |
1470 | brelse(bh); |
1471 | return -EIO; |
1472 | } |
1473 | |
1474 | journal->j_sb_buffer = bh; |
1475 | sb = (journal_superblock_t *)bh->b_data; |
1476 | journal->j_superblock = sb; |
1477 | err = journal_check_superblock(journal); |
1478 | if (err) { |
1479 | journal_fail_superblock(journal); |
1480 | return err; |
1481 | } |
1482 | |
1483 | journal->j_tail_sequence = be32_to_cpu(sb->s_sequence); |
1484 | journal->j_tail = be32_to_cpu(sb->s_start); |
1485 | journal->j_first = be32_to_cpu(sb->s_first); |
1486 | journal->j_errno = be32_to_cpu(sb->s_errno); |
1487 | journal->j_last = be32_to_cpu(sb->s_maxlen); |
1488 | |
1489 | if (be32_to_cpu(sb->s_maxlen) < journal->j_total_len) |
1490 | journal->j_total_len = be32_to_cpu(sb->s_maxlen); |
1491 | /* Precompute checksum seed for all metadata */ |
1492 | if (jbd2_journal_has_csum_v2or3(journal)) |
1493 | journal->j_csum_seed = jbd2_chksum(crc: ~0, address: sb->s_uuid, |
1494 | length: sizeof(sb->s_uuid)); |
1495 | /* After journal features are set, we can compute transaction limits */ |
1496 | jbd2_journal_init_transaction_limits(journal); |
1497 | |
1498 | if (jbd2_has_feature_fast_commit(j: journal)) { |
1499 | journal->j_fc_last = be32_to_cpu(sb->s_maxlen); |
1500 | journal->j_last = journal->j_fc_last - |
1501 | jbd2_journal_get_num_fc_blks(jsb: sb); |
1502 | journal->j_fc_first = journal->j_last + 1; |
1503 | journal->j_fc_off = 0; |
1504 | } |
1505 | |
1506 | return 0; |
1507 | } |
1508 | |
1509 | |
1510 | /* |
1511 | * Management for journal control blocks: functions to create and |
1512 | * destroy journal_t structures, and to initialise and read existing |
1513 | * journal blocks from disk. */ |
1514 | |
1515 | /* The journal_init_common() function creates and fills a journal_t object |
1516 | * in memory. It calls journal_load_superblock() to load the on-disk journal |
1517 | * superblock and initialize the journal_t object. |
1518 | */ |
1519 | |
1520 | static journal_t *journal_init_common(struct block_device *bdev, |
1521 | struct block_device *fs_dev, |
1522 | unsigned long long start, int len, int blocksize) |
1523 | { |
1524 | static struct lock_class_key jbd2_trans_commit_key; |
1525 | journal_t *journal; |
1526 | int err; |
1527 | int n; |
1528 | |
1529 | journal = kzalloc(sizeof(*journal), GFP_KERNEL); |
1530 | if (!journal) |
1531 | return ERR_PTR(error: -ENOMEM); |
1532 | |
1533 | journal->j_blocksize = blocksize; |
1534 | journal->j_dev = bdev; |
1535 | journal->j_fs_dev = fs_dev; |
1536 | journal->j_blk_offset = start; |
1537 | journal->j_total_len = len; |
1538 | jbd2_init_fs_dev_write_error(journal); |
1539 | |
1540 | err = journal_load_superblock(journal); |
1541 | if (err) |
1542 | goto err_cleanup; |
1543 | |
1544 | init_waitqueue_head(&journal->j_wait_transaction_locked); |
1545 | init_waitqueue_head(&journal->j_wait_done_commit); |
1546 | init_waitqueue_head(&journal->j_wait_commit); |
1547 | init_waitqueue_head(&journal->j_wait_updates); |
1548 | init_waitqueue_head(&journal->j_wait_reserved); |
1549 | init_waitqueue_head(&journal->j_fc_wait); |
1550 | mutex_init(&journal->j_abort_mutex); |
1551 | mutex_init(&journal->j_barrier); |
1552 | mutex_init(&journal->j_checkpoint_mutex); |
1553 | spin_lock_init(&journal->j_revoke_lock); |
1554 | spin_lock_init(&journal->j_list_lock); |
1555 | spin_lock_init(&journal->j_history_lock); |
1556 | rwlock_init(&journal->j_state_lock); |
1557 | |
1558 | journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE); |
1559 | journal->j_min_batch_time = 0; |
1560 | journal->j_max_batch_time = 15000; /* 15ms */ |
1561 | atomic_set(&journal->j_reserved_credits, 0);
1562 | lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle",
1563 | &jbd2_trans_commit_key, 0);
1564 | |
1565 | /* The journal is marked for error until we succeed with recovery! */ |
1566 | journal->j_flags = JBD2_ABORT; |
1567 | |
1568 | /* Set up a default-sized revoke table for the new mount. */ |
1569 | err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH); |
1570 | if (err) |
1571 | goto err_cleanup; |
1572 | |
1573 | /* |
1574 | * A journal descriptor block can describe up to n blocks; we need
1575 | * enough buffers to write out a full descriptor block.
1576 | */ |
1577 | err = -ENOMEM; |
1578 | n = journal->j_blocksize / jbd2_min_tag_size(); |
1579 | journal->j_wbufsize = n; |
1580 | journal->j_fc_wbuf = NULL; |
1581 | journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *), |
1582 | GFP_KERNEL); |
1583 | if (!journal->j_wbuf) |
1584 | goto err_cleanup; |
1585 | |
1586 | err = percpu_counter_init(&journal->j_checkpoint_jh_count, 0, |
1587 | GFP_KERNEL); |
1588 | if (err) |
1589 | goto err_cleanup; |
1590 | |
1591 | journal->j_shrink_transaction = NULL; |
1592 | |
1593 | journal->j_shrinker = shrinker_alloc(0, "jbd2-journal:(%u:%u)",
1594 | MAJOR(bdev->bd_dev), |
1595 | MINOR(bdev->bd_dev)); |
1596 | if (!journal->j_shrinker) { |
1597 | err = -ENOMEM; |
1598 | goto err_cleanup; |
1599 | } |
1600 | |
1601 | journal->j_shrinker->scan_objects = jbd2_journal_shrink_scan; |
1602 | journal->j_shrinker->count_objects = jbd2_journal_shrink_count; |
1603 | journal->j_shrinker->private_data = journal; |
1604 | |
1605 | shrinker_register(journal->j_shrinker);
1606 | |
1607 | return journal; |
1608 | |
1609 | err_cleanup: |
1610 | percpu_counter_destroy(&journal->j_checkpoint_jh_count);
1611 | kfree(journal->j_wbuf);
1612 | jbd2_journal_destroy_revoke(journal);
1613 | journal_fail_superblock(journal);
1614 | kfree(journal);
1615 | return ERR_PTR(err);
1616 | } |
1617 | |
1618 | /* jbd2_journal_init_dev and jbd2_journal_init_inode: |
1619 | * |
1620 | * Create a journal structure assigned to some fixed set of disk blocks.
1621 | * We don't actually touch those disk blocks yet, but we need to set up
1622 | * all of the mapping information to tell the journaling system where
1623 | * the journal blocks are.
1624 | * |
1625 | */ |
1626 | |
1627 | /** |
1628 | * journal_t * jbd2_journal_init_dev() - creates and initialises a journal structure |
1629 | * @bdev: Block device on which to create the journal |
1630 | * @fs_dev: Device which holds the journalled filesystem for this journal.
1631 | * @start: Block nr of the start of the journal.
1632 | * @len: Length of the journal in blocks. |
1633 | * @blocksize: blocksize of journalling device |
1634 | * |
1635 | * Returns: a newly created journal_t * |
1636 | * |
1637 | * jbd2_journal_init_dev creates a journal which maps a fixed contiguous |
1638 | * range of blocks on an arbitrary block device. |
1639 | * |
1640 | */ |
1641 | journal_t *jbd2_journal_init_dev(struct block_device *bdev, |
1642 | struct block_device *fs_dev, |
1643 | unsigned long long start, int len, int blocksize) |
1644 | { |
1645 | journal_t *journal; |
1646 | |
1647 | journal = journal_init_common(bdev, fs_dev, start, len, blocksize); |
1648 | if (IS_ERR(journal))
1649 | return ERR_CAST(journal);
1650 |
1651 | snprintf(journal->j_devname, sizeof(journal->j_devname),
1652 | "%pg", journal->j_dev);
1653 | strreplace(journal->j_devname, '/', '!');
1654 | jbd2_stats_proc_init(journal); |
1655 | |
1656 | return journal; |
1657 | } |
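/*
 * Usage sketch (illustrative, not part of the original file): a filesystem
 * creating its journal on a dedicated external device. The start block,
 * length and block size are made up for the example; error handling follows
 * the ERR_PTR() convention used above.
 */
#if 0 /* example only */
static journal_t *example_open_external_journal(struct block_device *jdev,
						struct block_device *fs_dev)
{
	/* Journal starts at block 1 (block 0 might hold an fs label),
	 * spans 32768 blocks of 4KiB each. */
	journal_t *journal = jbd2_journal_init_dev(jdev, fs_dev, 1, 32768, 4096);

	if (IS_ERR(journal))
		return journal;			/* propagate ERR_PTR() */
	if (jbd2_journal_load(journal)) {	/* replay and reset the log */
		jbd2_journal_destroy(journal);
		return ERR_PTR(-EIO);
	}
	return journal;
}
#endif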
1658 | |
1659 | /** |
1660 | * journal_t * jbd2_journal_init_inode () - creates a journal which maps to an inode.
1661 | * @inode: An inode to create the journal in |
1662 | * |
1663 | * jbd2_journal_init_inode creates a journal which maps an on-disk inode as |
1664 | * the journal. The inode must exist already, must support bmap() and |
1665 | * must have all data blocks preallocated. |
1666 | */ |
1667 | journal_t *jbd2_journal_init_inode(struct inode *inode) |
1668 | { |
1669 | journal_t *journal; |
1670 | sector_t blocknr; |
1671 | int err = 0; |
1672 | |
1673 | blocknr = 0; |
1674 | err = bmap(inode, &blocknr);
1675 | if (err || !blocknr) {
1676 | pr_err("%s: Cannot locate journal superblock\n", __func__);
1677 | return err ? ERR_PTR(err) : ERR_PTR(-EINVAL);
1678 | } |
1679 | |
1680 | jbd2_debug(1, "JBD2: inode %s/%ld, size %lld, bits %d, blksize %ld\n", |
1681 | inode->i_sb->s_id, inode->i_ino, (long long) inode->i_size, |
1682 | inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize); |
1683 | |
1684 | journal = journal_init_common(inode->i_sb->s_bdev, inode->i_sb->s_bdev,
1685 | blocknr, inode->i_size >> inode->i_sb->s_blocksize_bits,
1686 | inode->i_sb->s_blocksize);
1687 | if (IS_ERR(journal))
1688 | return ERR_CAST(journal);
1689 | |
1690 | journal->j_inode = inode; |
1691 | snprintf(journal->j_devname, sizeof(journal->j_devname),
1692 | "%pg-%lu", journal->j_dev, journal->j_inode->i_ino);
1693 | strreplace(journal->j_devname, '/', '!');
1694 | jbd2_stats_proc_init(journal); |
1695 | |
1696 | return journal; |
1697 | } |
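/*
 * Illustrative arithmetic (not part of the original file): for an
 * inode-backed journal the length passed to journal_init_common() above is
 * derived from the inode size. E.g. a 128MiB journal inode on a filesystem
 * with 4KiB blocks (s_blocksize_bits == 12) yields 134217728 >> 12 = 32768
 * journal blocks.
 */
#if 0 /* example only */
static int example_journal_len(struct inode *inode)
{
	/* i_size = 128 MiB, s_blocksize_bits = 12 => 32768 blocks */
	return inode->i_size >> inode->i_sb->s_blocksize_bits;
}
#endif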
1698 | |
1699 | /* |
1700 | * Given a journal_t structure, initialise the various fields for |
1701 | * startup of a new journaling session. We use this both when creating |
1702 | * a journal, and after recovering an old journal to reset it for |
1703 | * subsequent use. |
1704 | */ |
1705 | |
1706 | static int journal_reset(journal_t *journal) |
1707 | { |
1708 | journal_superblock_t *sb = journal->j_superblock; |
1709 | unsigned long long first, last; |
1710 | |
1711 | first = be32_to_cpu(sb->s_first); |
1712 | last = be32_to_cpu(sb->s_maxlen); |
1713 | if (first + JBD2_MIN_JOURNAL_BLOCKS > last + 1) { |
1714 | printk(KERN_ERR "JBD2: Journal too short (blocks %llu-%llu).\n", |
1715 | first, last); |
1716 | journal_fail_superblock(journal); |
1717 | return -EINVAL; |
1718 | } |
1719 | |
1720 | journal->j_first = first; |
1721 | journal->j_last = last; |
1722 | |
1723 | if (journal->j_head != 0 && journal->j_flags & JBD2_CYCLE_RECORD) { |
1724 | /* |
1725 | * Disable the cycled recording mode if the journal head block |
1726 | * number is not correct. |
1727 | */ |
1728 | if (journal->j_head < first || journal->j_head >= last) { |
1729 | printk(KERN_WARNING "JBD2: Incorrect Journal head block %lu, " |
1730 | "disable journal_cycle_record\n", |
1731 | journal->j_head); |
1732 | journal->j_head = journal->j_first; |
1733 | } |
1734 | } else { |
1735 | journal->j_head = journal->j_first; |
1736 | } |
1737 | journal->j_tail = journal->j_head; |
1738 | journal->j_free = journal->j_last - journal->j_first; |
1739 | |
1740 | journal->j_tail_sequence = journal->j_transaction_sequence; |
1741 | journal->j_commit_sequence = journal->j_transaction_sequence - 1; |
1742 | journal->j_commit_request = journal->j_commit_sequence; |
1743 | |
1744 | /* |
1745 | * Now that journal recovery is done, turn fast commits off here. That
1746 | * way, if fast commit was enabled before the crash but the filesystem
1747 | * has since disabled it, we don't re-enable fast commits.
1748 | */ |
1749 | jbd2_clear_feature_fast_commit(journal);
1750 | |
1751 | /* |
1752 | * As a special case, if the on-disk copy is already marked as needing |
1753 | * no recovery (s_start == 0), then we can safely defer the superblock |
1754 | * update until the next commit by setting JBD2_FLUSHED. This avoids |
1755 | * attempting a write to a potentially read-only device.
1756 | */ |
1757 | if (sb->s_start == 0) { |
1758 | jbd2_debug(1, "JBD2: Skipping superblock update on recovered sb " |
1759 | "(start %ld, seq %u, errno %d)\n", |
1760 | journal->j_tail, journal->j_tail_sequence, |
1761 | journal->j_errno); |
1762 | journal->j_flags |= JBD2_FLUSHED; |
1763 | } else { |
1764 | /* Lock here to make assertions happy... */ |
1765 | mutex_lock_io(&journal->j_checkpoint_mutex); |
1766 | /* |
1767 | * Update log tail information. We use REQ_FUA since new |
1768 | * transaction will start reusing journal space and so we |
1769 | * must make sure information about current log tail is on |
1770 | * disk before that. |
1771 | */ |
1772 | jbd2_journal_update_sb_log_tail(journal, |
1773 | journal->j_tail_sequence, |
1774 | journal->j_tail, REQ_FUA); |
1775 | mutex_unlock(&journal->j_checkpoint_mutex);
1776 | } |
1777 | return jbd2_journal_start_thread(journal); |
1778 | } |
1779 | |
1780 | /* |
1781 | * This function expects that the caller will have locked the journal |
1782 | * buffer head, and will return with it unlocked |
1783 | */ |
1784 | static int jbd2_write_superblock(journal_t *journal, blk_opf_t write_flags) |
1785 | { |
1786 | struct buffer_head *bh = journal->j_sb_buffer; |
1787 | journal_superblock_t *sb = journal->j_superblock; |
1788 | int ret = 0; |
1789 | |
1790 | /* Buffer got discarded which means block device got invalidated */ |
1791 | if (!buffer_mapped(bh)) { |
1792 | unlock_buffer(bh); |
1793 | return -EIO; |
1794 | } |
1795 | |
1796 | /* |
1797 | * Always set high priority flags to exempt from block layer's |
1798 | * QOS policies, e.g. writeback throttle. |
1799 | */ |
1800 | write_flags |= JBD2_JOURNAL_REQ_FLAGS; |
1801 | if (!(journal->j_flags & JBD2_BARRIER)) |
1802 | write_flags &= ~(REQ_FUA | REQ_PREFLUSH); |
1803 | |
1804 | trace_jbd2_write_superblock(journal, write_flags); |
1805 | |
1806 | if (buffer_write_io_error(bh)) { |
1807 | /* |
1808 | * Oh, dear. A previous attempt to write the journal |
1809 | * superblock failed. This could happen because the |
1810 | * USB device was yanked out. Or it could happen to |
1811 | * be a transient write error and maybe the block will |
1812 | * be remapped. Nothing we can do but to retry the |
1813 | * write and hope for the best. |
1814 | */ |
1815 | printk(KERN_ERR "JBD2: previous I/O error detected " |
1816 | "for journal superblock update for %s.\n", |
1817 | journal->j_devname); |
1818 | clear_buffer_write_io_error(bh); |
1819 | set_buffer_uptodate(bh); |
1820 | } |
1821 | if (jbd2_journal_has_csum_v2or3(journal)) |
1822 | sb->s_checksum = jbd2_superblock_csum(sb); |
1823 | get_bh(bh); |
1824 | bh->b_end_io = end_buffer_write_sync; |
1825 | submit_bh(REQ_OP_WRITE | write_flags, bh); |
1826 | wait_on_buffer(bh); |
1827 | if (buffer_write_io_error(bh)) { |
1828 | clear_buffer_write_io_error(bh); |
1829 | set_buffer_uptodate(bh); |
1830 | ret = -EIO; |
1831 | } |
1832 | if (ret) { |
1833 | printk(KERN_ERR "JBD2: I/O error when updating journal superblock for %s.\n", |
1834 | journal->j_devname); |
1835 | if (!is_journal_aborted(journal)) |
1836 | jbd2_journal_abort(journal, ret); |
1837 | } |
1838 | |
1839 | return ret; |
1840 | } |
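/*
 * Illustrative sketch (not part of the original file): the flag handling
 * above always ORs in JBD2_JOURNAL_REQ_FLAGS and strips FUA/PREFLUSH when
 * barriers are disabled. A caller asking for REQ_FUA on a journal mounted
 * without JBD2_BARRIER therefore ends up issuing a plain prioritized write:
 */
#if 0 /* example only */
static blk_opf_t example_effective_flags(journal_t *journal, blk_opf_t flags)
{
	flags |= JBD2_JOURNAL_REQ_FLAGS;	/* exempt from writeback QOS */
	if (!(journal->j_flags & JBD2_BARRIER))
		flags &= ~(REQ_FUA | REQ_PREFLUSH);	/* no barrier support */
	return flags;
}
#endif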
1841 | |
1842 | /** |
1843 | * jbd2_journal_update_sb_log_tail() - Update log tail in journal sb on disk. |
1844 | * @journal: The journal to update. |
1845 | * @tail_tid: TID of the new transaction at the tail of the log |
1846 | * @tail_block: The first block of the transaction at the tail of the log |
1847 | * @write_flags: Flags for the journal sb write operation |
1848 | * |
1849 | * Update a journal's superblock information about log tail and write it to |
1850 | * disk, waiting for the IO to complete. |
1851 | */ |
1852 | int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid, |
1853 | unsigned long tail_block, |
1854 | blk_opf_t write_flags) |
1855 | { |
1856 | journal_superblock_t *sb = journal->j_superblock; |
1857 | int ret; |
1858 | |
1859 | if (is_journal_aborted(journal)) |
1860 | return -EIO; |
1861 | if (jbd2_check_fs_dev_write_error(journal)) { |
1862 | jbd2_journal_abort(journal, -EIO); |
1863 | return -EIO; |
1864 | } |
1865 | |
1866 | BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex)); |
1867 | jbd2_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n", |
1868 | tail_block, tail_tid); |
1869 | |
1870 | lock_buffer(journal->j_sb_buffer);
1871 | sb->s_sequence = cpu_to_be32(tail_tid); |
1872 | sb->s_start = cpu_to_be32(tail_block); |
1873 | |
1874 | ret = jbd2_write_superblock(journal, write_flags); |
1875 | if (ret) |
1876 | goto out; |
1877 | |
1878 | /* Log is no longer empty */ |
1879 | write_lock(&journal->j_state_lock); |
1880 | journal->j_flags &= ~JBD2_FLUSHED; |
1881 | write_unlock(&journal->j_state_lock); |
1882 | |
1883 | out: |
1884 | return ret; |
1885 | } |
1886 | |
1887 | /** |
1888 | * jbd2_mark_journal_empty() - Mark on disk journal as empty. |
1889 | * @journal: The journal to update. |
1890 | * @write_flags: Flags for the journal sb write operation |
1891 | * |
1892 | * Update a journal's dynamic superblock fields to show that journal is empty. |
1893 | * Write updated superblock to disk waiting for IO to complete. |
1894 | */ |
1895 | static void jbd2_mark_journal_empty(journal_t *journal, blk_opf_t write_flags) |
1896 | { |
1897 | journal_superblock_t *sb = journal->j_superblock; |
1898 | bool had_fast_commit = false; |
1899 | |
1900 | BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex)); |
1901 | lock_buffer(journal->j_sb_buffer);
1902 | if (sb->s_start == 0) { /* Is it already empty? */
1903 | unlock_buffer(journal->j_sb_buffer);
1904 | return; |
1905 | } |
1906 | |
1907 | jbd2_debug(1, "JBD2: Marking journal as empty (seq %u)\n", |
1908 | journal->j_tail_sequence); |
1909 | |
1910 | sb->s_sequence = cpu_to_be32(journal->j_tail_sequence); |
1911 | sb->s_start = cpu_to_be32(0); |
1912 | sb->s_head = cpu_to_be32(journal->j_head); |
1913 | if (jbd2_has_feature_fast_commit(journal)) {
1914 | /*
1915 | * When the journal is clean, there is no need to keep the fast
1916 | * commit flag set and make the file system incompatible with
1917 | * older kernels.
1918 | */
1919 | jbd2_clear_feature_fast_commit(journal);
1919 | had_fast_commit = true; |
1920 | } |
1921 | |
1922 | jbd2_write_superblock(journal, write_flags); |
1923 | |
1924 | if (had_fast_commit) |
1925 | jbd2_set_feature_fast_commit(journal);
1926 | |
1927 | /* Log is empty */ |
1928 | write_lock(&journal->j_state_lock); |
1929 | journal->j_flags |= JBD2_FLUSHED; |
1930 | write_unlock(&journal->j_state_lock); |
1931 | } |
1932 | |
1933 | /** |
1934 | * __jbd2_journal_erase() - Discard or zeroout journal blocks (excluding superblock) |
1935 | * @journal: The journal to erase. |
1936 | * @flags: A discard/zeroout request is sent for each physically contiguous
1937 | * region of the journal. Either JBD2_JOURNAL_FLUSH_DISCARD or |
1938 | * JBD2_JOURNAL_FLUSH_ZEROOUT must be set to determine which operation |
1939 | * to perform. |
1940 | * |
1941 | * Note: JBD2_JOURNAL_FLUSH_ZEROOUT attempts to use hardware offload. Zeroes |
1942 | * will be explicitly written if no hardware offload is available, see |
1943 | * blkdev_issue_zeroout for more details. |
1944 | */ |
1945 | static int __jbd2_journal_erase(journal_t *journal, unsigned int flags) |
1946 | { |
1947 | int err = 0; |
1948 | unsigned long block, log_offset; /* logical */ |
1949 | unsigned long long phys_block, block_start, block_stop; /* physical */ |
1950 | loff_t byte_start, byte_stop, byte_count; |
1951 | |
1952 | /* flags must be set to either discard or zeroout */ |
1953 | if ((flags & ~JBD2_JOURNAL_FLUSH_VALID) || !flags || |
1954 | ((flags & JBD2_JOURNAL_FLUSH_DISCARD) && |
1955 | (flags & JBD2_JOURNAL_FLUSH_ZEROOUT))) |
1956 | return -EINVAL; |
1957 | |
1958 | if ((flags & JBD2_JOURNAL_FLUSH_DISCARD) && |
1959 | !bdev_max_discard_sectors(journal->j_dev))
1960 | return -EOPNOTSUPP; |
1961 | |
1962 | /* |
1963 | * lookup block mapping and issue discard/zeroout for each |
1964 | * contiguous region |
1965 | */ |
1966 | log_offset = be32_to_cpu(journal->j_superblock->s_first); |
1967 | block_start = ~0ULL; |
1968 | for (block = log_offset; block < journal->j_total_len; block++) { |
1969 | err = jbd2_journal_bmap(journal, block, &phys_block);
1970 | if (err) { |
1971 | pr_err("JBD2: bad block at offset %lu", block); |
1972 | return err; |
1973 | } |
1974 | |
1975 | if (block_start == ~0ULL) |
1976 | block_stop = block_start = phys_block; |
1977 | |
1978 | /* |
1979 | * last block not contiguous with current block, |
1980 | * process last contiguous region and return to this block on |
1981 | * next loop |
1982 | */ |
1983 | if (phys_block != block_stop) { |
1984 | block--; |
1985 | } else { |
1986 | block_stop++; |
1987 | /* |
1988 | * if this isn't the last block of journal, |
1989 | * no need to process now because next block may also |
1990 | * be part of this contiguous region |
1991 | */ |
1992 | if (block != journal->j_total_len - 1) |
1993 | continue; |
1994 | } |
1995 | |
1996 | /* |
1997 | * end of contiguous region or this is last block of journal, |
1998 | * take care of the region |
1999 | */ |
2000 | byte_start = block_start * journal->j_blocksize; |
2001 | byte_stop = block_stop * journal->j_blocksize; |
2002 | byte_count = (block_stop - block_start) * journal->j_blocksize; |
2003 | |
2004 | truncate_inode_pages_range(journal->j_dev->bd_mapping,
2005 | byte_start, byte_stop - 1);
2006 | |
2007 | if (flags & JBD2_JOURNAL_FLUSH_DISCARD) { |
2008 | err = blkdev_issue_discard(journal->j_dev,
2009 | byte_start >> SECTOR_SHIFT,
2010 | byte_count >> SECTOR_SHIFT,
2011 | GFP_NOFS);
2012 | } else if (flags & JBD2_JOURNAL_FLUSH_ZEROOUT) {
2013 | err = blkdev_issue_zeroout(journal->j_dev,
2014 | byte_start >> SECTOR_SHIFT,
2015 | byte_count >> SECTOR_SHIFT,
2016 | GFP_NOFS, 0);
2017 | } |
2018 | |
2019 | if (unlikely(err != 0)) { |
2020 | pr_err("JBD2: (error %d) unable to wipe journal at physical blocks [%llu, %llu)", |
2021 | err, block_start, block_stop); |
2022 | return err; |
2023 | } |
2024 | |
2025 | /* reset start and stop after processing a region */ |
2026 | block_start = ~0ULL; |
2027 | } |
2028 | |
2029 | return blkdev_issue_flush(journal->j_dev);
2030 | } |
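/*
 * Illustrative arithmetic (not part of the original file): the
 * discard/zeroout calls above convert a physically contiguous block extent
 * into byte offsets and then into 512-byte sectors. E.g. with a 4KiB block
 * size, blocks [100, 164) become byte_start = 409600 and byte_count =
 * 262144, i.e. sector 800 for 512 sectors:
 */
#if 0 /* example only */
static void example_extent_to_sectors(unsigned long long block_start,
				      unsigned long long block_stop,
				      unsigned int blocksize)
{
	loff_t byte_start = block_start * blocksize;
	loff_t byte_count = (block_stop - block_start) * blocksize;
	sector_t sector = byte_start >> SECTOR_SHIFT;	/* SECTOR_SHIFT == 9 */
	sector_t nr_sects = byte_count >> SECTOR_SHIFT;

	pr_info("extent: sector %llu, %llu sectors\n",
		(unsigned long long)sector, (unsigned long long)nr_sects);
}
#endif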
2031 | |
2032 | /** |
2033 | * jbd2_journal_update_sb_errno() - Update error in the journal. |
2034 | * @journal: The journal to update. |
2035 | * |
2036 | * Update a journal's errno. Write updated superblock to disk waiting for IO |
2037 | * to complete. |
2038 | */ |
2039 | void jbd2_journal_update_sb_errno(journal_t *journal) |
2040 | { |
2041 | journal_superblock_t *sb = journal->j_superblock; |
2042 | int errcode; |
2043 | |
2044 | lock_buffer(journal->j_sb_buffer);
2045 | errcode = journal->j_errno; |
2046 | if (errcode == -ESHUTDOWN) |
2047 | errcode = 0; |
2048 | jbd2_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode); |
2049 | sb->s_errno = cpu_to_be32(errcode); |
2050 | |
2051 | jbd2_write_superblock(journal, REQ_FUA); |
2052 | } |
2053 | EXPORT_SYMBOL(jbd2_journal_update_sb_errno); |
2054 | |
2055 | /** |
2056 | * jbd2_journal_load() - Read journal from disk. |
2057 | * @journal: Journal to act on. |
2058 | * |
2059 | * Given a journal_t structure which tells us which disk blocks contain |
2060 | * a journal, read the journal from disk to initialise the in-memory |
2061 | * structures. |
2062 | */ |
2063 | int jbd2_journal_load(journal_t *journal) |
2064 | { |
2065 | int err; |
2066 | journal_superblock_t *sb = journal->j_superblock; |
2067 | |
2068 | /* |
2069 | * Create a slab for this blocksize |
2070 | */ |
2071 | err = jbd2_journal_create_slab(be32_to_cpu(sb->s_blocksize)); |
2072 | if (err) |
2073 | return err; |
2074 | |
2075 | /* Let the recovery code check whether it needs to recover any |
2076 | * data from the journal. */ |
2077 | err = jbd2_journal_recover(journal); |
2078 | if (err) { |
2079 | pr_warn("JBD2: journal recovery failed\n"); |
2080 | return err; |
2081 | } |
2082 | |
2083 | if (journal->j_failed_commit) { |
2084 | printk(KERN_ERR "JBD2: journal transaction %u on %s " |
2085 | "is corrupt.\n", journal->j_failed_commit, |
2086 | journal->j_devname); |
2087 | return -EFSCORRUPTED; |
2088 | } |
2089 | /* |
2090 | * clear JBD2_ABORT flag initialized in journal_init_common |
2091 | * here to update log tail information with the newest seq. |
2092 | */ |
2093 | journal->j_flags &= ~JBD2_ABORT; |
2094 | |
2095 | /* OK, we've finished with the dynamic journal bits: |
2096 | * reinitialise the dynamic contents of the superblock in memory |
2097 | * and reset them on disk. */ |
2098 | err = journal_reset(journal); |
2099 | if (err) { |
2100 | pr_warn("JBD2: journal reset failed\n"); |
2101 | return err; |
2102 | } |
2103 | |
2104 | journal->j_flags |= JBD2_LOADED; |
2105 | return 0; |
2106 | } |
2107 | |
2108 | /** |
2109 | * jbd2_journal_destroy() - Release a journal_t structure. |
2110 | * @journal: Journal to act on. |
2111 | * |
2112 | * Release a journal_t structure once it is no longer in use by the |
2113 | * journaled object. |
2114 | * Return <0 if we couldn't clean up the journal. |
2115 | */ |
2116 | int jbd2_journal_destroy(journal_t *journal) |
2117 | { |
2118 | int err = 0; |
2119 | |
2120 | /* Wait for the commit thread to wake up and die. */ |
2121 | journal_kill_thread(journal); |
2122 | |
2123 | /* Force a final log commit */ |
2124 | if (journal->j_running_transaction) |
2125 | jbd2_journal_commit_transaction(journal); |
2126 | |
2127 | /* Force any old transactions to disk */ |
2128 | |
2129 | /* Totally anal locking here... */ |
2130 | spin_lock(&journal->j_list_lock);
2131 | while (journal->j_checkpoint_transactions != NULL) {
2132 | spin_unlock(&journal->j_list_lock);
2133 | mutex_lock_io(&journal->j_checkpoint_mutex);
2134 | err = jbd2_log_do_checkpoint(journal);
2135 | mutex_unlock(&journal->j_checkpoint_mutex);
2136 | /* |
2137 | * If checkpointing failed, just free the buffers to avoid |
2138 | * looping forever |
2139 | */ |
2140 | if (err) { |
2141 | jbd2_journal_destroy_checkpoint(journal); |
2142 | spin_lock(&journal->j_list_lock);
2143 | break;
2144 | }
2145 | spin_lock(&journal->j_list_lock);
2146 | } |
2147 | |
2148 | J_ASSERT(journal->j_running_transaction == NULL); |
2149 | J_ASSERT(journal->j_committing_transaction == NULL); |
2150 | J_ASSERT(journal->j_checkpoint_transactions == NULL); |
2151 | spin_unlock(&journal->j_list_lock);
2152 | |
2153 | /* |
2154 | * OK, all checkpoint transactions have been checked, now check the |
2155 | * writeback errseq of fs dev and abort the journal if some buffer |
2156 | * failed to write back to the original location, otherwise the |
2157 | * filesystem may become inconsistent. |
2158 | */ |
2159 | if (!is_journal_aborted(journal) && |
2160 | jbd2_check_fs_dev_write_error(journal)) |
2161 | jbd2_journal_abort(journal, -EIO); |
2162 | |
2163 | if (journal->j_sb_buffer) { |
2164 | if (!is_journal_aborted(journal)) { |
2165 | mutex_lock_io(&journal->j_checkpoint_mutex); |
2166 | |
2167 | write_lock(&journal->j_state_lock); |
2168 | journal->j_tail_sequence = |
2169 | ++journal->j_transaction_sequence; |
2170 | write_unlock(&journal->j_state_lock); |
2171 | |
2172 | jbd2_mark_journal_empty(journal, REQ_PREFLUSH | REQ_FUA); |
2173 | mutex_unlock(&journal->j_checkpoint_mutex);
2174 | } else
2175 | err = -EIO;
2176 | brelse(journal->j_sb_buffer);
2177 | } |
2178 | |
2179 | if (journal->j_shrinker) { |
2180 | percpu_counter_destroy(&journal->j_checkpoint_jh_count);
2181 | shrinker_free(journal->j_shrinker);
2182 | } |
2183 | if (journal->j_proc_entry) |
2184 | jbd2_stats_proc_exit(journal); |
2185 | iput(journal->j_inode); |
2186 | if (journal->j_revoke) |
2187 | jbd2_journal_destroy_revoke(journal); |
2188 | kfree(journal->j_fc_wbuf);
2189 | kfree(journal->j_wbuf);
2190 | kfree(journal);
2191 | |
2192 | return err; |
2193 | } |
2194 | |
2195 | |
2196 | /** |
2197 | * jbd2_journal_check_used_features() - Check if features specified are used. |
2198 | * @journal: Journal to check. |
2199 | * @compat: bitmask of compatible features |
2200 | * @ro: bitmask of features that force read-only mount |
2201 | * @incompat: bitmask of incompatible features |
2202 | * |
2203 | * Check whether the journal uses all of a given set of |
2204 | * features. Return true (non-zero) if it does. |
2205 | **/ |
2206 | |
2207 | int jbd2_journal_check_used_features(journal_t *journal, unsigned long compat, |
2208 | unsigned long ro, unsigned long incompat) |
2209 | { |
2210 | journal_superblock_t *sb; |
2211 | |
2212 | if (!compat && !ro && !incompat) |
2213 | return 1; |
2214 | if (!jbd2_format_support_feature(journal))
2215 | return 0; |
2216 | |
2217 | sb = journal->j_superblock; |
2218 | |
2219 | if (((be32_to_cpu(sb->s_feature_compat) & compat) == compat) && |
2220 | ((be32_to_cpu(sb->s_feature_ro_compat) & ro) == ro) && |
2221 | ((be32_to_cpu(sb->s_feature_incompat) & incompat) == incompat)) |
2222 | return 1; |
2223 | |
2224 | return 0; |
2225 | } |
2226 | |
2227 | /** |
2228 | * jbd2_journal_check_available_features() - Check feature set in journalling layer |
2229 | * @journal: Journal to check. |
2230 | * @compat: bitmask of compatible features |
2231 | * @ro: bitmask of features that force read-only mount |
2232 | * @incompat: bitmask of incompatible features |
2233 | * |
2234 | * Check whether the journaling code supports the use of |
2235 | * all of a given set of features on this journal. Return true |
2236 | * (non-zero) if it can. */ |
2237 | |
2238 | int jbd2_journal_check_available_features(journal_t *journal, unsigned long compat, |
2239 | unsigned long ro, unsigned long incompat) |
2240 | { |
2241 | if (!compat && !ro && !incompat) |
2242 | return 1; |
2243 | |
2244 | if (!jbd2_format_support_feature(journal))
2245 | return 0; |
2246 | |
2247 | if ((compat & JBD2_KNOWN_COMPAT_FEATURES) == compat && |
2248 | (ro & JBD2_KNOWN_ROCOMPAT_FEATURES) == ro && |
2249 | (incompat & JBD2_KNOWN_INCOMPAT_FEATURES) == incompat) |
2250 | return 1; |
2251 | |
2252 | return 0; |
2253 | } |
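/*
 * Usage sketch (illustrative, not part of the original file): a filesystem
 * probing journal features at mount time. The feature mask is a real jbd2
 * constant; the policy shown is made up for the example.
 */
#if 0 /* example only */
static int example_check_features(journal_t *journal)
{
	/* Refuse the mount if the journal uses 64-bit block numbers but
	 * this kernel's jbd2 cannot handle them. */
	if (jbd2_journal_check_used_features(journal, 0, 0,
					     JBD2_FEATURE_INCOMPAT_64BIT) &&
	    !jbd2_journal_check_available_features(journal, 0, 0,
					     JBD2_FEATURE_INCOMPAT_64BIT))
		return -EINVAL;
	return 0;
}
#endif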
2254 | |
2255 | static int |
2256 | jbd2_journal_initialize_fast_commit(journal_t *journal) |
2257 | { |
2258 | journal_superblock_t *sb = journal->j_superblock; |
2259 | unsigned long long num_fc_blks; |
2260 | |
2261 | num_fc_blks = jbd2_journal_get_num_fc_blks(sb);
2262 | if (journal->j_last - num_fc_blks < JBD2_MIN_JOURNAL_BLOCKS) |
2263 | return -ENOSPC; |
2264 | |
2265 | /* Are we called twice? */ |
2266 | WARN_ON(journal->j_fc_wbuf != NULL); |
2267 | journal->j_fc_wbuf = kmalloc_array(num_fc_blks, |
2268 | sizeof(struct buffer_head *), GFP_KERNEL); |
2269 | if (!journal->j_fc_wbuf) |
2270 | return -ENOMEM; |
2271 | |
2272 | journal->j_fc_wbufsize = num_fc_blks; |
2273 | journal->j_fc_last = journal->j_last; |
2274 | journal->j_last = journal->j_fc_last - num_fc_blks; |
2275 | journal->j_fc_first = journal->j_last + 1; |
2276 | journal->j_fc_off = 0; |
2277 | journal->j_free = journal->j_last - journal->j_first; |
2278 | |
2279 | return 0; |
2280 | } |
2281 | |
2282 | /** |
2283 | * jbd2_journal_set_features() - Mark a given journal feature in the superblock |
2284 | * @journal: Journal to act on. |
2285 | * @compat: bitmask of compatible features |
2286 | * @ro: bitmask of features that force read-only mount |
2287 | * @incompat: bitmask of incompatible features |
2288 | * |
2289 | * Mark a given journal feature as present on the |
2290 | * superblock. Returns true if the requested features could be set. |
2291 | * |
2292 | */ |
2293 | |
2294 | int jbd2_journal_set_features(journal_t *journal, unsigned long compat, |
2295 | unsigned long ro, unsigned long incompat) |
2296 | { |
2297 | #define INCOMPAT_FEATURE_ON(f) \ |
2298 | ((incompat & (f)) && !(sb->s_feature_incompat & cpu_to_be32(f))) |
2299 | #define COMPAT_FEATURE_ON(f) \ |
2300 | ((compat & (f)) && !(sb->s_feature_compat & cpu_to_be32(f))) |
2301 | journal_superblock_t *sb; |
2302 | |
2303 | if (jbd2_journal_check_used_features(journal, compat, ro, incompat)) |
2304 | return 1; |
2305 | |
2306 | if (!jbd2_journal_check_available_features(journal, compat, ro, incompat)) |
2307 | return 0; |
2308 | |
2309 | /* If enabling v2 checksums, turn on v3 instead */ |
2310 | if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2) { |
2311 | incompat &= ~JBD2_FEATURE_INCOMPAT_CSUM_V2; |
2312 | incompat |= JBD2_FEATURE_INCOMPAT_CSUM_V3; |
2313 | } |
2314 | |
2315 | /* Asking for checksumming v3 and v1? Only give them v3. */ |
2316 | if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V3 && |
2317 | compat & JBD2_FEATURE_COMPAT_CHECKSUM) |
2318 | compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM; |
2319 | |
2320 | jbd2_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n", |
2321 | compat, ro, incompat); |
2322 | |
2323 | sb = journal->j_superblock; |
2324 | |
2325 | if (incompat & JBD2_FEATURE_INCOMPAT_FAST_COMMIT) { |
2326 | if (jbd2_journal_initialize_fast_commit(journal)) { |
2327 | pr_err("JBD2: Cannot enable fast commits.\n"); |
2328 | return 0; |
2329 | } |
2330 | } |
2331 | |
2332 | lock_buffer(journal->j_sb_buffer);
2333 | |
2334 | /* If enabling v3 checksums, update superblock and precompute seed */ |
2335 | if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) { |
2336 | sb->s_checksum_type = JBD2_CRC32C_CHKSUM; |
2337 | sb->s_feature_compat &= |
2338 | ~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM); |
2339 | journal->j_csum_seed = jbd2_chksum(~0, sb->s_uuid,
2340 | sizeof(sb->s_uuid));
2341 | } |
2342 | |
2343 | /* If enabling v1 checksums, downgrade superblock */ |
2344 | if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM)) |
2345 | sb->s_feature_incompat &= |
2346 | ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2 | |
2347 | JBD2_FEATURE_INCOMPAT_CSUM_V3); |
2348 | |
2349 | sb->s_feature_compat |= cpu_to_be32(compat); |
2350 | sb->s_feature_ro_compat |= cpu_to_be32(ro); |
2351 | sb->s_feature_incompat |= cpu_to_be32(incompat); |
2352 | unlock_buffer(journal->j_sb_buffer);
2353 | jbd2_journal_init_transaction_limits(journal); |
2354 | |
2355 | return 1; |
2356 | #undef COMPAT_FEATURE_ON |
2357 | #undef INCOMPAT_FEATURE_ON |
2358 | } |
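/*
 * Usage sketch (illustrative, not part of the original file): enabling v3
 * checksums on a journal. Note the v2->v3 upgrade and the v1-vs-v3
 * exclusion handled inside jbd2_journal_set_features() above.
 */
#if 0 /* example only */
static int example_enable_csum(journal_t *journal)
{
	/* Asking for CSUM_V2 would be silently upgraded to CSUM_V3. */
	if (!jbd2_journal_set_features(journal, 0, 0,
				       JBD2_FEATURE_INCOMPAT_CSUM_V3))
		return -EOPNOTSUPP;	/* not supported by this journal */
	return 0;
}
#endif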
2359 | |
2360 | /* |
2361 | * jbd2_journal_clear_features() - Clear a given journal feature in the |
2362 | * superblock |
2363 | * @journal: Journal to act on. |
2364 | * @compat: bitmask of compatible features |
2365 | * @ro: bitmask of features that force read-only mount |
2366 | * @incompat: bitmask of incompatible features |
2367 | * |
2368 | * Clear the given journal features from the
2369 | * superblock.
2370 | */ |
2371 | void jbd2_journal_clear_features(journal_t *journal, unsigned long compat, |
2372 | unsigned long ro, unsigned long incompat) |
2373 | { |
2374 | journal_superblock_t *sb; |
2375 | |
2376 | jbd2_debug(1, "Clear features 0x%lx/0x%lx/0x%lx\n", |
2377 | compat, ro, incompat); |
2378 | |
2379 | sb = journal->j_superblock; |
2380 | |
2381 | sb->s_feature_compat &= ~cpu_to_be32(compat); |
2382 | sb->s_feature_ro_compat &= ~cpu_to_be32(ro); |
2383 | sb->s_feature_incompat &= ~cpu_to_be32(incompat); |
2384 | jbd2_journal_init_transaction_limits(journal); |
2385 | } |
2386 | EXPORT_SYMBOL(jbd2_journal_clear_features); |
2387 | |
2388 | /** |
2389 | * jbd2_journal_flush() - Flush journal |
2390 | * @journal: Journal to act on. |
2391 | * @flags: optional operation on the journal blocks after the flush (see below) |
2392 | * |
2393 | * Flush all data for a given journal to disk and empty the journal. |
2394 | * Filesystems can use this when remounting readonly to ensure that |
2395 | * recovery does not need to happen on remount. Optionally, a discard or zeroout |
2396 | * can be issued on the journal blocks after flushing. |
2397 | * |
2398 | * flags: |
2399 | * JBD2_JOURNAL_FLUSH_DISCARD: issues discards for the journal blocks |
2400 | * JBD2_JOURNAL_FLUSH_ZEROOUT: issues zeroouts for the journal blocks |
2401 | */ |
2402 | int jbd2_journal_flush(journal_t *journal, unsigned int flags) |
2403 | { |
2404 | int err = 0; |
2405 | transaction_t *transaction = NULL; |
2406 | |
2407 | write_lock(&journal->j_state_lock); |
2408 | |
2409 | /* Force everything buffered to the log... */ |
2410 | if (journal->j_running_transaction) { |
2411 | transaction = journal->j_running_transaction; |
2412 | __jbd2_log_start_commit(journal, transaction->t_tid);
2413 | } else if (journal->j_committing_transaction) |
2414 | transaction = journal->j_committing_transaction; |
2415 | |
2416 | /* Wait for the log commit to complete... */ |
2417 | if (transaction) { |
2418 | tid_t tid = transaction->t_tid; |
2419 | |
2420 | write_unlock(&journal->j_state_lock); |
2421 | jbd2_log_wait_commit(journal, tid); |
2422 | } else { |
2423 | write_unlock(&journal->j_state_lock); |
2424 | } |
2425 | |
2426 | /* ...and flush everything in the log out to disk. */ |
2427 | spin_lock(&journal->j_list_lock);
2428 | while (!err && journal->j_checkpoint_transactions != NULL) {
2429 | spin_unlock(&journal->j_list_lock);
2430 | mutex_lock_io(&journal->j_checkpoint_mutex);
2431 | err = jbd2_log_do_checkpoint(journal);
2432 | mutex_unlock(&journal->j_checkpoint_mutex);
2433 | spin_lock(&journal->j_list_lock);
2434 | }
2435 | spin_unlock(&journal->j_list_lock);
2436 | |
2437 | if (is_journal_aborted(journal)) |
2438 | return -EIO; |
2439 | |
2440 | mutex_lock_io(&journal->j_checkpoint_mutex); |
2441 | if (!err) { |
2442 | err = jbd2_cleanup_journal_tail(journal); |
2443 | if (err < 0) { |
2444 | mutex_unlock(&journal->j_checkpoint_mutex);
2445 | goto out; |
2446 | } |
2447 | err = 0; |
2448 | } |
2449 | |
2450 | /* Finally, mark the journal as really needing no recovery. |
2451 | * This sets s_start==0 in the underlying superblock, which is |
2452 | * the magic code for a fully-recovered superblock. Any future |
2453 | * commits of data to the journal will restore the current |
2454 | * s_start value. */ |
2455 | jbd2_mark_journal_empty(journal, REQ_FUA); |
2456 | |
2457 | if (flags) |
2458 | err = __jbd2_journal_erase(journal, flags); |
2459 | |
2460 | mutex_unlock(&journal->j_checkpoint_mutex);
2461 | write_lock(&journal->j_state_lock); |
2462 | J_ASSERT(!journal->j_running_transaction); |
2463 | J_ASSERT(!journal->j_committing_transaction); |
2464 | J_ASSERT(!journal->j_checkpoint_transactions); |
2465 | J_ASSERT(journal->j_head == journal->j_tail); |
2466 | J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence); |
2467 | write_unlock(&journal->j_state_lock); |
2468 | out: |
2469 | return err; |
2470 | } |
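/*
 * Usage sketch (illustrative, not part of the original file): a filesystem
 * emptying the journal while transitioning to read-only, discarding the
 * now-stale journal blocks so recovery is never attempted on remount. The
 * fallback policy is made up for the example.
 */
#if 0 /* example only */
static int example_remount_ro_flush(journal_t *journal)
{
	int err;

	/* Empty the log; optionally discard the now-unused blocks. */
	err = jbd2_journal_flush(journal, JBD2_JOURNAL_FLUSH_DISCARD);
	if (err == -EOPNOTSUPP)	/* device cannot discard; flush only */
		err = jbd2_journal_flush(journal, 0);
	return err;
}
#endif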
2471 | |
2472 | /** |
2473 | * jbd2_journal_wipe() - Wipe journal contents |
2474 | * @journal: Journal to act on. |
2475 | * @write: flag (see below) |
2476 | * |
2477 | * Wipe out all of the contents of a journal, safely. This will produce |
2478 | * a warning if the journal contains any valid recovery information. |
2479 | * Must be called between journal_init_*() and jbd2_journal_load(). |
2480 | * |
2481 | * If 'write' is non-zero, then we wipe out the journal on disk; otherwise |
2482 | * we merely suppress recovery. |
2483 | */ |
2484 | |
2485 | int jbd2_journal_wipe(journal_t *journal, int write) |
2486 | { |
2487 | int err; |
2488 | |
2489 | J_ASSERT (!(journal->j_flags & JBD2_LOADED)); |
2490 | |
2491 | if (!journal->j_tail) |
2492 | return 0; |
2493 | |
2494 | printk(KERN_WARNING "JBD2: %s recovery information on journal\n", |
2495 | write ? "Clearing": "Ignoring"); |
2496 | |
2497 | err = jbd2_journal_skip_recovery(journal); |
2498 | if (write) { |
2499 | /* Lock to make assertions happy... */ |
2500 | mutex_lock_io(&journal->j_checkpoint_mutex); |
2501 | jbd2_mark_journal_empty(journal, REQ_FUA); |
2502 | mutex_unlock(&journal->j_checkpoint_mutex);
2503 | } |
2504 | |
2505 | return err; |
2506 | } |
2507 | |
2508 | /** |
2509 | * jbd2_journal_abort () - Shutdown the journal immediately. |
2510 | * @journal: the journal to shutdown. |
2511 | * @errno: an error number to record in the journal indicating |
2512 | * the reason for the shutdown. |
2513 | * |
2514 | * Perform a complete, immediate shutdown of the ENTIRE |
2515 | * journal (not of a single transaction). This operation cannot be |
2516 | * undone without closing and reopening the journal. |
2517 | * |
2518 | * The jbd2_journal_abort function is intended to support higher level error |
2519 | * recovery mechanisms such as the ext2/ext3 remount-readonly error |
2520 | * mode. |
2521 | * |
2522 | * Journal abort has very specific semantics. Any existing dirty, |
2523 | * unjournaled buffers in the main filesystem will still be written to |
2524 | * disk by bdflush, but the journaling mechanism will be suspended |
2525 | * immediately and no further transaction commits will be honoured. |
2526 | * |
2527 | * Any dirty, journaled buffers will be written back to disk without |
2528 | * hitting the journal. Atomicity cannot be guaranteed on an aborted |
2529 | * filesystem, but we _do_ attempt to leave as much data as possible |
2530 | * behind for fsck to use for cleanup. |
2531 | * |
2532 | * Any attempt to get a new transaction handle on a journal which is in |
2533 | * ABORT state will just result in an -EROFS error return. A |
2534 | * jbd2_journal_stop on an existing handle will return -EIO if we have |
2535 | * entered abort state during the update. |
2536 | * |
2537 | * Recursive transactions are not disturbed by journal abort until the |
2538 | * final jbd2_journal_stop, which will receive the -EIO error. |
2539 | * |
2540 | * Finally, the jbd2_journal_abort call allows the caller to supply an errno |
2541 | * which will be recorded (if possible) in the journal superblock. This |
2542 | * allows a client to record failure conditions in the middle of a |
2543 | * transaction without having to complete the transaction to record the |
2544 | * failure to disk. ext3_error, for example, now uses this |
2545 | * functionality. |
2546 | * |
2547 | */ |
2548 | |
2549 | void jbd2_journal_abort(journal_t *journal, int errno) |
2550 | { |
2551 | transaction_t *transaction; |
2552 | |
2553 | /* |
2554 | * Lock the aborting procedure until everything is done; this avoids
2555 | * races with the filesystem's error handling flow (e.g. ext4_abort())
2556 | * and ensures any panic happens only after the error info has been
2557 | * written into the journal's superblock.
2558 | */ |
2559 | mutex_lock(&journal->j_abort_mutex); |
2560 | /* |
2561 | * ESHUTDOWN always takes precedence because a file system check
2562 | * caused by any other journal abort error is not required once
2563 | * a shutdown has been triggered.
2564 | */ |
2565 | write_lock(&journal->j_state_lock); |
2566 | if (journal->j_flags & JBD2_ABORT) { |
2567 | int old_errno = journal->j_errno; |
2568 | |
2569 | write_unlock(&journal->j_state_lock); |
2570 | if (old_errno != -ESHUTDOWN && errno == -ESHUTDOWN) { |
2571 | journal->j_errno = errno; |
2572 | jbd2_journal_update_sb_errno(journal); |
2573 | } |
2574 | mutex_unlock(&journal->j_abort_mutex);
2575 | return; |
2576 | } |
2577 | |
2578 | /* |
2579 | * Mark the abort as having occurred and start the current running
2580 | * transaction to release all journaled buffers.
2581 | */ |
2582 | pr_err("Aborting journal on device %s.\n", journal->j_devname); |
2583 | |
2584 | journal->j_flags |= JBD2_ABORT; |
2585 | journal->j_errno = errno; |
2586 | transaction = journal->j_running_transaction; |
2587 | if (transaction) |
2588 | __jbd2_log_start_commit(journal, transaction->t_tid);
2589 | write_unlock(&journal->j_state_lock); |
2590 | |
2591 | /* |
2592 | * Record errno to the journal super block, so that fsck and the jbd2
2593 | * layer can realise that a filesystem check is needed.
2594 | */ |
2595 | jbd2_journal_update_sb_errno(journal); |
2596 | mutex_unlock(&journal->j_abort_mutex);
2597 | } |
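/*
 * Usage sketch (illustrative, not part of the original file): how a
 * filesystem's error path might combine the abort/errno/ack calls above.
 */
#if 0 /* example only */
static void example_handle_fs_error(journal_t *journal)
{
	jbd2_journal_abort(journal, -EIO);	/* record errno, shut down */

	/* Later, e.g. at remount time, inspect and acknowledge the error
	 * so the filesystem can leave read-only mode. */
	if (jbd2_journal_errno(journal) == -EROFS)
		jbd2_journal_ack_err(journal);
}
#endif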
2598 | |
2599 | /** |
2600 | * jbd2_journal_errno() - returns the journal's error state. |
2601 | * @journal: journal to examine. |
2602 | * |
2603 | * This is the errno number set with jbd2_journal_abort(), the last |
2604 | * time the journal was mounted - if the journal was stopped |
2605 | * without calling abort this will be 0. |
2606 | * |
2607 | * If the journal has been aborted on this mount time -EROFS will |
2608 | * be returned. |
2609 | */ |
2610 | int jbd2_journal_errno(journal_t *journal) |
2611 | { |
2612 | int err; |
2613 | |
2614 | read_lock(&journal->j_state_lock); |
2615 | if (journal->j_flags & JBD2_ABORT) |
2616 | err = -EROFS; |
2617 | else |
2618 | err = journal->j_errno; |
2619 | read_unlock(&journal->j_state_lock); |
2620 | return err; |
2621 | } |
2622 | |
2623 | /** |
2624 | * jbd2_journal_clear_err() - clears the journal's error state |
2625 | * @journal: journal to act on. |
2626 | * |
2627 | * An error must be cleared or acked to take a FS out of readonly |
2628 | * mode. |
2629 | */ |
2630 | int jbd2_journal_clear_err(journal_t *journal) |
2631 | { |
2632 | int err = 0; |
2633 | |
2634 | write_lock(&journal->j_state_lock); |
2635 | if (journal->j_flags & JBD2_ABORT) |
2636 | err = -EROFS; |
2637 | else |
2638 | journal->j_errno = 0; |
2639 | write_unlock(&journal->j_state_lock); |
2640 | return err; |
2641 | } |
2642 | |
2643 | /** |
2644 | * jbd2_journal_ack_err() - Ack journal err. |
2645 | * @journal: journal to act on. |
2646 | * |
2647 | * An error must be cleared or acked to take a FS out of readonly |
2648 | * mode. |
2649 | */ |
2650 | void jbd2_journal_ack_err(journal_t *journal) |
2651 | { |
2652 | write_lock(&journal->j_state_lock); |
2653 | if (journal->j_errno) |
2654 | journal->j_flags |= JBD2_ACK_ERR; |
2655 | write_unlock(&journal->j_state_lock); |
2656 | } |
2657 | |
2658 | int jbd2_journal_blocks_per_folio(struct inode *inode) |
2659 | { |
2660 | return 1 << (PAGE_SHIFT + mapping_max_folio_order(inode->i_mapping) -
2661 | inode->i_sb->s_blocksize_bits); |
2662 | } |
2663 | |
2664 | /* |
2665 | * helper functions to deal with 32 or 64bit block numbers. |
2666 | */ |
2667 | size_t journal_tag_bytes(journal_t *journal) |
2668 | { |
2669 | size_t sz; |
2670 | |
2671 | if (jbd2_has_feature_csum3(journal))
2672 | return sizeof(journal_block_tag3_t); |
2673 | |
2674 | sz = sizeof(journal_block_tag_t); |
2675 | |
2676 | if (jbd2_has_feature_csum2(journal))
2677 | sz += sizeof(__u16); |
2678 | |
2679 | if (jbd2_has_feature_64bit(journal))
2680 | return sz; |
2681 | else |
2682 | return sz - sizeof(__u32); |
2683 | } |
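/*
 * Illustrative arithmetic (not part of the original file): with the v3
 * checksum feature each tag is sizeof(journal_block_tag3_t) == 16 bytes
 * (four __be32 fields), so a 4KiB descriptor block can describe roughly
 * 4096 / 16 = 256 data blocks. This is the same per-descriptor capacity
 * bound used to size j_wbuf in journal_init_common() above.
 */
#if 0 /* example only */
static unsigned int example_tags_per_descriptor(journal_t *journal)
{
	/* e.g. 4096 / 16 == 256 for a v3-checksummed 4KiB journal */
	return journal->j_blocksize / journal_tag_bytes(journal);
}
#endif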
2684 | |
2685 | /* |
2686 | * JBD memory management |
2687 | * |
2688 | * These functions are used to allocate block-sized chunks of memory |
2689 | * used for making copies of buffer_head data. Very often it will be |
2690 | * page-sized chunks of data, but sometimes it will be in |
2691 | * sub-page-size chunks. (For example, 16k pages on Power systems |
2692 | * with a 4k block file system.) For blocks smaller than a page, we |
2693 | * use a SLAB allocator. There are slab caches for each block size, |
2694 | * which are allocated at mount time, if necessary, and we only free |
2695 | * (all of) the slab caches when/if the jbd2 module is unloaded. For |
2696 | * this reason we don't need a mutex to protect access to
2697 | * jbd2_slab[] when allocating or releasing memory; we only need one
2698 | * in jbd2_journal_create_slab().
2699 | */ |
2700 | #define JBD2_MAX_SLABS 8 |
2701 | static struct kmem_cache *jbd2_slab[JBD2_MAX_SLABS]; |
2702 | |
2703 | static const char *jbd2_slab_names[JBD2_MAX_SLABS] = { |
2704 | "jbd2_1k", "jbd2_2k", "jbd2_4k", "jbd2_8k", |
2705 | "jbd2_16k", "jbd2_32k", "jbd2_64k", "jbd2_128k" |
2706 | }; |
2707 | |
2708 | |
2709 | static void jbd2_journal_destroy_slabs(void) |
2710 | { |
2711 | int i; |
2712 | |
2713 | for (i = 0; i < JBD2_MAX_SLABS; i++) { |
2714 | kmem_cache_destroy(jbd2_slab[i]);
2715 | jbd2_slab[i] = NULL; |
2716 | } |
2717 | } |
2718 | |
2719 | static int jbd2_journal_create_slab(size_t size) |
2720 | { |
2721 | static DEFINE_MUTEX(jbd2_slab_create_mutex); |
2722 | int i = order_base_2(size) - 10; |
2723 | size_t slab_size; |
2724 | |
2725 | if (size == PAGE_SIZE) |
2726 | return 0; |
2727 | |
2728 | if (i >= JBD2_MAX_SLABS) |
2729 | return -EINVAL; |
2730 | |
2731 | if (unlikely(i < 0)) |
2732 | i = 0; |
2733 | mutex_lock(&jbd2_slab_create_mutex); |
2734 | if (jbd2_slab[i]) { |
2735 | mutex_unlock(&jbd2_slab_create_mutex);
2736 | return 0; /* Already created */ |
2737 | } |
2738 | |
2739 | slab_size = 1 << (i+10); |
2740 | jbd2_slab[i] = kmem_cache_create(jbd2_slab_names[i], slab_size, |
2741 | slab_size, 0, NULL); |
2742 | mutex_unlock(&jbd2_slab_create_mutex);
2743 | if (!jbd2_slab[i]) { |
2744 | printk(KERN_EMERG "JBD2: no memory for jbd2_slab cache\n"); |
2745 | return -ENOMEM; |
2746 | } |
2747 | return 0; |
2748 | } |
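/*
 * Illustrative arithmetic (not part of the original file): the slab index
 * used above and in get_slab() is order_base_2(size) - 10, so 1KiB -> 0
 * ("jbd2_1k"), 2KiB -> 1, 4KiB -> 2, ... 128KiB -> 7, matching
 * jbd2_slab_names[]. A block size equal to PAGE_SIZE takes the early
 * return and uses no slab at all.
 */
#if 0 /* example only */
static int example_slab_index(size_t size)
{
	/* order_base_2(1024) == 10   -> index 0 ("jbd2_1k")
	 * order_base_2(4096) == 12   -> index 2 ("jbd2_4k")
	 * order_base_2(131072) == 17 -> index 7 ("jbd2_128k") */
	return order_base_2(size) - 10;
}
#endif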
2749 | |
2750 | static struct kmem_cache *get_slab(size_t size) |
2751 | { |
2752 | int i = order_base_2(size) - 10; |
2753 | |
2754 | BUG_ON(i >= JBD2_MAX_SLABS); |
2755 | if (unlikely(i < 0)) |
2756 | i = 0; |
2757 | BUG_ON(jbd2_slab[i] == NULL); |
2758 | return jbd2_slab[i]; |
2759 | } |
2760 | |
2761 | void *jbd2_alloc(size_t size, gfp_t flags) |
2762 | { |
2763 | void *ptr; |
2764 | |
2765 | BUG_ON(size & (size-1)); /* Must be a power of 2 */ |
2766 | |
2767 | if (size < PAGE_SIZE) |
2768 | ptr = kmem_cache_alloc(get_slab(size), flags); |
2769 | else |
2770 | ptr = (void *)__get_free_pages(flags, get_order(size)); |
2771 | |
2772 | /* Check alignment; SLUB has gotten this wrong in the past, |
2773 | * and this can lead to user data corruption! */ |
2774 | BUG_ON(((unsigned long) ptr) & (size-1)); |
2775 | |
2776 | return ptr; |
2777 | } |
2778 | |
2779 | void jbd2_free(void *ptr, size_t size) |
2780 | { |
2781 | if (size < PAGE_SIZE) |
2782 | kmem_cache_free(get_slab(size), ptr);
2783 | else
2784 | free_pages((unsigned long)ptr, get_order(size));
2785 | }
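/*
 * Usage sketch (illustrative, not part of the original file): allocating a
 * block-sized scratch buffer, e.g. for a frozen copy of buffer data. The
 * size must be a power of two; block sizes always are.
 */
#if 0 /* example only */
static int example_copy_block(journal_t *journal, const void *src)
{
	void *copy = jbd2_alloc(journal->j_blocksize, GFP_NOFS);

	if (!copy)
		return -ENOMEM;
	memcpy(copy, src, journal->j_blocksize);
	/* ... hand "copy" to the commit path ... */
	jbd2_free(copy, journal->j_blocksize);
	return 0;
}
#endif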
2786 | |
2787 | /* |
2788 | * Journal_head storage management |
2789 | */ |
2790 | static struct kmem_cache *jbd2_journal_head_cache; |
2791 | #ifdef CONFIG_JBD2_DEBUG |
2792 | static atomic_t nr_journal_heads = ATOMIC_INIT(0); |
2793 | #endif |
2794 | |
2795 | static int __init jbd2_journal_init_journal_head_cache(void) |
2796 | { |
2797 | J_ASSERT(!jbd2_journal_head_cache); |
2798 | jbd2_journal_head_cache = kmem_cache_create("jbd2_journal_head", |
2799 | sizeof(struct journal_head), |
2800 | 0, /* offset */ |
2801 | SLAB_TEMPORARY | SLAB_TYPESAFE_BY_RCU, |
2802 | NULL); /* ctor */ |
2803 | if (!jbd2_journal_head_cache) { |
2804 | printk(KERN_EMERG "JBD2: no memory for journal_head cache\n"); |
2805 | return -ENOMEM; |
2806 | } |
2807 | return 0; |
2808 | } |
2809 | |
2810 | static void jbd2_journal_destroy_journal_head_cache(void) |
2811 | { |
2812 | kmem_cache_destroy(jbd2_journal_head_cache);
2813 | jbd2_journal_head_cache = NULL; |
2814 | } |
2815 | |
2816 | /* |
2817 | * journal_head splicing and dicing |
2818 | */ |
2819 | static struct journal_head *journal_alloc_journal_head(void) |
2820 | { |
2821 | struct journal_head *ret; |
2822 | |
2823 | #ifdef CONFIG_JBD2_DEBUG |
2824 | atomic_inc(&nr_journal_heads);
2825 | #endif |
2826 | ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS); |
2827 | if (!ret) { |
2828 | jbd2_debug(1, "out of memory for journal_head\n"); |
2829 | pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__); |
2830 | ret = kmem_cache_zalloc(jbd2_journal_head_cache, |
2831 | GFP_NOFS | __GFP_NOFAIL); |
2832 | } |
2833 | spin_lock_init(&ret->b_state_lock); |
2834 | return ret; |
2835 | } |
2836 | |
2837 | static void journal_free_journal_head(struct journal_head *jh) |
2838 | { |
2839 | #ifdef CONFIG_JBD2_DEBUG |
2840 | atomic_dec(&nr_journal_heads);
2841 | memset(jh, JBD2_POISON_FREE, sizeof(*jh));
2842 | #endif
2843 | kmem_cache_free(jbd2_journal_head_cache, jh);
2844 | } |
2845 | |
2846 | /* |
2847 | * A journal_head is attached to a buffer_head whenever JBD has an |
2848 | * interest in the buffer. |
2849 | * |
2850 | * Whenever a buffer has an attached journal_head, its ->b_state:BH_JBD bit |
2851 | * is set. This bit is tested in core kernel code where we need to take |
2852 | * JBD-specific actions. Testing the zeroness of ->b_private is not reliable |
2853 | * there. |
2854 | * |
2855 | * When a buffer has its BH_JBD bit set, its ->b_count is elevated by one. |
2856 | * |
2857 | * When a buffer has its BH_JBD bit set it is immune from being released by |
2858 | * core kernel code, mainly via ->b_count. |
2859 | * |
2860 | * A journal_head is detached from its buffer_head when the journal_head's |
2861 | * b_jcount reaches zero. Running transaction (b_transaction) and checkpoint |
2862 | * transaction (b_cp_transaction) hold their references to b_jcount. |
2863 | * |
2864 | * Various places in the kernel want to attach a journal_head to a buffer_head |
2865 | * _before_ attaching the journal_head to a transaction. To protect the |
2866 | * journal_head in this situation, jbd2_journal_add_journal_head elevates the |
2867 | * journal_head's b_jcount refcount by one. The caller must call |
2868 | * jbd2_journal_put_journal_head() to undo this. |
2869 | * |
2870 | * So the typical usage would be: |
2871 | * |
2872 | * (Attach a journal_head if needed. Increments b_jcount) |
2873 | * struct journal_head *jh = jbd2_journal_add_journal_head(bh); |
2874 | * ... |
2875 | * (Get another reference for transaction) |
2876 | * jbd2_journal_grab_journal_head(bh); |
2877 | * jh->b_transaction = xxx; |
2878 | * (Put original reference) |
2879 | * jbd2_journal_put_journal_head(jh); |
2880 | */ |
2881 | |
2882 | /* |
2883 | * Give a buffer_head a journal_head. |
2884 | * |
2885 | * May sleep. |
2886 | */ |
2887 | struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh) |
2888 | { |
2889 | struct journal_head *jh; |
2890 | struct journal_head *new_jh = NULL; |
2891 | |
2892 | repeat: |
2893 | if (!buffer_jbd(bh)) |
2894 | new_jh = journal_alloc_journal_head(); |
2895 | |
2896 | jbd_lock_bh_journal_head(bh); |
2897 | if (buffer_jbd(bh)) { |
2898 | jh = bh2jh(bh); |
2899 | } else { |
2900 | J_ASSERT_BH(bh, |
2901 | (atomic_read(&bh->b_count) > 0) || |
2902 | (bh->b_folio && bh->b_folio->mapping)); |
2903 | |
2904 | if (!new_jh) { |
2905 | jbd_unlock_bh_journal_head(bh); |
2906 | goto repeat; |
2907 | } |
2908 | |
2909 | jh = new_jh; |
2910 | new_jh = NULL; /* We consumed it */ |
2911 | set_buffer_jbd(bh); |
2912 | bh->b_private = jh; |
2913 | jh->b_bh = bh; |
2914 | get_bh(bh); |
2915 | BUFFER_TRACE(bh, "added journal_head"); |
2916 | } |
2917 | jh->b_jcount++; |
2918 | jbd_unlock_bh_journal_head(bh); |
2919 | if (new_jh) |
2920 | journal_free_journal_head(new_jh);
2921 | return bh->b_private; |
2922 | } |
2923 | |
2924 | /* |
2925 | * Grab a ref against this buffer_head's journal_head. If it ended up not |
2926 | * having a journal_head, return NULL |
2927 | */ |
2928 | struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh) |
2929 | { |
2930 | struct journal_head *jh = NULL; |
2931 | |
2932 | jbd_lock_bh_journal_head(bh); |
2933 | if (buffer_jbd(bh)) { |
2934 | jh = bh2jh(bh); |
2935 | jh->b_jcount++; |
2936 | } |
2937 | jbd_unlock_bh_journal_head(bh); |
2938 | return jh; |
2939 | } |
2940 | EXPORT_SYMBOL(jbd2_journal_grab_journal_head); |
2941 | |
2942 | static void __journal_remove_journal_head(struct buffer_head *bh) |
2943 | { |
2944 | struct journal_head *jh = bh2jh(bh); |
2945 | |
2946 | J_ASSERT_JH(jh, jh->b_transaction == NULL); |
2947 | J_ASSERT_JH(jh, jh->b_next_transaction == NULL); |
2948 | J_ASSERT_JH(jh, jh->b_cp_transaction == NULL); |
2949 | J_ASSERT_JH(jh, jh->b_jlist == BJ_None); |
2950 | J_ASSERT_BH(bh, buffer_jbd(bh)); |
2951 | J_ASSERT_BH(bh, jh2bh(jh) == bh); |
2952 | BUFFER_TRACE(bh, "remove journal_head"); |
2953 | |
2954 | /* Unlink before dropping the lock */ |
2955 | bh->b_private = NULL; |
2956 | jh->b_bh = NULL; /* debug, really */ |
2957 | clear_buffer_jbd(bh); |
2958 | } |
2959 | |
2960 | static void journal_release_journal_head(struct journal_head *jh, size_t b_size) |
2961 | { |
2962 | if (jh->b_frozen_data) { |
2963 | printk(KERN_WARNING "%s: freeing b_frozen_data\n", __func__); |
2964 | jbd2_free(jh->b_frozen_data, b_size);
2965 | } |
2966 | if (jh->b_committed_data) { |
2967 | printk(KERN_WARNING "%s: freeing b_committed_data\n", __func__); |
2968 | jbd2_free(jh->b_committed_data, b_size);
2969 | } |
2970 | journal_free_journal_head(jh); |
2971 | } |
2972 | |
2973 | /* |
2974 | * Drop a reference on the passed journal_head. If it fell to zero then |
2975 | * release the journal_head from the buffer_head. |
2976 | */ |
2977 | void jbd2_journal_put_journal_head(struct journal_head *jh) |
2978 | { |
2979 | struct buffer_head *bh = jh2bh(jh); |
2980 | |
2981 | jbd_lock_bh_journal_head(bh); |
2982 | J_ASSERT_JH(jh, jh->b_jcount > 0); |
2983 | --jh->b_jcount; |
2984 | if (!jh->b_jcount) { |
2985 | __journal_remove_journal_head(bh); |
2986 | jbd_unlock_bh_journal_head(bh); |
2987 | journal_release_journal_head(jh, bh->b_size);
2988 | __brelse(bh); |
2989 | } else { |
2990 | jbd_unlock_bh_journal_head(bh); |
2991 | } |
2992 | } |
2993 | EXPORT_SYMBOL(jbd2_journal_put_journal_head); |
2994 | |
2995 | /* |
2996 | * Initialize jbd inode head |
2997 | */ |
2998 | void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode) |
2999 | { |
3000 | jinode->i_transaction = NULL; |
3001 | jinode->i_next_transaction = NULL; |
3002 | jinode->i_vfs_inode = inode; |
3003 | jinode->i_flags = 0; |
3004 | jinode->i_dirty_start = 0; |
3005 | jinode->i_dirty_end = 0; |
3006 | INIT_LIST_HEAD(&jinode->i_list);
3007 | } |
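/*
 * Usage sketch (illustrative, not part of the original file): a filesystem
 * typically embeds a struct jbd2_inode in its own in-memory inode and
 * initializes it once when the inode is set up. The container type below
 * is made up for the example.
 */
#if 0 /* example only */
struct example_fs_inode {
	struct jbd2_inode jinode;	/* jbd2 bookkeeping for this inode */
	struct inode vfs_inode;
};

static void example_init_inode(struct example_fs_inode *ei)
{
	jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode);
}
#endif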
3008 | |
3009 | /* |
3010 | * Function to be called before we start removing inode from memory (i.e., |
3011 | * clear_inode() is a fine place to be called from). It removes inode from |
3012 | * transaction's lists. |
3013 | */ |
3014 | void jbd2_journal_release_jbd_inode(journal_t *journal, |
3015 | struct jbd2_inode *jinode) |
3016 | { |
3017 | if (!journal) |
3018 | return; |
3019 | restart: |
3020 | spin_lock(&journal->j_list_lock);
3021 | /* Is commit writing out inode - we have to wait */ |
3022 | if (jinode->i_flags & JI_COMMIT_RUNNING) { |
3023 | wait_queue_head_t *wq; |
3024 | DEFINE_WAIT_BIT(wait, &jinode->i_flags, __JI_COMMIT_RUNNING); |
3025 | wq = bit_waitqueue(&jinode->i_flags, __JI_COMMIT_RUNNING);
3026 | prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
3027 | spin_unlock(&journal->j_list_lock);
3028 | schedule();
3029 | finish_wait(wq, &wait.wq_entry);
3030 | goto restart; |
3031 | } |
3032 | |
3033 | if (jinode->i_transaction) { |
3034 | list_del(&jinode->i_list);
3035 | jinode->i_transaction = NULL; |
3036 | } |
3037 | spin_unlock(&journal->j_list_lock);
3038 | } |
3039 | |
3040 | |
3041 | #ifdef CONFIG_PROC_FS |
3042 | |
3043 | #define JBD2_STATS_PROC_NAME "fs/jbd2" |
3044 | |
3045 | static void __init jbd2_create_jbd_stats_proc_entry(void) |
3046 | { |
3047 | proc_jbd2_stats = proc_mkdir(JBD2_STATS_PROC_NAME, NULL); |
3048 | } |
3049 | |
3050 | static void __exit jbd2_remove_jbd_stats_proc_entry(void) |
3051 | { |
3052 | if (proc_jbd2_stats) |
3053 | remove_proc_entry(JBD2_STATS_PROC_NAME, NULL); |
3054 | } |
3055 | |
3056 | #else |
3057 | |
3058 | #define jbd2_create_jbd_stats_proc_entry() do {} while (0) |
3059 | #define jbd2_remove_jbd_stats_proc_entry() do {} while (0) |
3060 | |
3061 | #endif |
3062 | |
3063 | struct kmem_cache *jbd2_handle_cache, *jbd2_inode_cache; |
3064 | |
3065 | static int __init jbd2_journal_init_inode_cache(void) |
3066 | { |
3067 | J_ASSERT(!jbd2_inode_cache); |
3068 | jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0); |
3069 | if (!jbd2_inode_cache) { |
3070 | pr_emerg("JBD2: failed to create inode cache\n"); |
3071 | return -ENOMEM; |
3072 | } |
3073 | return 0; |
3074 | } |
3075 | |
3076 | static int __init jbd2_journal_init_handle_cache(void) |
3077 | { |
3078 | J_ASSERT(!jbd2_handle_cache); |
3079 | jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY); |
3080 | if (!jbd2_handle_cache) { |
3081 | printk(KERN_EMERG "JBD2: failed to create handle cache\n"); |
3082 | return -ENOMEM; |
3083 | } |
3084 | return 0; |
3085 | } |
3086 | |
3087 | static void jbd2_journal_destroy_inode_cache(void) |
3088 | { |
3089 | kmem_cache_destroy(jbd2_inode_cache);
3090 | jbd2_inode_cache = NULL; |
3091 | } |
3092 | |
3093 | static void jbd2_journal_destroy_handle_cache(void) |
3094 | { |
3095 | kmem_cache_destroy(s: jbd2_handle_cache); |
3096 | jbd2_handle_cache = NULL; |
3097 | } |
3098 | |
3099 | /* |
3100 | * Module startup and shutdown |
3101 | */ |
3102 | |
3103 | static int __init journal_init_caches(void) |
3104 | { |
3105 | int ret; |
3106 | |
3107 | ret = jbd2_journal_init_revoke_record_cache(); |
3108 | if (ret == 0) |
3109 | ret = jbd2_journal_init_revoke_table_cache(); |
3110 | if (ret == 0) |
3111 | ret = jbd2_journal_init_journal_head_cache(); |
3112 | if (ret == 0) |
3113 | ret = jbd2_journal_init_handle_cache(); |
3114 | if (ret == 0) |
3115 | ret = jbd2_journal_init_inode_cache(); |
3116 | if (ret == 0) |
3117 | ret = jbd2_journal_init_transaction_cache(); |
3118 | return ret; |
3119 | } |
3120 | |
3121 | static void jbd2_journal_destroy_caches(void) |
3122 | { |
3123 | jbd2_journal_destroy_revoke_record_cache(); |
3124 | jbd2_journal_destroy_revoke_table_cache(); |
3125 | jbd2_journal_destroy_journal_head_cache(); |
3126 | jbd2_journal_destroy_handle_cache(); |
3127 | jbd2_journal_destroy_inode_cache(); |
3128 | jbd2_journal_destroy_transaction_cache(); |
3129 | jbd2_journal_destroy_slabs(); |
3130 | } |
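
/*
 * Note (informational): journal_init_caches() does not unwind a partial
 * failure itself; journal_init() below calls jbd2_journal_destroy_caches()
 * on error instead.  That is safe because kmem_cache_destroy() accepts a
 * NULL pointer as a no-op, so caches that were never created are skipped.
 */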

static int __init journal_init(void)
{
	int ret;

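	/*
	 * The journal superblock has a fixed 1024-byte on-disk layout;
	 * fail the build if a field or padding change ever alters the
	 * size of the in-memory structure describing it.
	 */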
	BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024);

	ret = journal_init_caches();
	if (ret == 0) {
		jbd2_create_jbd_stats_proc_entry();
	} else {
		jbd2_journal_destroy_caches();
	}
	return ret;
}

static void __exit journal_exit(void)
{
#ifdef CONFIG_JBD2_DEBUG
	int n = atomic_read(&nr_journal_heads);
	if (n)
		pr_err("JBD2: leaked %d journal_heads!\n", n);
#endif
	jbd2_remove_jbd_stats_proc_entry();
	jbd2_journal_destroy_caches();
}

MODULE_DESCRIPTION("Generic filesystem journal-writing module");
MODULE_LICENSE("GPL");
module_init(journal_init);
module_exit(journal_exit);