1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* |
3 | * Copyright (C) 2007 Oracle. All rights reserved. |
4 | */ |
5 | |
6 | #ifndef BTRFS_TRANSACTION_H |
7 | #define BTRFS_TRANSACTION_H |
8 | |
9 | #include <linux/atomic.h> |
10 | #include <linux/refcount.h> |
11 | #include <linux/list.h> |
12 | #include <linux/time64.h> |
13 | #include <linux/mutex.h> |
14 | #include <linux/wait.h> |
15 | #include "btrfs_inode.h" |
16 | #include "delayed-ref.h" |
17 | #include "extent-io-tree.h" |
18 | #include "block-rsv.h" |
19 | #include "messages.h" |
20 | #include "misc.h" |
21 | |
22 | struct dentry; |
23 | struct inode; |
24 | struct btrfs_pending_snapshot; |
25 | struct btrfs_fs_info; |
26 | struct btrfs_root_item; |
27 | struct btrfs_root; |
28 | struct btrfs_path; |
29 | |
/* Radix-tree tag for roots that are part of the transaction. */
31 | #define BTRFS_ROOT_TRANS_TAG 0 |
32 | |
/*
 * Lifecycle states of a transaction (btrfs_transaction::state; changes are
 * protected by fs_info->trans_lock per the comment on that field).
 */
enum btrfs_trans_state {
	TRANS_STATE_RUNNING,
	TRANS_STATE_COMMIT_PREP,
	TRANS_STATE_COMMIT_START,
	TRANS_STATE_COMMIT_DOING,
	TRANS_STATE_UNBLOCKED,
	TRANS_STATE_SUPER_COMMITTED,
	TRANS_STATE_COMPLETED,
	/* Number of states above; keep last (usable as an array-size bound). */
	TRANS_STATE_MAX,
};
43 | |
/*
 * Bit numbers for btrfs_transaction::flags (presumably used with the atomic
 * set_bit/test_bit helpers on that field -- confirm against the users).
 */
#define BTRFS_TRANS_HAVE_FREE_BGS 0
#define BTRFS_TRANS_DIRTY_BG_RUN 1
#define BTRFS_TRANS_CACHE_ENOSPC 2
47 | |
/*
 * In-memory state of one transaction generation.  Lifetime is managed by
 * @use_count and the structure is released via btrfs_put_transaction().
 */
struct btrfs_transaction {
	/* Id of this transaction generation. */
	u64 transid;
	/*
	 * Total external writers (USERSPACE/START/ATTACH) in this
	 * transaction, it must be zero before the transaction is
	 * being committed.
	 */
	atomic_t num_extwriters;
	/*
	 * Total writers in this transaction, it must be zero before the
	 * transaction can end.
	 */
	atomic_t num_writers;
	/* Reference count, dropped via btrfs_put_transaction(). */
	refcount_t use_count;

	/* BTRFS_TRANS_* flag bits. */
	unsigned long flags;

	/* Be protected by fs_info->trans_lock when we want to change it. */
	enum btrfs_trans_state state;
	/* Non-zero once the transaction has been aborted (see TRANS_ABORTED). */
	int aborted;
	struct list_head list;
	struct extent_io_tree dirty_pages;
	/* Time the transaction was started. */
	time64_t start_time;
	wait_queue_head_t writer_wait;
	wait_queue_head_t commit_wait;
	struct list_head pending_snapshots;
	struct list_head dev_update_list;
	struct list_head switch_commits;
	struct list_head dirty_bgs;

	/*
	 * There is no explicit lock which protects io_bgs, rather its
	 * consistency is implied by the fact that all the sites which modify
	 * it do so under some form of transaction critical section, namely:
	 *
	 * - btrfs_start_dirty_block_groups - This function can only ever be
	 * run by one of the transaction committers. Refer to
	 * BTRFS_TRANS_DIRTY_BG_RUN usage in btrfs_commit_transaction
	 *
	 * - btrfs_write_dirty_blockgroups - this is called by
	 * commit_cowonly_roots from transaction critical section
	 * (TRANS_STATE_COMMIT_DOING)
	 *
	 * - btrfs_cleanup_dirty_bgs - called on transaction abort
	 */
	struct list_head io_bgs;
	struct list_head dropped_roots;
	struct extent_io_tree pinned_extents;

	/*
	 * We need to make sure block group deletion doesn't race with
	 * free space cache writeout. This mutex keeps them from stomping
	 * on each other.
	 */
	struct mutex cache_write_mutex;
	spinlock_t dirty_bgs_lock;
	/* Protected by spin lock fs_info->unused_bgs_lock. */
	struct list_head deleted_bgs;
	spinlock_t dropped_roots_lock;
	struct btrfs_delayed_ref_root delayed_refs;
	struct btrfs_fs_info *fs_info;

	/*
	 * Number of ordered extents the transaction must wait for before
	 * committing. These are ordered extents started by a fast fsync.
	 */
	atomic_t pending_ordered;
	wait_queue_head_t pending_wait;
};
117 | |
/*
 * Transaction handle types, stored in btrfs_trans_handle::type.  ENUM_BIT
 * assigns each a distinct bit so they can be combined in the masks below
 * (e.g. TRANS_START, TRANS_EXTWRITERS).
 */
enum {
	ENUM_BIT(__TRANS_FREEZABLE),
	ENUM_BIT(__TRANS_START),
	ENUM_BIT(__TRANS_ATTACH),
	ENUM_BIT(__TRANS_JOIN),
	ENUM_BIT(__TRANS_JOIN_NOLOCK),
	ENUM_BIT(__TRANS_DUMMY),
	ENUM_BIT(__TRANS_JOIN_NOSTART),
};

#define TRANS_START (__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH (__TRANS_ATTACH)
#define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK)
#define TRANS_JOIN_NOSTART (__TRANS_JOIN_NOSTART)

/* Handle types counted as external writers (see num_extwriters). */
#define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH)
135 | |
/*
 * Per-task handle on a running transaction, obtained from the
 * btrfs_start/join/attach_transaction() family and released with
 * btrfs_end_transaction() or btrfs_commit_transaction().
 */
struct btrfs_trans_handle {
	/* Generation id of the attached transaction. */
	u64 transid;
	u64 bytes_reserved;
	u64 delayed_refs_bytes_reserved;
	u64 chunk_bytes_reserved;
	unsigned long delayed_ref_updates;
	unsigned long delayed_ref_csum_deletions;
	/* The transaction this handle belongs to. */
	struct btrfs_transaction *transaction;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *orig_rsv;
	/* Set by a task that wants to create a snapshot. */
	struct btrfs_pending_snapshot *pending_snapshot;
	/* Reference count of this handle. */
	refcount_t use_count;
	/* TRANS_* type bits this handle was started with. */
	unsigned int type;
	/*
	 * Error code of transaction abort, set outside of locks and must use
	 * the READ_ONCE/WRITE_ONCE access.
	 */
	short aborted;
	bool adding_csums;
	bool allocating_chunk;
	bool removing_chunk;
	bool reloc_reserved;
	bool in_fsync;
	struct btrfs_fs_info *fs_info;
	struct list_head new_bgs;
	struct btrfs_block_rsv delayed_rsv;
};
164 | |
165 | /* |
166 | * The abort status can be changed between calls and is not protected by locks. |
167 | * This accepts btrfs_transaction and btrfs_trans_handle as types. Once it's |
168 | * set to a non-zero value it does not change, so the macro should be in checks |
169 | * but is not necessary for further reads of the value. |
170 | */ |
171 | #define TRANS_ABORTED(trans) (unlikely(READ_ONCE((trans)->aborted))) |
172 | |
/*
 * Description of a snapshot creation request, queued on
 * btrfs_transaction::pending_snapshots via @list.
 */
struct btrfs_pending_snapshot {
	struct dentry *dentry;
	struct inode *dir;
	/* Source root being snapshotted. */
	struct btrfs_root *root;
	struct btrfs_root_item *root_item;
	/* The resulting snapshot root. */
	struct btrfs_root *snap;
	struct btrfs_qgroup_inherit *inherit;
	struct btrfs_path *path;
	/* Block reservation for the operation. */
	struct btrfs_block_rsv block_rsv;
	/* Error status of the snapshot creation (0 on success). */
	int error;
	/* Preallocated anonymous block device number. */
	dev_t anon_dev;
	bool readonly;
	struct list_head list;
};
190 | |
191 | static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans, |
192 | struct btrfs_inode *inode) |
193 | { |
194 | spin_lock(lock: &inode->lock); |
195 | inode->last_trans = trans->transaction->transid; |
196 | inode->last_sub_trans = btrfs_get_root_log_transid(root: inode->root); |
197 | inode->last_log_commit = inode->last_sub_trans - 1; |
198 | spin_unlock(lock: &inode->lock); |
199 | } |
200 | |
201 | /* |
202 | * Make qgroup codes to skip given qgroupid, means the old/new_roots for |
203 | * qgroup won't contain the qgroupid in it. |
204 | */ |
205 | static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans, |
206 | u64 qgroupid) |
207 | { |
208 | struct btrfs_delayed_ref_root *delayed_refs; |
209 | |
210 | delayed_refs = &trans->transaction->delayed_refs; |
211 | WARN_ON(delayed_refs->qgroup_to_skip); |
212 | delayed_refs->qgroup_to_skip = qgroupid; |
213 | } |
214 | |
215 | static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans) |
216 | { |
217 | struct btrfs_delayed_ref_root *delayed_refs; |
218 | |
219 | delayed_refs = &trans->transaction->delayed_refs; |
220 | WARN_ON(!delayed_refs->qgroup_to_skip); |
221 | delayed_refs->qgroup_to_skip = 0; |
222 | } |
223 | |
/* Whether an abort with this error code warrants printing a stack trace. */
bool __cold abort_should_print_stack(int error);

/*
 * Abort the transaction with the given error code.
 *
 * Call btrfs_abort_transaction as early as possible when an error condition is
 * detected, that way the exact stack trace is reported for some errors.
 *
 * Only the first abort since mount sets BTRFS_FS_STATE_TRANS_ABORTED and
 * prints the error (with a stack trace when abort_should_print_stack() says
 * so); later aborts go straight to __btrfs_abort_transaction().
 */
#define btrfs_abort_transaction(trans, error) \
do { \
bool first = false; \
/* Report first abort since mount */ \
if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \
&((trans)->fs_info->fs_state))) { \
first = true; \
if (WARN(abort_should_print_stack(error), \
KERN_ERR \
"BTRFS: Transaction aborted (error %d)\n", \
(error))) { \
/* Stack trace printed. */ \
} else { \
btrfs_err((trans)->fs_info, \
"Transaction aborted (error %d)", \
(error)); \
} \
} \
__btrfs_abort_transaction((trans), __func__, \
__LINE__, (error), first); \
} while (0)
251 | |
/* Transaction start/join/attach variants and ending them. */
int btrfs_end_transaction(struct btrfs_trans_handle *trans);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
unsigned int num_items);
struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
struct btrfs_root *root,
unsigned int num_items);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);

/* Commit paths, root bookkeeping and helpers (implemented in transaction.c). */
void btrfs_add_dead_root(struct btrfs_root *root);
void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info);
int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
void btrfs_put_transaction(struct btrfs_transaction *transaction);
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
const char *function,
unsigned int line, int error, bool first_hit);

/* Module init/exit for the transaction kmem cache. NOTE(review): presumably a cache -- confirm in transaction.c. */
int __init btrfs_transaction_init(void);
void __cold btrfs_transaction_exit(void);
290 | |
291 | #endif |
292 | |