// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS B-tree node cache
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Originally written by Seiji Kihara.
 * Fully revised by Ryusuke Konishi for stabilization and simplification.
 *
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "mdt.h"
#include "dat.h"
#include "page.h"
#include "btnode.h"

/**
 * nilfs_init_btnc_inode - initialize B-tree node cache inode
 * @btnc_inode: inode to be initialized
 *
 * nilfs_init_btnc_inode() sets up an inode for use as a B-tree node cache.
 */
void nilfs_init_btnc_inode(struct inode *btnc_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(btnc_inode);

	btnc_inode->i_mode = S_IFREG;
	ii->i_flags = 0;
	memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
	/*
	 * GFP_NOFS presumably keeps page cache allocations for this
	 * mapping from re-entering the filesystem during reclaim.
	 */
	mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
}

void nilfs_btnode_cache_clear(struct address_space *btnc)
{
	invalidate_mapping_pages(btnc, 0, -1);
	truncate_inode_pages(btnc, 0);
}

struct buffer_head *
nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
{
	struct inode *inode = btnc->host;
	struct buffer_head *bh;

	bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
	if (unlikely(!bh))
		return NULL;

	if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
		     buffer_dirty(bh))) {
		brelse(bh);
		BUG();
	}
	memset(bh->b_data, 0, i_blocksize(inode));
	bh->b_bdev = inode->i_sb->s_bdev;
	bh->b_blocknr = blocknr;
	set_buffer_mapped(bh);
	set_buffer_uptodate(bh);

	folio_unlock(bh->b_folio);
	folio_put(bh->b_folio);
	return bh;
}
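
/*
 * Usage sketch (illustrative only, not part of the original source): a
 * caller creates a fresh node block, fills it, and drops its buffer
 * reference.  The btnc mapping and blocknr values are assumed to come
 * from the btree code.
 *
 *	struct buffer_head *bh;
 *
 *	bh = nilfs_btnode_create_block(btnc, blocknr);
 *	if (unlikely(!bh))
 *		return -ENOMEM;
 *	... initialize the node through bh->b_data ...
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */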

int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
			      sector_t pblocknr, blk_opf_t opf,
			      struct buffer_head **pbh, sector_t *submit_ptr)
{
	struct buffer_head *bh;
	struct inode *inode = btnc->host;
	struct folio *folio;
	int err;

	bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
	if (unlikely(!bh))
		return -ENOMEM;

	err = -EEXIST; /* internal code */
	folio = bh->b_folio;

	if (buffer_uptodate(bh) || buffer_dirty(bh))
		goto found;

	if (pblocknr == 0) {
		pblocknr = blocknr;
		if (inode->i_ino != NILFS_DAT_INO) {
			struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

			/* blocknr is a virtual block number */
			err = nilfs_dat_translate(nilfs->ns_dat, blocknr,
						  &pblocknr);
			if (unlikely(err)) {
				brelse(bh);
				goto out_locked;
			}
		}
	}

	if (opf & REQ_RAHEAD) {
		if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) {
			err = -EBUSY; /* internal code */
			brelse(bh);
			goto out_locked;
		}
	} else { /* opf == REQ_OP_READ */
		lock_buffer(bh);
	}
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		err = -EEXIST; /* internal code */
		goto found;
	}
	set_buffer_mapped(bh);
	bh->b_bdev = inode->i_sb->s_bdev;
	bh->b_blocknr = pblocknr; /* set block address for read */
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(opf, bh);
	bh->b_blocknr = blocknr; /* set back to the given block address */
	*submit_ptr = pblocknr;
	err = 0;
found:
	*pbh = bh;

out_locked:
	folio_unlock(folio);
	folio_put(folio);
	return err;
}
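
/*
 * Caller-side reading protocol, as a minimal sketch (assumed from the
 * internal -EEXIST/-EBUSY codes defined above; not lifted from a real
 * caller).  The caller submits the read, waits for the buffer, and
 * treats -EEXIST as "already up to date":
 *
 *	struct buffer_head *bh;
 *	sector_t submit_ptr = 0;
 *	int err;
 *
 *	err = nilfs_btnode_submit_block(btnc, blocknr, 0, REQ_OP_READ,
 *					&bh, &submit_ptr);
 *	if (err && err != -EEXIST)
 *		return err;
 *	if (!err) {
 *		wait_on_buffer(bh);
 *		if (!buffer_uptodate(bh)) {
 *			brelse(bh);
 *			return -EIO;
 *		}
 *	}
 *	... use bh, then brelse(bh) ...
 */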

/**
 * nilfs_btnode_delete - delete B-tree node buffer
 * @bh: buffer to be deleted
 *
 * nilfs_btnode_delete() invalidates the specified buffer and deletes the
 * page containing it if the page is no longer busy.
 */
void nilfs_btnode_delete(struct buffer_head *bh)
{
	struct address_space *mapping;
	struct folio *folio = bh->b_folio;
	pgoff_t index = folio->index;
	int still_dirty;

	folio_get(folio);
	folio_lock(folio);
	folio_wait_writeback(folio);

	nilfs_forget_buffer(bh);
	still_dirty = folio_test_dirty(folio);
	mapping = folio->mapping;
	folio_unlock(folio);
	folio_put(folio);

	if (!still_dirty && mapping)
		invalidate_inode_pages2_range(mapping, index, index);
}

/**
 * nilfs_btnode_prepare_change_key - prepare to move the contents of the
 * block for the old key to the new key
 * @btnc: B-tree node cache
 * @ctxt: change_key context
 *
 * The old buffer will not be removed, but it may be reused for the new
 * buffer.  This function may return -ENOMEM on a memory allocation
 * failure or -EIO on a disk read failure.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh, *nbh;
	struct inode *inode = btnc->host;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	int err;

	if (oldkey == newkey)
		return 0;

	obh = ctxt->bh;
	ctxt->newbh = NULL;

	if (inode->i_blkbits == PAGE_SHIFT) {
		struct folio *ofolio = obh->b_folio;

		folio_lock(ofolio);
retry:
		/* BUG_ON(oldkey != obh->b_folio->index); */
		if (unlikely(oldkey != ofolio->index))
			NILFS_FOLIO_BUG(ofolio,
					"invalid oldkey %lld (newkey=%lld)",
					(unsigned long long)oldkey,
					(unsigned long long)newkey);

		xa_lock_irq(&btnc->i_pages);
		err = __xa_insert(&btnc->i_pages, newkey, ofolio, GFP_NOFS);
		xa_unlock_irq(&btnc->i_pages);
		/*
		 * Note: folio->index will not change to newkey until
		 * nilfs_btnode_commit_change_key() is called.  To protect
		 * the folio in this intermediate state, the folio lock
		 * is held.
		 */
		if (!err)
			return 0;
		else if (err != -EBUSY)
			goto failed_unlock;

		err = invalidate_inode_pages2_range(btnc, newkey, newkey);
		if (!err)
			goto retry;
		/* fallback to copy mode */
		folio_unlock(ofolio);
	}

	nbh = nilfs_btnode_create_block(btnc, newkey);
	if (!nbh)
		return -ENOMEM;

	BUG_ON(nbh == obh);
	ctxt->newbh = nbh;
	return 0;

failed_unlock:
	folio_unlock(obh->b_folio);
	return err;
}
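
/*
 * Note: on the move path above (block size == page size), a successful
 * prepare returns with the folio still locked; the lock is released by
 * the subsequent commit or abort.  On the copy path, ctxt->newbh holds
 * the freshly created buffer instead and no folio lock is retained.
 */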

/**
 * nilfs_btnode_commit_change_key - commit the change_key operation
 * prepared by nilfs_btnode_prepare_change_key()
 * @btnc: B-tree node cache
 * @ctxt: change_key context
 */
void nilfs_btnode_commit_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	struct folio *ofolio;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		ofolio = obh->b_folio;
		if (unlikely(oldkey != ofolio->index))
			NILFS_FOLIO_BUG(ofolio,
					"invalid oldkey %lld (newkey=%lld)",
					(unsigned long long)oldkey,
					(unsigned long long)newkey);
		mark_buffer_dirty(obh);

		xa_lock_irq(&btnc->i_pages);
		__xa_erase(&btnc->i_pages, oldkey);
		__xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);
		xa_unlock_irq(&btnc->i_pages);

		ofolio->index = obh->b_blocknr = newkey;
		folio_unlock(ofolio);
	} else {
		nilfs_copy_buffer(nbh, obh);
		mark_buffer_dirty(nbh);

		nbh->b_blocknr = newkey;
		ctxt->bh = nbh;
		nilfs_btnode_delete(obh);	/* will decrement bh->b_count */
	}
}

/**
 * nilfs_btnode_abort_change_key - abort the change_key operation
 * prepared by nilfs_btnode_prepare_change_key()
 * @btnc: B-tree node cache
 * @ctxt: change_key context
 */
void nilfs_btnode_abort_change_key(struct address_space *btnc,
				   struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		xa_erase_irq(&btnc->i_pages, newkey);
		folio_unlock(ctxt->bh->b_folio);
	} else {
		/*
		 * When canceling a buffer that a prepare operation has
		 * allocated to copy a node block to another location, use
		 * nilfs_btnode_delete() to initialize and release the buffer
		 * so that the buffer flags will not be in an inconsistent
		 * state when it is reallocated.
		 */
		nilfs_btnode_delete(nbh);
	}
}
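
/*
 * The three change_key helpers above form a small prepare/commit/abort
 * transaction.  A minimal sketch of the expected calling sequence
 * (assumed from the function contracts; the context setup shown here is
 * illustrative, not lifted from a real caller):
 *
 *	struct nilfs_btnode_chkey_ctxt ctxt;
 *	int err;
 *
 *	ctxt.oldkey = oldkey;
 *	ctxt.newkey = newkey;
 *	ctxt.bh = bh;
 *	err = nilfs_btnode_prepare_change_key(btnc, &ctxt);
 *	if (err)
 *		return err;
 *	... make the new key durable, e.g. update the parent node ...
 *	if (success)
 *		nilfs_btnode_commit_change_key(btnc, &ctxt);
 *	else
 *		nilfs_btnode_abort_change_key(btnc, &ctxt);
 */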