1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * linux/fs/hfsplus/btree.c |
4 | * |
5 | * Copyright (C) 2001 |
6 | * Brad Boyer (flar@allandria.com) |
7 | * (C) 2003 Ardis Technologies <roman@ardistech.com> |
8 | * |
9 | * Handle opening/closing btree |
10 | */ |
11 | |
12 | #include <linux/slab.h> |
13 | #include <linux/pagemap.h> |
14 | #include <linux/log2.h> |
15 | |
16 | #include "hfsplus_fs.h" |
17 | #include "hfsplus_raw.h" |
18 | |
19 | /* |
20 | * Initial source code of clump size calculation is gotten |
21 | * from http://opensource.apple.com/tarballs/diskdev_cmds/ |
22 | */ |
#define CLUMP_ENTRIES	15

/*
 * Clump sizes in MB, indexed by volume-size row (log2 based, starting
 * at 1GB) and by B-tree kind column.  The table is pure read-only data,
 * so it is declared const to keep it out of writable memory.
 */
static const short clumptbl[CLUMP_ENTRIES * 3] = {
/*
 *	    Volume	Attributes	 Catalog	 Extents
 *	     Size	Clump (MB)	Clump (MB)	Clump (MB)
 */
	/*   1GB */	  4,		  4,		 4,
	/*   2GB */	  6,		  6,		 4,
	/*   4GB */	  8,		  8,		 4,
	/*   8GB */	 11,		 11,		 5,
	/*
	 * For volumes 16GB and larger, we want to make sure that a full OS
	 * install won't require fragmentation of the Catalog or Attributes
	 * B-trees.  We do this by making the clump sizes sufficiently large,
	 * and by leaving a gap after the B-trees for them to grow into.
	 *
	 * For SnowLeopard 10A298, a FullNetInstall with all packages selected
	 * results in:
	 * Catalog B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       31616
	 *	freeNodes:         1978
	 * (used = 231.55 MB)
	 * Attributes B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       63232
	 *	freeNodes:          958
	 * (used = 486.52 MB)
	 *
	 * We also want Time Machine backup volumes to have a sufficiently
	 * large clump size to reduce fragmentation.
	 *
	 * The series of numbers for Catalog and Attribute form a geometric
	 * series.  For Catalog (16GB to 512GB), each term is 8**(1/5) times
	 * the previous term.  For Attributes (16GB to 512GB), each term is
	 * 4**(1/5) times the previous term.  For 1TB to 16TB, each term is
	 * 2**(1/5) times the previous term.
	 */
	/*  16GB */	 64,		 32,		 5,
	/*  32GB */	 84,		 49,		 6,
	/*  64GB */	111,		 74,		 7,
	/* 128GB */	147,		111,		 8,
	/* 256GB */	194,		169,		 9,
	/* 512GB */	256,		256,		11,
	/*   1TB */	294,		294,		14,
	/*   2TB */	338,		338,		16,
	/*   4TB */	388,		388,		20,
	/*   8TB */	446,		446,		25,
	/*  16TB */	512,		512,		32
};
74 | |
75 | u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size, |
76 | u64 sectors, int file_id) |
77 | { |
78 | u32 mod = max(node_size, block_size); |
79 | u32 clump_size; |
80 | int column; |
81 | int i; |
82 | |
83 | /* Figure out which column of the above table to use for this file. */ |
84 | switch (file_id) { |
85 | case HFSPLUS_ATTR_CNID: |
86 | column = 0; |
87 | break; |
88 | case HFSPLUS_CAT_CNID: |
89 | column = 1; |
90 | break; |
91 | default: |
92 | column = 2; |
93 | break; |
94 | } |
95 | |
96 | /* |
97 | * The default clump size is 0.8% of the volume size. And |
98 | * it must also be a multiple of the node and block size. |
99 | */ |
100 | if (sectors < 0x200000) { |
101 | clump_size = sectors << 2; /* 0.8 % */ |
102 | if (clump_size < (8 * node_size)) |
103 | clump_size = 8 * node_size; |
104 | } else { |
105 | /* turn exponent into table index... */ |
106 | for (i = 0, sectors = sectors >> 22; |
107 | sectors && (i < CLUMP_ENTRIES - 1); |
108 | ++i, sectors = sectors >> 1) { |
109 | /* empty body */ |
110 | } |
111 | |
112 | clump_size = clumptbl[column + (i) * 3] * 1024 * 1024; |
113 | } |
114 | |
115 | /* |
116 | * Round the clump size to a multiple of node and block size. |
117 | * NOTE: This rounds down. |
118 | */ |
119 | clump_size /= mod; |
120 | clump_size *= mod; |
121 | |
122 | /* |
123 | * Rounding down could have rounded down to 0 if the block size was |
124 | * greater than the clump size. If so, just use one block or node. |
125 | */ |
126 | if (clump_size == 0) |
127 | clump_size = mod; |
128 | |
129 | return clump_size; |
130 | } |
131 | |
132 | /* Get a reference to a B*Tree and do some initial checks */ |
133 | struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id) |
134 | { |
135 | struct hfs_btree *tree; |
136 | struct hfs_btree_header_rec *head; |
137 | struct address_space *mapping; |
138 | struct inode *inode; |
139 | struct page *page; |
140 | unsigned int size; |
141 | |
142 | tree = kzalloc(size: sizeof(*tree), GFP_KERNEL); |
143 | if (!tree) |
144 | return NULL; |
145 | |
146 | mutex_init(&tree->tree_lock); |
147 | spin_lock_init(&tree->hash_lock); |
148 | tree->sb = sb; |
149 | tree->cnid = id; |
150 | inode = hfsplus_iget(sb, ino: id); |
151 | if (IS_ERR(ptr: inode)) |
152 | goto free_tree; |
153 | tree->inode = inode; |
154 | |
155 | if (!HFSPLUS_I(inode: tree->inode)->first_blocks) { |
156 | pr_err("invalid btree extent records (0 size)\n" ); |
157 | goto free_inode; |
158 | } |
159 | |
160 | mapping = tree->inode->i_mapping; |
161 | page = read_mapping_page(mapping, index: 0, NULL); |
162 | if (IS_ERR(ptr: page)) |
163 | goto free_inode; |
164 | |
165 | /* Load the header */ |
166 | head = (struct hfs_btree_header_rec *)(kmap_local_page(page) + |
167 | sizeof(struct hfs_bnode_desc)); |
168 | tree->root = be32_to_cpu(head->root); |
169 | tree->leaf_count = be32_to_cpu(head->leaf_count); |
170 | tree->leaf_head = be32_to_cpu(head->leaf_head); |
171 | tree->leaf_tail = be32_to_cpu(head->leaf_tail); |
172 | tree->node_count = be32_to_cpu(head->node_count); |
173 | tree->free_nodes = be32_to_cpu(head->free_nodes); |
174 | tree->attributes = be32_to_cpu(head->attributes); |
175 | tree->node_size = be16_to_cpu(head->node_size); |
176 | tree->max_key_len = be16_to_cpu(head->max_key_len); |
177 | tree->depth = be16_to_cpu(head->depth); |
178 | |
179 | /* Verify the tree and set the correct compare function */ |
180 | switch (id) { |
181 | case HFSPLUS_EXT_CNID: |
182 | if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) { |
183 | pr_err("invalid extent max_key_len %d\n" , |
184 | tree->max_key_len); |
185 | goto fail_page; |
186 | } |
187 | if (tree->attributes & HFS_TREE_VARIDXKEYS) { |
188 | pr_err("invalid extent btree flag\n" ); |
189 | goto fail_page; |
190 | } |
191 | |
192 | tree->keycmp = hfsplus_ext_cmp_key; |
193 | break; |
194 | case HFSPLUS_CAT_CNID: |
195 | if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) { |
196 | pr_err("invalid catalog max_key_len %d\n" , |
197 | tree->max_key_len); |
198 | goto fail_page; |
199 | } |
200 | if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) { |
201 | pr_err("invalid catalog btree flag\n" ); |
202 | goto fail_page; |
203 | } |
204 | |
205 | if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) && |
206 | (head->key_type == HFSPLUS_KEY_BINARY)) |
207 | tree->keycmp = hfsplus_cat_bin_cmp_key; |
208 | else { |
209 | tree->keycmp = hfsplus_cat_case_cmp_key; |
210 | set_bit(HFSPLUS_SB_CASEFOLD, addr: &HFSPLUS_SB(sb)->flags); |
211 | } |
212 | break; |
213 | case HFSPLUS_ATTR_CNID: |
214 | if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) { |
215 | pr_err("invalid attributes max_key_len %d\n" , |
216 | tree->max_key_len); |
217 | goto fail_page; |
218 | } |
219 | tree->keycmp = hfsplus_attr_bin_cmp_key; |
220 | break; |
221 | default: |
222 | pr_err("unknown B*Tree requested\n" ); |
223 | goto fail_page; |
224 | } |
225 | |
226 | if (!(tree->attributes & HFS_TREE_BIGKEYS)) { |
227 | pr_err("invalid btree flag\n" ); |
228 | goto fail_page; |
229 | } |
230 | |
231 | size = tree->node_size; |
232 | if (!is_power_of_2(n: size)) |
233 | goto fail_page; |
234 | if (!tree->node_count) |
235 | goto fail_page; |
236 | |
237 | tree->node_size_shift = ffs(size) - 1; |
238 | |
239 | tree->pages_per_bnode = |
240 | (tree->node_size + PAGE_SIZE - 1) >> |
241 | PAGE_SHIFT; |
242 | |
243 | kunmap_local(head); |
244 | put_page(page); |
245 | return tree; |
246 | |
247 | fail_page: |
248 | kunmap_local(head); |
249 | put_page(page); |
250 | free_inode: |
251 | tree->inode->i_mapping->a_ops = &hfsplus_aops; |
252 | iput(tree->inode); |
253 | free_tree: |
254 | kfree(objp: tree); |
255 | return NULL; |
256 | } |
257 | |
258 | /* Release resources used by a btree */ |
259 | void hfs_btree_close(struct hfs_btree *tree) |
260 | { |
261 | struct hfs_bnode *node; |
262 | int i; |
263 | |
264 | if (!tree) |
265 | return; |
266 | |
267 | for (i = 0; i < NODE_HASH_SIZE; i++) { |
268 | while ((node = tree->node_hash[i])) { |
269 | tree->node_hash[i] = node->next_hash; |
270 | if (atomic_read(v: &node->refcnt)) |
271 | pr_crit("node %d:%d " |
272 | "still has %d user(s)!\n" , |
273 | node->tree->cnid, node->this, |
274 | atomic_read(&node->refcnt)); |
275 | hfs_bnode_free(node); |
276 | tree->node_hash_cnt--; |
277 | } |
278 | } |
279 | iput(tree->inode); |
280 | kfree(objp: tree); |
281 | } |
282 | |
283 | int hfs_btree_write(struct hfs_btree *tree) |
284 | { |
285 | struct hfs_btree_header_rec *head; |
286 | struct hfs_bnode *node; |
287 | struct page *page; |
288 | |
289 | node = hfs_bnode_find(tree, num: 0); |
290 | if (IS_ERR(ptr: node)) |
291 | /* panic? */ |
292 | return -EIO; |
293 | /* Load the header */ |
294 | page = node->page[0]; |
295 | head = (struct hfs_btree_header_rec *)(kmap_local_page(page) + |
296 | sizeof(struct hfs_bnode_desc)); |
297 | |
298 | head->root = cpu_to_be32(tree->root); |
299 | head->leaf_count = cpu_to_be32(tree->leaf_count); |
300 | head->leaf_head = cpu_to_be32(tree->leaf_head); |
301 | head->leaf_tail = cpu_to_be32(tree->leaf_tail); |
302 | head->node_count = cpu_to_be32(tree->node_count); |
303 | head->free_nodes = cpu_to_be32(tree->free_nodes); |
304 | head->attributes = cpu_to_be32(tree->attributes); |
305 | head->depth = cpu_to_be16(tree->depth); |
306 | |
307 | kunmap_local(head); |
308 | set_page_dirty(page); |
309 | hfs_bnode_put(node); |
310 | return 0; |
311 | } |
312 | |
313 | static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx) |
314 | { |
315 | struct hfs_btree *tree = prev->tree; |
316 | struct hfs_bnode *node; |
317 | struct hfs_bnode_desc desc; |
318 | __be32 cnid; |
319 | |
320 | node = hfs_bnode_create(tree, num: idx); |
321 | if (IS_ERR(ptr: node)) |
322 | return node; |
323 | |
324 | tree->free_nodes--; |
325 | prev->next = idx; |
326 | cnid = cpu_to_be32(idx); |
327 | hfs_bnode_write(node: prev, buf: &cnid, offsetof(struct hfs_bnode_desc, next), len: 4); |
328 | |
329 | node->type = HFS_NODE_MAP; |
330 | node->num_recs = 1; |
331 | hfs_bnode_clear(node, off: 0, len: tree->node_size); |
332 | desc.next = 0; |
333 | desc.prev = 0; |
334 | desc.type = HFS_NODE_MAP; |
335 | desc.height = 0; |
336 | desc.num_recs = cpu_to_be16(1); |
337 | desc.reserved = 0; |
338 | hfs_bnode_write(node, buf: &desc, off: 0, len: sizeof(desc)); |
339 | hfs_bnode_write_u16(node, off: 14, data: 0x8000); |
340 | hfs_bnode_write_u16(node, off: tree->node_size - 2, data: 14); |
341 | hfs_bnode_write_u16(node, off: tree->node_size - 4, data: tree->node_size - 6); |
342 | |
343 | return node; |
344 | } |
345 | |
346 | /* Make sure @tree has enough space for the @rsvd_nodes */ |
347 | int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes) |
348 | { |
349 | struct inode *inode = tree->inode; |
350 | struct hfsplus_inode_info *hip = HFSPLUS_I(inode); |
351 | u32 count; |
352 | int res; |
353 | |
354 | if (rsvd_nodes <= 0) |
355 | return 0; |
356 | |
357 | while (tree->free_nodes < rsvd_nodes) { |
358 | res = hfsplus_file_extend(inode, zeroout: hfs_bnode_need_zeroout(tree)); |
359 | if (res) |
360 | return res; |
361 | hip->phys_size = inode->i_size = |
362 | (loff_t)hip->alloc_blocks << |
363 | HFSPLUS_SB(sb: tree->sb)->alloc_blksz_shift; |
364 | hip->fs_blocks = |
365 | hip->alloc_blocks << HFSPLUS_SB(sb: tree->sb)->fs_shift; |
366 | inode_set_bytes(inode, bytes: inode->i_size); |
367 | count = inode->i_size >> tree->node_size_shift; |
368 | tree->free_nodes += count - tree->node_count; |
369 | tree->node_count = count; |
370 | } |
371 | return 0; |
372 | } |
373 | |
374 | struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) |
375 | { |
376 | struct hfs_bnode *node, *next_node; |
377 | struct page **pagep; |
378 | u32 nidx, idx; |
379 | unsigned off; |
380 | u16 off16; |
381 | u16 len; |
382 | u8 *data, byte, m; |
383 | int i, res; |
384 | |
385 | res = hfs_bmap_reserve(tree, rsvd_nodes: 1); |
386 | if (res) |
387 | return ERR_PTR(error: res); |
388 | |
389 | nidx = 0; |
390 | node = hfs_bnode_find(tree, num: nidx); |
391 | if (IS_ERR(ptr: node)) |
392 | return node; |
393 | len = hfs_brec_lenoff(node, rec: 2, off: &off16); |
394 | off = off16; |
395 | |
396 | off += node->page_offset; |
397 | pagep = node->page + (off >> PAGE_SHIFT); |
398 | data = kmap_local_page(page: *pagep); |
399 | off &= ~PAGE_MASK; |
400 | idx = 0; |
401 | |
402 | for (;;) { |
403 | while (len) { |
404 | byte = data[off]; |
405 | if (byte != 0xff) { |
406 | for (m = 0x80, i = 0; i < 8; m >>= 1, i++) { |
407 | if (!(byte & m)) { |
408 | idx += i; |
409 | data[off] |= m; |
410 | set_page_dirty(*pagep); |
411 | kunmap_local(data); |
412 | tree->free_nodes--; |
413 | mark_inode_dirty(inode: tree->inode); |
414 | hfs_bnode_put(node); |
415 | return hfs_bnode_create(tree, |
416 | num: idx); |
417 | } |
418 | } |
419 | } |
420 | if (++off >= PAGE_SIZE) { |
421 | kunmap_local(data); |
422 | data = kmap_local_page(page: *++pagep); |
423 | off = 0; |
424 | } |
425 | idx += 8; |
426 | len--; |
427 | } |
428 | kunmap_local(data); |
429 | nidx = node->next; |
430 | if (!nidx) { |
431 | hfs_dbg(BNODE_MOD, "create new bmap node\n" ); |
432 | next_node = hfs_bmap_new_bmap(prev: node, idx); |
433 | } else |
434 | next_node = hfs_bnode_find(tree, num: nidx); |
435 | hfs_bnode_put(node); |
436 | if (IS_ERR(ptr: next_node)) |
437 | return next_node; |
438 | node = next_node; |
439 | |
440 | len = hfs_brec_lenoff(node, rec: 0, off: &off16); |
441 | off = off16; |
442 | off += node->page_offset; |
443 | pagep = node->page + (off >> PAGE_SHIFT); |
444 | data = kmap_local_page(page: *pagep); |
445 | off &= ~PAGE_MASK; |
446 | } |
447 | } |
448 | |
449 | void hfs_bmap_free(struct hfs_bnode *node) |
450 | { |
451 | struct hfs_btree *tree; |
452 | struct page *page; |
453 | u16 off, len; |
454 | u32 nidx; |
455 | u8 *data, byte, m; |
456 | |
457 | hfs_dbg(BNODE_MOD, "btree_free_node: %u\n" , node->this); |
458 | BUG_ON(!node->this); |
459 | tree = node->tree; |
460 | nidx = node->this; |
461 | node = hfs_bnode_find(tree, num: 0); |
462 | if (IS_ERR(ptr: node)) |
463 | return; |
464 | len = hfs_brec_lenoff(node, rec: 2, off: &off); |
465 | while (nidx >= len * 8) { |
466 | u32 i; |
467 | |
468 | nidx -= len * 8; |
469 | i = node->next; |
470 | if (!i) { |
471 | /* panic */; |
472 | pr_crit("unable to free bnode %u. " |
473 | "bmap not found!\n" , |
474 | node->this); |
475 | hfs_bnode_put(node); |
476 | return; |
477 | } |
478 | hfs_bnode_put(node); |
479 | node = hfs_bnode_find(tree, num: i); |
480 | if (IS_ERR(ptr: node)) |
481 | return; |
482 | if (node->type != HFS_NODE_MAP) { |
483 | /* panic */; |
484 | pr_crit("invalid bmap found! " |
485 | "(%u,%d)\n" , |
486 | node->this, node->type); |
487 | hfs_bnode_put(node); |
488 | return; |
489 | } |
490 | len = hfs_brec_lenoff(node, rec: 0, off: &off); |
491 | } |
492 | off += node->page_offset + nidx / 8; |
493 | page = node->page[off >> PAGE_SHIFT]; |
494 | data = kmap_local_page(page); |
495 | off &= ~PAGE_MASK; |
496 | m = 1 << (~nidx & 7); |
497 | byte = data[off]; |
498 | if (!(byte & m)) { |
499 | pr_crit("trying to free free bnode " |
500 | "%u(%d)\n" , |
501 | node->this, node->type); |
502 | kunmap_local(data); |
503 | hfs_bnode_put(node); |
504 | return; |
505 | } |
506 | data[off] = byte & ~m; |
507 | set_page_dirty(page); |
508 | kunmap_local(data); |
509 | hfs_bnode_put(node); |
510 | tree->free_nodes++; |
511 | mark_inode_dirty(inode: tree->inode); |
512 | } |
513 | |