1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * |
4 | * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved. |
5 | * |
6 | * TODO: try to use extents tree (instead of array) |
7 | */ |
8 | |
9 | #include <linux/blkdev.h> |
10 | #include <linux/fs.h> |
11 | #include <linux/log2.h> |
12 | |
13 | #include "debug.h" |
14 | #include "ntfs.h" |
15 | #include "ntfs_fs.h" |
16 | |
/* runs_tree is a contiguous chunk of memory. Try to avoid big sizes. */
18 | #define NTFS3_RUN_MAX_BYTES 0x10000 |
19 | |
20 | struct ntfs_run { |
21 | CLST vcn; /* Virtual cluster number. */ |
22 | CLST len; /* Length in clusters. */ |
23 | CLST lcn; /* Logical cluster number. */ |
24 | }; |
25 | |
26 | /* |
 * run_lookup - Lookup the index of the MCB entry containing @vcn.
 *
 * On success, returns true and sets the @index parameter
 * to the index of the entry found.
 * If the entry is missing from the list, returns false and
 * sets @index to the insertion position for the entry in question.
33 | */ |
34 | static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index) |
35 | { |
36 | size_t min_idx, max_idx, mid_idx; |
37 | struct ntfs_run *r; |
38 | |
39 | if (!run->count) { |
40 | *index = 0; |
41 | return false; |
42 | } |
43 | |
44 | min_idx = 0; |
45 | max_idx = run->count - 1; |
46 | |
	/* Check boundary cases first, since they cover the most frequent requests. */
48 | r = run->runs; |
49 | if (vcn < r->vcn) { |
50 | *index = 0; |
51 | return false; |
52 | } |
53 | |
54 | if (vcn < r->vcn + r->len) { |
55 | *index = 0; |
56 | return true; |
57 | } |
58 | |
59 | r += max_idx; |
60 | if (vcn >= r->vcn + r->len) { |
61 | *index = run->count; |
62 | return false; |
63 | } |
64 | |
65 | if (vcn >= r->vcn) { |
66 | *index = max_idx; |
67 | return true; |
68 | } |
69 | |
70 | do { |
71 | mid_idx = min_idx + ((max_idx - min_idx) >> 1); |
72 | r = run->runs + mid_idx; |
73 | |
74 | if (vcn < r->vcn) { |
75 | max_idx = mid_idx - 1; |
76 | if (!mid_idx) |
77 | break; |
78 | } else if (vcn >= r->vcn + r->len) { |
79 | min_idx = mid_idx + 1; |
80 | } else { |
81 | *index = mid_idx; |
82 | return true; |
83 | } |
84 | } while (min_idx <= max_idx); |
85 | |
86 | *index = max_idx + 1; |
87 | return false; |
88 | } |
89 | |
90 | /* |
91 | * run_consolidate - Consolidate runs starting from a given one. |
92 | */ |
93 | static void run_consolidate(struct runs_tree *run, size_t index) |
94 | { |
95 | size_t i; |
96 | struct ntfs_run *r = run->runs + index; |
97 | |
98 | while (index + 1 < run->count) { |
99 | /* |
		 * Merge the current run with the next one
		 * if the start of the next run lies inside the one being tested.
102 | */ |
103 | struct ntfs_run *n = r + 1; |
104 | CLST end = r->vcn + r->len; |
105 | CLST dl; |
106 | |
		/* Stop if there is a gap between the runs. */
108 | if (n->vcn > end) |
109 | break; |
110 | |
111 | dl = end - n->vcn; |
112 | |
113 | /* |
		 * If the range at @index overlaps with the next one,
		 * then either adjust the start of the next one,
		 * or (if it is completely covered) just remove it from the list.
117 | */ |
118 | if (dl > 0) { |
119 | if (n->len <= dl) |
120 | goto remove_next_range; |
121 | |
122 | n->len -= dl; |
123 | n->vcn += dl; |
124 | if (n->lcn != SPARSE_LCN) |
125 | n->lcn += dl; |
126 | dl = 0; |
127 | } |
128 | |
129 | /* |
		 * Move on to the next run if the sparse mode of
		 * the current and next runs does not match.
132 | */ |
133 | if ((n->lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) { |
134 | index += 1; |
135 | r = n; |
136 | continue; |
137 | } |
138 | |
139 | /* |
		 * Stop if the first volume block (lcn) of the next
		 * run does not directly follow the last volume block
		 * of the current run.
143 | */ |
144 | if (n->lcn != SPARSE_LCN && n->lcn != r->lcn + r->len) |
145 | break; |
146 | |
147 | /* |
		 * Next and current runs are adjacent.
		 * Eat/join.
150 | */ |
151 | r->len += n->len - dl; |
152 | |
153 | remove_next_range: |
154 | i = run->count - (index + 1); |
155 | if (i > 1) |
156 | memmove(n, n + 1, sizeof(*n) * (i - 1)); |
157 | |
158 | run->count -= 1; |
159 | } |
160 | } |
161 | |
162 | /* |
163 | * run_is_mapped_full |
164 | * |
 * Return: True if the range [svcn, evcn] is mapped.
166 | */ |
167 | bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn) |
168 | { |
169 | size_t i; |
170 | const struct ntfs_run *r, *end; |
171 | CLST next_vcn; |
172 | |
	if (!run_lookup(run, svcn, &i))
174 | return false; |
175 | |
176 | end = run->runs + run->count; |
177 | r = run->runs + i; |
178 | |
179 | for (;;) { |
180 | next_vcn = r->vcn + r->len; |
181 | if (next_vcn > evcn) |
182 | return true; |
183 | |
184 | if (++r >= end) |
185 | return false; |
186 | |
187 | if (r->vcn != next_vcn) |
188 | return false; |
189 | } |
190 | } |
191 | |
192 | bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn, |
193 | CLST *len, size_t *index) |
194 | { |
195 | size_t idx; |
196 | CLST gap; |
197 | struct ntfs_run *r; |
198 | |
	/* Fail immediately if the run was not touched yet. */
200 | if (!run->runs) |
201 | return false; |
202 | |
	if (!run_lookup(run, vcn, &idx))
204 | return false; |
205 | |
206 | r = run->runs + idx; |
207 | |
208 | if (vcn >= r->vcn + r->len) |
209 | return false; |
210 | |
211 | gap = vcn - r->vcn; |
212 | if (r->len <= gap) |
213 | return false; |
214 | |
215 | *lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + gap); |
216 | |
217 | if (len) |
218 | *len = r->len - gap; |
219 | if (index) |
220 | *index = idx; |
221 | |
222 | return true; |
223 | } |
224 | |
225 | /* |
226 | * run_truncate_head - Decommit the range before vcn. |
227 | */ |
228 | void run_truncate_head(struct runs_tree *run, CLST vcn) |
229 | { |
230 | size_t index; |
231 | struct ntfs_run *r; |
232 | |
	if (run_lookup(run, vcn, &index)) {
234 | r = run->runs + index; |
235 | |
236 | if (vcn > r->vcn) { |
237 | CLST dlen = vcn - r->vcn; |
238 | |
239 | r->vcn = vcn; |
240 | r->len -= dlen; |
241 | if (r->lcn != SPARSE_LCN) |
242 | r->lcn += dlen; |
243 | } |
244 | |
245 | if (!index) |
246 | return; |
247 | } |
248 | r = run->runs; |
249 | memmove(r, r + index, sizeof(*r) * (run->count - index)); |
250 | |
251 | run->count -= index; |
252 | |
253 | if (!run->count) { |
		kvfree(run->runs);
255 | run->runs = NULL; |
256 | run->allocated = 0; |
257 | } |
258 | } |
259 | |
260 | /* |
261 | * run_truncate - Decommit the range after vcn. |
262 | */ |
263 | void run_truncate(struct runs_tree *run, CLST vcn) |
264 | { |
265 | size_t index; |
266 | |
267 | /* |
	 * If we hit a range then we have to truncate it.
	 * If the range being truncated becomes empty,
	 * then it will be removed entirely.
272 | */ |
	if (run_lookup(run, vcn, &index)) {
274 | struct ntfs_run *r = run->runs + index; |
275 | |
276 | r->len = vcn - r->vcn; |
277 | |
278 | if (r->len > 0) |
279 | index += 1; |
280 | } |
281 | |
282 | /* |
	 * At this point 'index' is set to the position that
	 * should be thrown away (including index itself).
	 * A simple one - just set the limit.
286 | */ |
287 | run->count = index; |
288 | |
289 | /* Do not reallocate array 'runs'. Only free if possible. */ |
290 | if (!index) { |
		kvfree(run->runs);
292 | run->runs = NULL; |
293 | run->allocated = 0; |
294 | } |
295 | } |
296 | |
297 | /* |
298 | * run_truncate_around - Trim head and tail if necessary. |
299 | */ |
300 | void run_truncate_around(struct runs_tree *run, CLST vcn) |
301 | { |
302 | run_truncate_head(run, vcn); |
303 | |
304 | if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2) |
		run_truncate(run, (run->runs + (run->count >> 1))->vcn);
306 | } |
307 | |
308 | /* |
309 | * run_add_entry |
310 | * |
 * Sets the location to a known state.
 * The run to be added may overlap with an existing location.
 *
 * Return: false if out of memory.
315 | */ |
316 | bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len, |
317 | bool is_mft) |
318 | { |
319 | size_t used, index; |
320 | struct ntfs_run *r; |
321 | bool inrange; |
322 | CLST tail_vcn = 0, tail_len = 0, tail_lcn = 0; |
323 | bool should_add_tail = false; |
324 | |
325 | /* |
326 | * Lookup the insertion point. |
327 | * |
	 * Execute a bsearch for the entry containing
	 * the start position in question.
330 | */ |
	inrange = run_lookup(run, vcn, &index);
332 | |
333 | /* |
	 * A shortcut here would be the case where the range
	 * was not found, but the one being added
	 * continues the previous run.
	 * In this case we can directly use the
	 * existing range as our start point.
339 | */ |
340 | if (!inrange && index > 0) { |
341 | struct ntfs_run *t = run->runs + index - 1; |
342 | |
343 | if (t->vcn + t->len == vcn && |
344 | (t->lcn == SPARSE_LCN) == (lcn == SPARSE_LCN) && |
345 | (lcn == SPARSE_LCN || lcn == t->lcn + t->len)) { |
346 | inrange = true; |
347 | index -= 1; |
348 | } |
349 | } |
350 | |
351 | /* |
352 | * At this point 'index' either points to the range |
353 | * containing start position or to the insertion position |
354 | * for a new range. |
	 * So first let's check if the range we are probing is here already.
356 | */ |
357 | if (!inrange) { |
358 | requires_new_range: |
359 | /* |
360 | * Range was not found. |
361 | * Insert at position 'index' |
362 | */ |
363 | used = run->count * sizeof(struct ntfs_run); |
364 | |
365 | /* |
		 * Check the allocated space.
		 * If it is not enough for one more entry,
		 * then it will be reallocated.
369 | */ |
370 | if (run->allocated < used + sizeof(struct ntfs_run)) { |
371 | size_t bytes; |
372 | struct ntfs_run *new_ptr; |
373 | |
374 | /* Use power of 2 for 'bytes'. */ |
375 | if (!used) { |
376 | bytes = 64; |
377 | } else if (used <= 16 * PAGE_SIZE) { |
				if (is_power_of_2(run->allocated))
379 | bytes = run->allocated << 1; |
380 | else |
381 | bytes = (size_t)1 |
						<< (2 + blksize_bits(used));
383 | } else { |
384 | bytes = run->allocated + (16 * PAGE_SIZE); |
385 | } |
386 | |
387 | WARN_ON(!is_mft && bytes > NTFS3_RUN_MAX_BYTES); |
388 | |
			new_ptr = kvmalloc(bytes, GFP_KERNEL);
390 | |
391 | if (!new_ptr) |
392 | return false; |
393 | |
394 | r = new_ptr + index; |
395 | memcpy(new_ptr, run->runs, |
396 | index * sizeof(struct ntfs_run)); |
397 | memcpy(r + 1, run->runs + index, |
398 | sizeof(struct ntfs_run) * (run->count - index)); |
399 | |
			kvfree(run->runs);
401 | run->runs = new_ptr; |
402 | run->allocated = bytes; |
403 | |
404 | } else { |
405 | size_t i = run->count - index; |
406 | |
407 | r = run->runs + index; |
408 | |
			/* memmove appears to be a bottleneck here... */
410 | if (i > 0) |
411 | memmove(r + 1, r, sizeof(struct ntfs_run) * i); |
412 | } |
413 | |
414 | r->vcn = vcn; |
415 | r->lcn = lcn; |
416 | r->len = len; |
417 | run->count += 1; |
418 | } else { |
419 | r = run->runs + index; |
420 | |
421 | /* |
		 * If one of the ranges was not allocated then we
		 * have to split the location we just matched and
		 * insert the current one.
		 * In the common case this requires the tail to be
		 * reinserted by a recursive call.
427 | */ |
428 | if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) || |
429 | (lcn != SPARSE_LCN && lcn != r->lcn + (vcn - r->vcn))) { |
430 | CLST to_eat = vcn - r->vcn; |
431 | CLST Tovcn = to_eat + len; |
432 | |
433 | should_add_tail = Tovcn < r->len; |
434 | |
435 | if (should_add_tail) { |
436 | tail_lcn = r->lcn == SPARSE_LCN ? |
437 | SPARSE_LCN : |
438 | (r->lcn + Tovcn); |
439 | tail_vcn = r->vcn + Tovcn; |
440 | tail_len = r->len - Tovcn; |
441 | } |
442 | |
443 | if (to_eat > 0) { |
444 | r->len = to_eat; |
445 | inrange = false; |
446 | index += 1; |
447 | goto requires_new_range; |
448 | } |
449 | |
			/* lcn should match the one we are going to add. */
451 | r->lcn = lcn; |
452 | } |
453 | |
454 | /* |
		 * If the existing range fits then we're done.
		 * Otherwise extend the found one and let the
		 * consolidation below join the ranges.
457 | */ |
458 | if (r->vcn + r->len < vcn + len) |
459 | r->len += len - ((r->vcn + r->len) - vcn); |
460 | } |
461 | |
462 | /* |
	 * And normalize it starting from the insertion point.
	 * It's possible that no insertion was needed, in the case
	 * where the start point lies within the range of the entry
	 * that 'index' points to.
467 | */ |
468 | if (inrange && index > 0) |
469 | index -= 1; |
470 | run_consolidate(run, index); |
	run_consolidate(run, index + 1);
472 | |
473 | /* |
474 | * A special case. |
	 * We have to add an extra range at the tail.
476 | */ |
477 | if (should_add_tail && |
	    !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
479 | return false; |
480 | |
481 | return true; |
482 | } |
483 | |
484 | /* run_collapse_range |
485 | * |
486 | * Helper for attr_collapse_range(), |
487 | * which is helper for fallocate(collapse_range). |
488 | */ |
489 | bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len) |
490 | { |
491 | size_t index, eat; |
492 | struct ntfs_run *r, *e, *eat_start, *eat_end; |
493 | CLST end; |
494 | |
495 | if (WARN_ON(!run_lookup(run, vcn, &index))) |
496 | return true; /* Should never be here. */ |
497 | |
498 | e = run->runs + run->count; |
499 | r = run->runs + index; |
500 | end = vcn + len; |
501 | |
502 | if (vcn > r->vcn) { |
503 | if (r->vcn + r->len <= end) { |
			/* Collapse the tail of the run. */
505 | r->len = vcn - r->vcn; |
506 | } else if (r->lcn == SPARSE_LCN) { |
			/* Collapse a middle part of a sparse run. */
508 | r->len -= len; |
509 | } else { |
510 | /* Collapse a middle part of normal run, split. */ |
			if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
512 | return false; |
513 | return run_collapse_range(run, vcn, len); |
514 | } |
515 | |
516 | r += 1; |
517 | } |
518 | |
519 | eat_start = r; |
520 | eat_end = r; |
521 | |
522 | for (; r < e; r++) { |
523 | CLST d; |
524 | |
525 | if (r->vcn >= end) { |
526 | r->vcn -= len; |
527 | continue; |
528 | } |
529 | |
530 | if (r->vcn + r->len <= end) { |
531 | /* Eat this run. */ |
532 | eat_end = r + 1; |
533 | continue; |
534 | } |
535 | |
536 | d = end - r->vcn; |
537 | if (r->lcn != SPARSE_LCN) |
538 | r->lcn += d; |
539 | r->len -= d; |
540 | r->vcn -= len - d; |
541 | } |
542 | |
543 | eat = eat_end - eat_start; |
544 | memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r)); |
545 | run->count -= eat; |
546 | |
547 | return true; |
548 | } |
549 | |
550 | /* run_insert_range |
551 | * |
552 | * Helper for attr_insert_range(), |
553 | * which is helper for fallocate(insert_range). |
554 | */ |
555 | bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len) |
556 | { |
557 | size_t index; |
558 | struct ntfs_run *r, *e; |
559 | |
560 | if (WARN_ON(!run_lookup(run, vcn, &index))) |
561 | return false; /* Should never be here. */ |
562 | |
563 | e = run->runs + run->count; |
564 | r = run->runs + index; |
565 | |
566 | if (vcn > r->vcn) |
567 | r += 1; |
568 | |
569 | for (; r < e; r++) |
570 | r->vcn += len; |
571 | |
572 | r = run->runs + index; |
573 | |
574 | if (vcn > r->vcn) { |
575 | /* split fragment. */ |
576 | CLST len1 = vcn - r->vcn; |
577 | CLST len2 = r->len - len1; |
578 | CLST lcn2 = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len1); |
579 | |
580 | r->len = len1; |
581 | |
		if (!run_add_entry(run, vcn + len, lcn2, len2, false))
583 | return false; |
584 | } |
585 | |
	if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
587 | return false; |
588 | |
589 | return true; |
590 | } |
591 | |
592 | /* |
593 | * run_get_entry - Return index-th mapped region. |
594 | */ |
595 | bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn, |
596 | CLST *lcn, CLST *len) |
597 | { |
598 | const struct ntfs_run *r; |
599 | |
600 | if (index >= run->count) |
601 | return false; |
602 | |
603 | r = run->runs + index; |
604 | |
605 | if (!r->len) |
606 | return false; |
607 | |
608 | if (vcn) |
609 | *vcn = r->vcn; |
610 | if (lcn) |
611 | *lcn = r->lcn; |
612 | if (len) |
613 | *len = r->len; |
614 | return true; |
615 | } |
616 | |
617 | /* |
 * run_packed_size - Calculate the size of a packed int64.
619 | */ |
620 | #ifdef __BIG_ENDIAN |
621 | static inline int run_packed_size(const s64 n) |
622 | { |
623 | const u8 *p = (const u8 *)&n + sizeof(n) - 1; |
624 | |
625 | if (n >= 0) { |
626 | if (p[-7] || p[-6] || p[-5] || p[-4]) |
627 | p -= 4; |
628 | if (p[-3] || p[-2]) |
629 | p -= 2; |
630 | if (p[-1]) |
631 | p -= 1; |
632 | if (p[0] & 0x80) |
633 | p -= 1; |
634 | } else { |
635 | if (p[-7] != 0xff || p[-6] != 0xff || p[-5] != 0xff || |
636 | p[-4] != 0xff) |
637 | p -= 4; |
638 | if (p[-3] != 0xff || p[-2] != 0xff) |
639 | p -= 2; |
640 | if (p[-1] != 0xff) |
641 | p -= 1; |
642 | if (!(p[0] & 0x80)) |
643 | p -= 1; |
644 | } |
645 | return (const u8 *)&n + sizeof(n) - p; |
646 | } |
647 | |
/* Fully trusted function. It does not check 'size' for errors. */
649 | static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v) |
650 | { |
651 | const u8 *p = (u8 *)&v; |
652 | |
653 | switch (size) { |
654 | case 8: |
655 | run_buf[7] = p[0]; |
656 | fallthrough; |
657 | case 7: |
658 | run_buf[6] = p[1]; |
659 | fallthrough; |
660 | case 6: |
661 | run_buf[5] = p[2]; |
662 | fallthrough; |
663 | case 5: |
664 | run_buf[4] = p[3]; |
665 | fallthrough; |
666 | case 4: |
667 | run_buf[3] = p[4]; |
668 | fallthrough; |
669 | case 3: |
670 | run_buf[2] = p[5]; |
671 | fallthrough; |
672 | case 2: |
673 | run_buf[1] = p[6]; |
674 | fallthrough; |
675 | case 1: |
676 | run_buf[0] = p[7]; |
677 | } |
678 | } |
679 | |
/* Fully trusted function. It does not check 'size' for errors. */
681 | static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v) |
682 | { |
683 | u8 *p = (u8 *)&v; |
684 | |
685 | switch (size) { |
686 | case 8: |
687 | p[0] = run_buf[7]; |
688 | fallthrough; |
689 | case 7: |
690 | p[1] = run_buf[6]; |
691 | fallthrough; |
692 | case 6: |
693 | p[2] = run_buf[5]; |
694 | fallthrough; |
695 | case 5: |
696 | p[3] = run_buf[4]; |
697 | fallthrough; |
698 | case 4: |
699 | p[4] = run_buf[3]; |
700 | fallthrough; |
701 | case 3: |
702 | p[5] = run_buf[2]; |
703 | fallthrough; |
704 | case 2: |
705 | p[6] = run_buf[1]; |
706 | fallthrough; |
707 | case 1: |
708 | p[7] = run_buf[0]; |
709 | } |
710 | return v; |
711 | } |
712 | |
713 | #else |
714 | |
715 | static inline int run_packed_size(const s64 n) |
716 | { |
717 | const u8 *p = (const u8 *)&n; |
718 | |
719 | if (n >= 0) { |
720 | if (p[7] || p[6] || p[5] || p[4]) |
721 | p += 4; |
722 | if (p[3] || p[2]) |
723 | p += 2; |
724 | if (p[1]) |
725 | p += 1; |
726 | if (p[0] & 0x80) |
727 | p += 1; |
728 | } else { |
729 | if (p[7] != 0xff || p[6] != 0xff || p[5] != 0xff || |
730 | p[4] != 0xff) |
731 | p += 4; |
732 | if (p[3] != 0xff || p[2] != 0xff) |
733 | p += 2; |
734 | if (p[1] != 0xff) |
735 | p += 1; |
736 | if (!(p[0] & 0x80)) |
737 | p += 1; |
738 | } |
739 | |
740 | return 1 + p - (const u8 *)&n; |
741 | } |
742 | |
/* Fully trusted function. It does not check 'size' for errors. */
744 | static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v) |
745 | { |
746 | const u8 *p = (u8 *)&v; |
747 | |
748 | /* memcpy( run_buf, &v, size); Is it faster? */ |
749 | switch (size) { |
750 | case 8: |
751 | run_buf[7] = p[7]; |
752 | fallthrough; |
753 | case 7: |
754 | run_buf[6] = p[6]; |
755 | fallthrough; |
756 | case 6: |
757 | run_buf[5] = p[5]; |
758 | fallthrough; |
759 | case 5: |
760 | run_buf[4] = p[4]; |
761 | fallthrough; |
762 | case 4: |
763 | run_buf[3] = p[3]; |
764 | fallthrough; |
765 | case 3: |
766 | run_buf[2] = p[2]; |
767 | fallthrough; |
768 | case 2: |
769 | run_buf[1] = p[1]; |
770 | fallthrough; |
771 | case 1: |
772 | run_buf[0] = p[0]; |
773 | } |
774 | } |
775 | |
/* Fully trusted function. It does not check 'size' for errors. */
777 | static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v) |
778 | { |
779 | u8 *p = (u8 *)&v; |
780 | |
781 | /* memcpy( &v, run_buf, size); Is it faster? */ |
782 | switch (size) { |
783 | case 8: |
784 | p[7] = run_buf[7]; |
785 | fallthrough; |
786 | case 7: |
787 | p[6] = run_buf[6]; |
788 | fallthrough; |
789 | case 6: |
790 | p[5] = run_buf[5]; |
791 | fallthrough; |
792 | case 5: |
793 | p[4] = run_buf[4]; |
794 | fallthrough; |
795 | case 4: |
796 | p[3] = run_buf[3]; |
797 | fallthrough; |
798 | case 3: |
799 | p[2] = run_buf[2]; |
800 | fallthrough; |
801 | case 2: |
802 | p[1] = run_buf[1]; |
803 | fallthrough; |
804 | case 1: |
805 | p[0] = run_buf[0]; |
806 | } |
807 | return v; |
808 | } |
809 | #endif |
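
/*
 * For illustration only (hypothetical values): run_pack() below emits
 * the on-disk NTFS "mapping pairs" byte stream. Each run starts with a
 * header byte whose low nibble is the byte count of the packed length
 * and whose high nibble is the byte count of the packed signed lcn
 * delta (0 for a sparse run), followed by those little-endian bytes:
 *
 *   { .vcn = 0x00, .len = 0x20, .lcn = 0x100 }      -> 21 20 00 01
 *   { .vcn = 0x20, .len = 0x30, .lcn = SPARSE_LCN } -> 01 30
 *   { .vcn = 0x50, .len = 0x10, .lcn = 0xe0 }       -> 11 10 e0
 *   terminator                                      -> 00
 *
 * The deltas are +0x100 from 0, then none (hole), then
 * 0xe0 - 0x100 = -0x20 (sparse runs do not update prev_lcn).
 */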
810 | |
811 | /* |
812 | * run_pack - Pack runs into buffer. |
813 | * |
 * *packed_vcns - How many clusters we have packed.
 * packed_size  - How many bytes of run_buf we have used.
816 | */ |
817 | int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf, |
818 | u32 run_buf_size, CLST *packed_vcns) |
819 | { |
820 | CLST next_vcn, vcn, lcn; |
821 | CLST prev_lcn = 0; |
822 | CLST evcn1 = svcn + len; |
823 | const struct ntfs_run *r, *r_end; |
824 | int packed_size = 0; |
825 | size_t i; |
826 | s64 dlcn; |
827 | int offset_size, size_size, tmp; |
828 | |
829 | *packed_vcns = 0; |
830 | |
831 | if (!len) |
832 | goto out; |
833 | |
	/* Check that all required entries [svcn, evcn1) are available. */
	if (!run_lookup(run, svcn, &i))
836 | return -ENOENT; |
837 | |
838 | r_end = run->runs + run->count; |
839 | r = run->runs + i; |
840 | |
841 | for (next_vcn = r->vcn + r->len; next_vcn < evcn1; |
842 | next_vcn = r->vcn + r->len) { |
843 | if (++r >= r_end || r->vcn != next_vcn) |
844 | return -ENOENT; |
845 | } |
846 | |
	/* Repeat the cycle above and pack the runs. Assume no errors. */
848 | r = run->runs + i; |
849 | len = svcn - r->vcn; |
850 | vcn = svcn; |
851 | lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len); |
852 | len = r->len - len; |
853 | |
854 | for (;;) { |
855 | next_vcn = vcn + len; |
856 | if (next_vcn > evcn1) |
857 | len = evcn1 - vcn; |
858 | |
		/* How many bytes are required to pack len. */
		size_size = run_packed_size(len);
861 | |
		/* offset_size - How many bytes are needed to pack dlcn. */
863 | if (lcn == SPARSE_LCN) { |
864 | offset_size = 0; |
865 | dlcn = 0; |
866 | } else { |
867 | /* NOTE: lcn can be less than prev_lcn! */ |
868 | dlcn = (s64)lcn - prev_lcn; |
			offset_size = run_packed_size(dlcn);
870 | prev_lcn = lcn; |
871 | } |
872 | |
873 | tmp = run_buf_size - packed_size - 2 - offset_size; |
874 | if (tmp <= 0) |
875 | goto out; |
876 | |
		/* Check if we can store this entire run. */
878 | if (tmp < size_size) |
879 | goto out; |
880 | |
881 | if (run_buf) { |
882 | /* Pack run header. */ |
883 | run_buf[0] = ((u8)(size_size | (offset_size << 4))); |
884 | run_buf += 1; |
885 | |
886 | /* Pack the length of run. */ |
			run_pack_s64(run_buf, size_size, len);
888 | |
889 | run_buf += size_size; |
890 | /* Pack the offset from previous LCN. */ |
			run_pack_s64(run_buf, offset_size, dlcn);
892 | run_buf += offset_size; |
893 | } |
894 | |
895 | packed_size += 1 + offset_size + size_size; |
896 | *packed_vcns += len; |
897 | |
898 | if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1) |
899 | goto out; |
900 | |
901 | r += 1; |
902 | vcn = r->vcn; |
903 | lcn = r->lcn; |
904 | len = r->len; |
905 | } |
906 | |
907 | out: |
908 | /* Store last zero. */ |
909 | if (run_buf) |
910 | run_buf[0] = 0; |
911 | |
912 | return packed_size + 1; |
913 | } |
914 | |
915 | /* |
916 | * run_unpack - Unpack packed runs from @run_buf. |
917 | * |
918 | * Return: Error if negative, or real used bytes. |
919 | */ |
920 | int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino, |
921 | CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf, |
922 | int run_buf_size) |
923 | { |
924 | u64 prev_lcn, vcn64, lcn, next_vcn; |
925 | const u8 *run_last, *run_0; |
926 | bool is_mft = ino == MFT_REC_MFT; |
927 | |
928 | if (run_buf_size < 0) |
929 | return -EINVAL; |
930 | |
931 | /* Check for empty. */ |
932 | if (evcn + 1 == svcn) |
933 | return 0; |
934 | |
935 | if (evcn < svcn) |
936 | return -EINVAL; |
937 | |
938 | run_0 = run_buf; |
939 | run_last = run_buf + run_buf_size; |
940 | prev_lcn = 0; |
941 | vcn64 = svcn; |
942 | |
	/* Read all runs in the chain. */
945 | while (run_buf < run_last) { |
		/* size_size - How many bytes the packed len occupies. */
947 | u8 size_size = *run_buf & 0xF; |
		/* offset_size - How many bytes the packed dlcn occupies. */
949 | u8 offset_size = *run_buf++ >> 4; |
950 | u64 len; |
951 | |
952 | if (!size_size) |
953 | break; |
954 | |
955 | /* |
956 | * Unpack runs. |
		 * NOTE: Runs are stored in little-endian order.
		 * "len" is an unsigned value, "dlcn" is signed.
		 * A large positive number requires 5 bytes to store,
		 * e.g.: 05 FF 7E FF FF 00 00 00
961 | */ |
962 | if (size_size > 8) |
963 | return -EINVAL; |
964 | |
		len = run_unpack_s64(run_buf, size_size, 0);
966 | /* Skip size_size. */ |
967 | run_buf += size_size; |
968 | |
969 | if (!len) |
970 | return -EINVAL; |
971 | |
972 | if (!offset_size) |
973 | lcn = SPARSE_LCN64; |
974 | else if (offset_size <= 8) { |
975 | s64 dlcn; |
976 | |
977 | /* Initial value of dlcn is -1 or 0. */ |
978 | dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0; |
			dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
980 | /* Skip offset_size. */ |
981 | run_buf += offset_size; |
982 | |
983 | if (!dlcn) |
984 | return -EINVAL; |
985 | lcn = prev_lcn + dlcn; |
986 | prev_lcn = lcn; |
987 | } else |
988 | return -EINVAL; |
989 | |
990 | next_vcn = vcn64 + len; |
991 | /* Check boundary. */ |
992 | if (next_vcn > evcn + 1) |
993 | return -EINVAL; |
994 | |
995 | #ifndef CONFIG_NTFS3_64BIT_CLUSTER |
996 | if (next_vcn > 0x100000000ull || (lcn + len) > 0x100000000ull) { |
997 | ntfs_err( |
998 | sbi->sb, |
999 | "This driver is compiled without CONFIG_NTFS3_64BIT_CLUSTER (like windows driver).\n" |
1000 | "Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n" |
1001 | "Activate CONFIG_NTFS3_64BIT_CLUSTER to process this case" , |
1002 | vcn64, lcn, len); |
1003 | return -EOPNOTSUPP; |
1004 | } |
1005 | #endif |
1006 | if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) { |
1007 | /* LCN range is out of volume. */ |
1008 | return -EINVAL; |
1009 | } |
1010 | |
1011 | if (!run) |
1012 | ; /* Called from check_attr(fslog.c) to check run. */ |
1013 | else if (run == RUN_DEALLOCATE) { |
1014 | /* |
1015 | * Called from ni_delete_all to free clusters |
1016 | * without storing in run. |
1017 | */ |
1018 | if (lcn != SPARSE_LCN64) |
				mark_as_free_ex(sbi, lcn, len, true);
1020 | } else if (vcn64 >= vcn) { |
			if (!run_add_entry(run, vcn64, lcn, len, is_mft))
1022 | return -ENOMEM; |
1023 | } else if (next_vcn > vcn) { |
1024 | u64 dlen = vcn - vcn64; |
1025 | |
			if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
					   is_mft))
1028 | return -ENOMEM; |
1029 | } |
1030 | |
1031 | vcn64 = next_vcn; |
1032 | } |
1033 | |
1034 | if (vcn64 != evcn + 1) { |
1035 | /* Not expected length of unpacked runs. */ |
1036 | return -EINVAL; |
1037 | } |
1038 | |
1039 | return run_buf - run_0; |
1040 | } |
1041 | |
1042 | #ifdef NTFS3_CHECK_FREE_CLST |
1043 | /* |
1044 | * run_unpack_ex - Unpack packed runs from "run_buf". |
1045 | * |
1046 | * Checks unpacked runs to be used in bitmap. |
1047 | * |
1048 | * Return: Error if negative, or real used bytes. |
1049 | */ |
1050 | int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino, |
1051 | CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf, |
1052 | int run_buf_size) |
1053 | { |
1054 | int ret, err; |
1055 | CLST next_vcn, lcn, len; |
1056 | size_t index; |
1057 | bool ok; |
1058 | struct wnd_bitmap *wnd; |
1059 | |
1060 | ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size); |
1061 | if (ret <= 0) |
1062 | return ret; |
1063 | |
1064 | if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE) |
1065 | return ret; |
1066 | |
1067 | if (ino == MFT_REC_BADCLUST) |
1068 | return ret; |
1069 | |
1070 | next_vcn = vcn = svcn; |
1071 | wnd = &sbi->used.bitmap; |
1072 | |
	for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
	     next_vcn <= evcn;
	     ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
1076 | if (!ok || next_vcn != vcn) |
1077 | return -EINVAL; |
1078 | |
1079 | next_vcn = vcn + len; |
1080 | |
1081 | if (lcn == SPARSE_LCN) |
1082 | continue; |
1083 | |
1084 | if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) |
1085 | continue; |
1086 | |
		down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
1088 | /* Check for free blocks. */ |
		ok = wnd_is_used(wnd, lcn, len);
		up_read(&wnd->rw_lock);
1091 | if (ok) |
1092 | continue; |
1093 | |
1094 | /* Looks like volume is corrupted. */ |
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
1096 | |
		if (down_write_trylock(&wnd->rw_lock)) {
1098 | /* Mark all zero bits as used in range [lcn, lcn+len). */ |
			size_t done;

			err = wnd_set_used_safe(wnd, lcn, len, &done);
			up_write(&wnd->rw_lock);
1102 | if (err) |
1103 | return err; |
1104 | } |
1105 | } |
1106 | |
1107 | return ret; |
1108 | } |
1109 | #endif |
1110 | |
1111 | /* |
1112 | * run_get_highest_vcn |
1113 | * |
 * Return the highest vcn from a mapping pairs array.
 * It is used while replaying the log file.
1116 | */ |
1117 | int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn) |
1118 | { |
1119 | u64 vcn64 = vcn; |
1120 | u8 size_size; |
1121 | |
1122 | while ((size_size = *run_buf & 0xF)) { |
1123 | u8 offset_size = *run_buf++ >> 4; |
1124 | u64 len; |
1125 | |
1126 | if (size_size > 8 || offset_size > 8) |
1127 | return -EINVAL; |
1128 | |
		len = run_unpack_s64(run_buf, size_size, 0);
1130 | if (!len) |
1131 | return -EINVAL; |
1132 | |
1133 | run_buf += size_size + offset_size; |
1134 | vcn64 += len; |
1135 | |
1136 | #ifndef CONFIG_NTFS3_64BIT_CLUSTER |
1137 | if (vcn64 > 0x100000000ull) |
1138 | return -EINVAL; |
1139 | #endif |
1140 | } |
1141 | |
1142 | *highest_vcn = vcn64 - 1; |
1143 | return 0; |
1144 | } |
1145 | |
1146 | /* |
1147 | * run_clone |
1148 | * |
 * Make a copy of the run.
1150 | */ |
1151 | int run_clone(const struct runs_tree *run, struct runs_tree *new_run) |
1152 | { |
1153 | size_t bytes = run->count * sizeof(struct ntfs_run); |
1154 | |
1155 | if (bytes > new_run->allocated) { |
		struct ntfs_run *new_ptr = kvmalloc(bytes, GFP_KERNEL);
1157 | |
1158 | if (!new_ptr) |
1159 | return -ENOMEM; |
1160 | |
		kvfree(new_run->runs);
1162 | new_run->runs = new_ptr; |
1163 | new_run->allocated = bytes; |
1164 | } |
1165 | |
1166 | memcpy(new_run->runs, run->runs, bytes); |
1167 | new_run->count = run->count; |
1168 | return 0; |
1169 | } |
1170 | |