// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/fs.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
			       const __le16 *name, u8 name_len,
			       const u16 *upcase)
{
	/* First, compare the type codes. */
	int diff = le32_to_cpu(left->type) - le32_to_cpu(type);

	if (diff)
		return diff;

	/* They have the same type code, so we have to compare the names. */
	return ntfs_cmp_names(attr_name(left), left->name_len, name, name_len,
			      upcase, true);
}

/*
 * mi_new_attt_id
 *
 * Return: Unused attribute id that is less than mrec->next_attr_id.
 */
static __le16 mi_new_attt_id(struct mft_inode *mi)
{
	u16 free_id, max_id, t16;
	struct MFT_REC *rec = mi->mrec;
	struct ATTRIB *attr;
	__le16 id;

	id = rec->next_attr_id;
	free_id = le16_to_cpu(id);
	if (free_id < 0x7FFF) {
		rec->next_attr_id = cpu_to_le16(free_id + 1);
		return id;
	}

	/* One record can store up to 1024/24 ~= 42 attributes. */
	free_id = 0;
	max_id = 0;

	attr = NULL;

	for (;;) {
		attr = mi_enum_attr(mi, attr);
		if (!attr) {
			rec->next_attr_id = cpu_to_le16(max_id + 1);
			mi->dirty = true;
			return cpu_to_le16(free_id);
		}

		t16 = le16_to_cpu(attr->id);
		if (t16 == free_id) {
			free_id += 1;
			attr = NULL;
		} else if (max_id < t16)
			max_id = t16;
	}
}

int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi)
{
	int err;
	struct mft_inode *m = kzalloc(sizeof(struct mft_inode), GFP_NOFS);

	if (!m)
		return -ENOMEM;

	err = mi_init(m, sbi, rno);
	if (err) {
		kfree(m);
		return err;
	}

	err = mi_read(m, false);
	if (err) {
		mi_put(m);
		return err;
	}

	*mi = m;
	return 0;
}

void mi_put(struct mft_inode *mi)
{
	mi_clear(mi);
	kfree(mi);
}

int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno)
{
	mi->sbi = sbi;
	mi->rno = rno;
	mi->mrec = kmalloc(sbi->record_size, GFP_NOFS);
	if (!mi->mrec)
		return -ENOMEM;

	return 0;
}

/*
 * mi_read - Read MFT data.
 */
int mi_read(struct mft_inode *mi, bool is_mft)
{
	int err;
	struct MFT_REC *rec = mi->mrec;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 bpr = sbi->record_size;
	u64 vbo = (u64)mi->rno << sbi->record_bits;
	struct ntfs_inode *mft_ni = sbi->mft.ni;
	struct runs_tree *run = mft_ni ? &mft_ni->file.run : NULL;
	struct rw_semaphore *rw_lock = NULL;

	if (is_mounted(sbi)) {
		if (!is_mft && mft_ni) {
			rw_lock = &mft_ni->file.run_lock;
			down_read(rw_lock);
		}
	}

	err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
	if (rw_lock)
		up_read(rw_lock);
	if (!err)
		goto ok;

	if (err == -E_NTFS_FIXUP) {
		mi->dirty = true;
		goto ok;
	}

	if (err != -ENOENT)
		goto out;

	if (rw_lock) {
		ni_lock(mft_ni);
		down_write(rw_lock);
	}
	err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, run,
				 vbo >> sbi->cluster_bits);
	if (rw_lock) {
		up_write(rw_lock);
		ni_unlock(mft_ni);
	}
	if (err)
		goto out;

	if (rw_lock)
		down_read(rw_lock);
	err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
	if (rw_lock)
		up_read(rw_lock);

	if (err == -E_NTFS_FIXUP) {
		mi->dirty = true;
		goto ok;
	}
	if (err)
		goto out;

ok:
	/* Check field 'total' only here. */
	if (le32_to_cpu(rec->total) != bpr) {
		err = -EINVAL;
		goto out;
	}

	return 0;

out:
	if (err == -E_NTFS_CORRUPT) {
		ntfs_err(sbi->sb, "mft corrupted");
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		err = -EINVAL;
	}

	return err;
}

/*
 * mi_enum_attr - Start/continue attribute enumeration in a record.
 *
 * NOTE: mi->mrec is a buffer of size sbi->record_size.
 * Here we rely on mi->mrec->total == sbi->record_size (see mi_read).
 */
struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
{
	const struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 t32, off, asize, prev_type;
	u16 t16;
	u64 data_size, alloc_size, tot_size;

	if (!attr) {
		u32 total = le32_to_cpu(rec->total);

		off = le16_to_cpu(rec->attr_off);

		if (used > total)
			return NULL;

		if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
		    !IS_ALIGNED(off, 4)) {
			return NULL;
		}

		/* Skip non-resident records. */
		if (!is_rec_inuse(rec))
			return NULL;

		prev_type = 0;
		attr = Add2Ptr(rec, off);
	} else {
		/* Check if the input attr is inside the record. */
		off = PtrOffset(rec, attr);
		if (off >= used)
			return NULL;

		asize = le32_to_cpu(attr->size);
		if (asize < SIZEOF_RESIDENT) {
			/* Impossible, because we should never have returned such an attribute. */
			return NULL;
		}

		/* Overflow check. */
		if (off + asize < off)
			return NULL;

		prev_type = le32_to_cpu(attr->type);
		attr = Add2Ptr(attr, asize);
		off += asize;
	}

	asize = le32_to_cpu(attr->size);

	/* Can we use the first field (attr->type)? */
	if (off + 8 > used) {
		static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
		return NULL;
	}

	if (attr->type == ATTR_END) {
		/* End of enumeration. */
		return NULL;
	}

	/* 0x100 is the last known attribute type for now. */
	t32 = le32_to_cpu(attr->type);
	if (!t32 || (t32 & 0xf) || (t32 > 0x100))
		return NULL;

	/* Attributes in a record must be ordered by type. */
	if (t32 < prev_type)
		return NULL;

	/* Check overflow and boundary. */
	if (off + asize < off || off + asize > used)
		return NULL;

	/* Check the size of the attribute. */
	if (!attr->non_res) {
		/* Check resident fields. */
		if (asize < SIZEOF_RESIDENT)
			return NULL;

		t16 = le16_to_cpu(attr->res.data_off);
		if (t16 > asize)
			return NULL;

		if (le32_to_cpu(attr->res.data_size) > asize - t16)
			return NULL;

		t32 = sizeof(short) * attr->name_len;
		if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
			return NULL;

		return attr;
	}

	/* Check nonresident fields. */
	if (attr->non_res != 1)
		return NULL;

	t16 = le16_to_cpu(attr->nres.run_off);
	if (t16 > asize)
		return NULL;

	t32 = sizeof(short) * attr->name_len;
	if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
		return NULL;

	/* Check start/end vcn. */
	if (le64_to_cpu(attr->nres.svcn) > le64_to_cpu(attr->nres.evcn) + 1)
		return NULL;

	data_size = le64_to_cpu(attr->nres.data_size);
	if (le64_to_cpu(attr->nres.valid_size) > data_size)
		return NULL;

	alloc_size = le64_to_cpu(attr->nres.alloc_size);
	if (data_size > alloc_size)
		return NULL;

	t32 = mi->sbi->cluster_mask;
	if (alloc_size & t32)
		return NULL;

	if (!attr->nres.svcn && is_attr_ext(attr)) {
		/* First segment of a sparse/compressed attribute. */
		if (asize + 8 < SIZEOF_NONRESIDENT_EX)
			return NULL;

		tot_size = le64_to_cpu(attr->nres.total_size);
		if (tot_size & t32)
			return NULL;

		if (tot_size > alloc_size)
			return NULL;
	} else {
		if (asize + 8 < SIZEOF_NONRESIDENT)
			return NULL;

		if (attr->nres.c_unit)
			return NULL;
	}

	return attr;
}
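
/*
 * Usage sketch (not part of the upstream driver, names are illustrative):
 * walking all attributes of a loaded record. Passing NULL starts the
 * enumeration; each subsequent call continues after the attribute
 * returned previously. The returned pointer aliases mi->mrec and must
 * not be freed.
 */
static __maybe_unused void example_mi_enum(struct mft_inode *mi)
{
	struct ATTRIB *attr = NULL;

	while ((attr = mi_enum_attr(mi, attr))) {
		pr_debug("attr type 0x%x, size %u\n",
			 le32_to_cpu(attr->type), le32_to_cpu(attr->size));
	}
}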

/*
 * mi_find_attr - Find the attribute by type, name and id.
 */
struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
			    enum ATTR_TYPE type, const __le16 *name,
			    u8 name_len, const __le16 *id)
{
	u32 type_in = le32_to_cpu(type);
	u32 atype;

next_attr:
	attr = mi_enum_attr(mi, attr);
	if (!attr)
		return NULL;

	atype = le32_to_cpu(attr->type);
	if (atype > type_in)
		return NULL;

	if (atype < type_in)
		goto next_attr;

	if (attr->name_len != name_len)
		goto next_attr;

	if (name_len && memcmp(attr_name(attr), name, name_len * sizeof(short)))
		goto next_attr;

	if (id && *id != attr->id)
		goto next_attr;

	return attr;
}
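
/*
 * Usage sketch (not part of the upstream driver, names are illustrative):
 * looking up the unnamed $DATA attribute of a record. NULL for 'attr'
 * starts from the beginning, name_len == 0 means "no name", and a NULL
 * id matches any attribute id.
 */
static __maybe_unused struct ATTRIB *
example_find_unnamed_data(struct mft_inode *mi)
{
	return mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, NULL);
}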

int mi_write(struct mft_inode *mi, int wait)
{
	struct MFT_REC *rec;
	int err;
	struct ntfs_sb_info *sbi;

	if (!mi->dirty)
		return 0;

	sbi = mi->sbi;
	rec = mi->mrec;

	err = ntfs_write_bh(sbi, &rec->rhdr, &mi->nb, wait);
	if (err)
		return err;

	if (mi->rno < sbi->mft.recs_mirr)
		sbi->flags |= NTFS_FLAGS_MFTMIRR;

	mi->dirty = false;

	return 0;
}

int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
		  __le16 flags, bool is_mft)
{
	int err;
	u16 seq = 1;
	struct MFT_REC *rec;
	u64 vbo = (u64)rno << sbi->record_bits;

	err = mi_init(mi, sbi, rno);
	if (err)
		return err;

	rec = mi->mrec;

	if (rno == MFT_REC_MFT) {
		;
	} else if (rno < MFT_REC_FREE) {
		seq = rno;
	} else if (rno >= sbi->mft.used) {
		;
	} else if (mi_read(mi, is_mft)) {
		;
	} else if (rec->rhdr.sign == NTFS_FILE_SIGNATURE) {
		/* Record is reused. Update its sequence number. */
		seq = le16_to_cpu(rec->seq) + 1;
		if (!seq)
			seq = 1;
	}

	memcpy(rec, sbi->new_rec, sbi->record_size);

	rec->seq = cpu_to_le16(seq);
	rec->flags = RECORD_FLAG_IN_USE | flags;
	if (MFTRECORD_FIXUP_OFFSET == MFTRECORD_FIXUP_OFFSET_3)
		rec->mft_record = cpu_to_le32(rno);

	mi->dirty = true;

	if (!mi->nb.nbufs) {
		struct ntfs_inode *ni = sbi->mft.ni;
		bool lock = false;

		if (is_mounted(sbi) && !is_mft) {
			down_read(&ni->file.run_lock);
			lock = true;
		}

		err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size,
				  &mi->nb);
		if (lock)
			up_read(&ni->file.run_lock);
	}

	return err;
}
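
/*
 * Usage sketch (not part of the upstream driver, names are illustrative):
 * initializing a freshly allocated MFT record and flushing it. Record
 * flags of 0 are assumed here to mean an ordinary (non-directory)
 * record; real callers pass the flags they need.
 */
static __maybe_unused int example_format_record(struct ntfs_sb_info *sbi,
						CLST rno)
{
	struct mft_inode *mi = kzalloc(sizeof(*mi), GFP_NOFS);
	int err;

	if (!mi)
		return -ENOMEM;

	err = mi_format_new(mi, sbi, rno, 0, false);
	if (!err)
		err = mi_write(mi, 1 /* wait */);

	mi_put(mi);
	return err;
}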

/*
 * mi_insert_attr - Reserve space for a new attribute.
 *
 * Return: Not fully constructed attribute or NULL if not possible to create.
 */
struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
			      const __le16 *name, u8 name_len, u32 asize,
			      u16 name_off)
{
	size_t tail;
	struct ATTRIB *attr;
	__le16 id;
	struct MFT_REC *rec = mi->mrec;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 used = le32_to_cpu(rec->used);
	const u16 *upcase = sbi->upcase;

	/* Can we insert this attribute into the record? */
	if (used + asize > sbi->record_size)
		return NULL;

	/*
	 * Scan through the list of attributes to find the point
	 * at which we should insert it.
	 */
	attr = NULL;
	while ((attr = mi_enum_attr(mi, attr))) {
		int diff = compare_attr(attr, type, name, name_len, upcase);

		if (diff < 0)
			continue;

		if (!diff && !is_attr_indexed(attr))
			return NULL;
		break;
	}

	if (!attr) {
		/* Append. */
		tail = 8;
		attr = Add2Ptr(rec, used - 8);
	} else {
		/* Insert before 'attr'. */
		tail = used - PtrOffset(rec, attr);
	}

	id = mi_new_attt_id(mi);

	memmove(Add2Ptr(attr, asize), attr, tail);
	memset(attr, 0, asize);

	attr->type = type;
	attr->size = cpu_to_le32(asize);
	attr->name_len = name_len;
	attr->name_off = cpu_to_le16(name_off);
	attr->id = id;

	memmove(Add2Ptr(attr, name_off), name, name_len * sizeof(short));
	rec->used = cpu_to_le32(used + asize);

	mi->dirty = true;

	return attr;
}
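
/*
 * Usage sketch (not part of the upstream driver, names are illustrative):
 * reserving space for an unnamed resident attribute and filling in the
 * resident header. The size/offset math shown here (payload placed and
 * aligned right after the resident header) is an assumption that mirrors
 * what resident-attribute callers of mi_insert_attr() typically do.
 */
static __maybe_unused struct ATTRIB *
example_insert_resident(struct mft_inode *mi, enum ATTR_TYPE type,
			u32 data_size)
{
	u32 asize = SIZEOF_RESIDENT + ALIGN(data_size, 8);
	struct ATTRIB *attr;

	/* name_len == 0, so the payload starts right after the header. */
	attr = mi_insert_attr(mi, type, NULL, 0, asize, SIZEOF_RESIDENT);
	if (!attr)
		return NULL;

	attr->non_res = 0;
	attr->res.data_size = cpu_to_le32(data_size);
	attr->res.data_off = cpu_to_le16(SIZEOF_RESIDENT);

	return attr;
}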

/*
 * mi_remove_attr - Remove the attribute from the record.
 *
 * NOTE: After removal, the memory at 'attr' holds the next attribute
 * (the tail of the record is moved over the removed one).
 */
bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
		    struct ATTRIB *attr)
{
	struct MFT_REC *rec = mi->mrec;
	u32 aoff = PtrOffset(rec, attr);
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);

	if (aoff + asize > used)
		return false;

	if (ni && is_attr_indexed(attr)) {
		u16 links = le16_to_cpu(ni->mi.mrec->hard_links);
		struct ATTR_FILE_NAME *fname =
			attr->type != ATTR_NAME ?
				NULL :
				resident_data_ex(attr,
						 SIZEOF_ATTRIBUTE_FILENAME);
		if (fname && fname->type == FILE_NAME_DOS) {
			/* Do not decrease the link count when deleting a DOS name. */
		} else if (!links) {
			/* Minor error; not critical. */
		} else {
			ni->mi.mrec->hard_links = cpu_to_le16(links - 1);
			ni->mi.dirty = true;
		}
	}

	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;

	return true;
}
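
/*
 * Usage sketch (not part of the upstream driver, names are illustrative):
 * removing every unnamed attribute of a given type from a record.
 * Because the record tail is shifted down over each removed attribute,
 * the search simply restarts from the beginning after every removal.
 */
static __maybe_unused void example_remove_all(struct ntfs_inode *ni,
					      struct mft_inode *mi,
					      enum ATTR_TYPE type)
{
	struct ATTRIB *attr;

	while ((attr = mi_find_attr(mi, NULL, type, NULL, 0, NULL))) {
		if (!mi_remove_attr(ni, mi, attr))
			break;
	}
}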

/* bytes = "new attribute size" - "old attribute size" */
bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
{
	struct MFT_REC *rec = mi->mrec;
	u32 aoff = PtrOffset(rec, attr);
	u32 total, used = le32_to_cpu(rec->used);
	u32 nsize, asize = le32_to_cpu(attr->size);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	int tail = (int)(used - aoff - asize);
	int dsize;
	char *next;

	if (tail < 0 || aoff >= used)
		return false;

	if (!bytes)
		return true;

	total = le32_to_cpu(rec->total);
	next = Add2Ptr(attr, asize);

	if (bytes > 0) {
		dsize = ALIGN(bytes, 8);
		if (used + dsize > total)
			return false;
		nsize = asize + dsize;
		/* Move the tail. */
		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
		used += dsize;
		rsize += dsize;
	} else {
		dsize = ALIGN(-bytes, 8);
		if (dsize > asize)
			return false;
		nsize = asize - dsize;
		memmove(next - dsize, next, tail);
		used -= dsize;
		rsize -= dsize;
	}

	rec->used = cpu_to_le32(used);
	attr->size = cpu_to_le32(nsize);
	if (!attr->non_res)
		attr->res.data_size = cpu_to_le32(rsize);
	mi->dirty = true;

	return true;
}
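
/*
 * Usage sketch (not part of the upstream driver, names are illustrative):
 * growing the payload of a resident attribute by 'more' bytes. The
 * argument to mi_resize_attr() is the signed difference between the new
 * and the old size; the helper shifts the record tail and updates
 * rec->used, attr->size and res.data_size itself.
 */
static __maybe_unused bool example_grow_resident(struct mft_inode *mi,
						 struct ATTRIB *attr, u32 more)
{
	/* The caller must ensure 'attr' is resident and lives in 'mi'. */
	if (attr->non_res)
		return false;

	return mi_resize_attr(mi, attr, (int)more);
}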

/*
 * mi_pack_runs - Pack runs into the MFT record.
 *
 * If packing fails, the record is not changed.
 */
int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
		 struct runs_tree *run, CLST len)
{
	int err = 0;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 new_run_size;
	CLST plen;
	struct MFT_REC *rec = mi->mrec;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	u32 used = le32_to_cpu(rec->used);
	u32 aoff = PtrOffset(rec, attr);
	u32 asize = le32_to_cpu(attr->size);
	char *next = Add2Ptr(attr, asize);
	u16 run_off = le16_to_cpu(attr->nres.run_off);
	u32 run_size = asize - run_off;
	u32 tail = used - aoff - asize;
	u32 dsize = sbi->record_size - used;

	/* Make the maximum gap in the current record. */
	memmove(next + dsize, next, tail);

	/* Pack as much as possible. */
	err = run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size + dsize,
		       &plen);
	if (err < 0) {
		/* Restore the original layout. */
		memmove(next, next + dsize, tail);
		return err;
	}

	new_run_size = ALIGN(err, 8);

	memmove(next + new_run_size - run_size, next + dsize, tail);

	attr->size = cpu_to_le32(asize + new_run_size - run_size);
	attr->nres.evcn = cpu_to_le64(svcn + plen - 1);
	rec->used = cpu_to_le32(used + new_run_size - run_size);
	mi->dirty = true;

	return 0;
}