// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "lru.h"
#include "recovery.h"
#include "trace.h"
#include "varint.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>

static void bch2_discard_one_bucket_fast(struct bch_fs *c, struct bpos bucket);

/* Persistent alloc info: */

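/*
 * Byte width of each fixed-size v1 alloc field, indexed by field number
 * (generated from BCH_ALLOC_FIELDS_V1()):
 */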
static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
	BCH_ALLOC_FIELDS_V1()
#undef x
};

struct bkey_alloc_unpacked {
	u64 journal_seq;
	u8 gen;
	u8 oldest_gen;
	u8 data_type;
	bool need_discard:1;
	bool need_inc_gen:1;
#define x(_name, _bits) u##_bits _name;
	BCH_ALLOC_FIELDS_V2()
#undef x
};

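/*
 * Read one fixed-width little-endian v1 field and advance *p past it; fields
 * not marked present in a->fields decode as 0:
 */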
static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1:
		v = *((const u8 *) *p);
		break;
	case 2:
		v = le16_to_cpup(*p);
		break;
	case 4:
		v = le32_to_cpup(*p);
		break;
	case 8:
		v = le64_to_cpup(*p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}

static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
				 struct bkey_s_c k)
{
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;
	unsigned idx = 0;

	out->gen = in->gen;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()
#undef x
}

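/*
 * v2 and v3 alloc keys store their fields varint encoded, in declaration
 * order; fields past nr_fields decode as 0, and decoding fails if a value
 * doesn't fit in the destination field:
 */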
static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen = a.v->gen;
	out->oldest_gen = a.v->oldest_gen;
	out->data_type = a.v->data_type;

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef x
	return 0;
}

static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen = a.v->gen;
	out->oldest_gen = a.v->oldest_gen;
	out->data_type = a.v->data_type;
	out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
	out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
	out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef x
	return 0;
}

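/* Unpack any on-disk alloc key version into the common unpacked representation: */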
static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
	struct bkey_alloc_unpacked ret = { .gen = 0 };

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		bch2_alloc_unpack_v1(&ret, k);
		break;
	case KEY_TYPE_alloc_v2:
		bch2_alloc_unpack_v2(&ret, k);
		break;
	case KEY_TYPE_alloc_v3:
		bch2_alloc_unpack_v3(&ret, k);
		break;
	}

	return ret;
}

static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}

int bch2_alloc_v1_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags,
			  struct printbuf *err)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
	int ret = 0;

	/* allow for unknown fields */
	bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v), c, err,
			 alloc_v1_val_size_bad,
			 "incorrect value size (%zu < %u)",
			 bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
fsck_err:
	return ret;
}

int bch2_alloc_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags,
			  struct printbuf *err)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k), c, err,
			 alloc_v2_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}

int bch2_alloc_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags,
			  struct printbuf *err)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k), c, err,
			 alloc_v2_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}

int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags, struct printbuf *err)
{
	struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
	int ret = 0;

	bkey_fsck_err_on(alloc_v4_u64s(a.v) > bkey_val_u64s(k.k), c, err,
			 alloc_v4_val_size_bad,
			 "bad val size (%u > %zu)",
			 alloc_v4_u64s(a.v), bkey_val_u64s(k.k));

	bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
			 BCH_ALLOC_V4_NR_BACKPOINTERS(a.v), c, err,
			 alloc_v4_backpointers_start_bad,
			 "invalid backpointers_start");

	bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type, c, err,
			 alloc_key_data_type_bad,
			 "invalid data type (got %u should be %u)",
			 a.v->data_type, alloc_data_type(*a.v, a.v->data_type));

	switch (a.v->data_type) {
	case BCH_DATA_free:
	case BCH_DATA_need_gc_gens:
	case BCH_DATA_need_discard:
		bkey_fsck_err_on(bch2_bucket_sectors(*a.v) || a.v->stripe,
				 c, err, alloc_key_empty_but_have_data,
				 "empty data type free but have data");
		break;
	case BCH_DATA_sb:
	case BCH_DATA_journal:
	case BCH_DATA_btree:
	case BCH_DATA_user:
	case BCH_DATA_parity:
		bkey_fsck_err_on(!bch2_bucket_sectors_dirty(*a.v),
				 c, err, alloc_key_dirty_sectors_0,
				 "data_type %s but dirty_sectors==0",
				 bch2_data_type_str(a.v->data_type));
		break;
	case BCH_DATA_cached:
		bkey_fsck_err_on(!a.v->cached_sectors ||
				 bch2_bucket_sectors_dirty(*a.v) ||
				 a.v->stripe,
				 c, err, alloc_key_cached_inconsistency,
				 "data type inconsistency");

		bkey_fsck_err_on(!a.v->io_time[READ] &&
				 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
				 c, err, alloc_key_cached_but_read_time_zero,
				 "cached bucket with read_time == 0");
		break;
	case BCH_DATA_stripe:
		break;
	}
fsck_err:
	return ret;
}

void bch2_alloc_v4_swab(struct bkey_s k)
{
	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
	struct bch_backpointer *bp, *bps;

	a->journal_seq = swab64(a->journal_seq);
	a->flags = swab32(a->flags);
	a->dirty_sectors = swab32(a->dirty_sectors);
	a->cached_sectors = swab32(a->cached_sectors);
	a->io_time[0] = swab64(a->io_time[0]);
	a->io_time[1] = swab64(a->io_time[1]);
	a->stripe = swab32(a->stripe);
	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
	a->fragmentation_lru = swab64(a->fragmentation_lru);

	bps = alloc_v4_backpointers(a);
	for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
		bp->bucket_offset = swab40(bp->bucket_offset);
		bp->bucket_len = swab32(bp->bucket_len);
		bch2_bpos_swab(&bp->pos);
	}
}

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);

	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
	bch2_prt_data_type(out, a->data_type);
	prt_newline(out);
	prt_printf(out, "journal_seq %llu", a->journal_seq);
	prt_newline(out);
	prt_printf(out, "need_discard %llu", BCH_ALLOC_V4_NEED_DISCARD(a));
	prt_newline(out);
	prt_printf(out, "need_inc_gen %llu", BCH_ALLOC_V4_NEED_INC_GEN(a));
	prt_newline(out);
	prt_printf(out, "dirty_sectors %u", a->dirty_sectors);
	prt_newline(out);
	prt_printf(out, "cached_sectors %u", a->cached_sectors);
	prt_newline(out);
	prt_printf(out, "stripe %u", a->stripe);
	prt_newline(out);
	prt_printf(out, "stripe_redundancy %u", a->stripe_redundancy);
	prt_newline(out);
	prt_printf(out, "io_time[READ] %llu", a->io_time[READ]);
	prt_newline(out);
	prt_printf(out, "io_time[WRITE] %llu", a->io_time[WRITE]);
	prt_newline(out);
	prt_printf(out, "fragmentation %llu", a->fragmentation_lru);
	prt_newline(out);
	prt_printf(out, "bp_start %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
	printbuf_indent_sub(out, 2);
}

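/*
 * Convert an alloc key of any version to in-memory v4 format; for v4 keys
 * this normalizes backpointers_start and drops any backpointers stored in the
 * value:
 */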
void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		*out = *bkey_s_c_to_alloc_v4(k).v;

		src = alloc_v4_backpointers(out);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(out);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
	} else {
		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

		*out = (struct bch_alloc_v4) {
			.journal_seq		= u.journal_seq,
			.flags			= u.need_discard,
			.gen			= u.gen,
			.oldest_gen		= u.oldest_gen,
			.data_type		= u.data_type,
			.stripe_redundancy	= u.stripe_redundancy,
			.dirty_sectors		= u.dirty_sectors,
			.cached_sectors		= u.cached_sectors,
			.io_time[READ]		= u.read_time,
			.io_time[WRITE]		= u.write_time,
			.stripe			= u.stripe,
		};

		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
	}
}

static noinline struct bkey_i_alloc_v4 *
__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_i_alloc_v4 *ret;

	ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
	if (IS_ERR(ret))
		return ret;

	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		bkey_reassemble(&ret->k_i, k);

		src = alloc_v4_backpointers(&ret->v);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(&ret->v);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
		set_alloc_v4_u64s(ret);
	} else {
		bkey_alloc_v4_init(&ret->k_i);
		ret->k.p = k.k->p;
		bch2_alloc_to_v4(k, &ret->v);
	}
	return ret;
}

static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v4 a;

	if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
	    ((a = bkey_s_c_to_alloc_v4(k), true) &&
	     BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
		return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);

	return __bch2_alloc_to_v4_mut(trans, k);
}

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	return bch2_alloc_to_v4_mut_inlined(trans, k);
}

struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
			      struct bpos pos)
{
	struct bkey_s_c k;
	struct bkey_i_alloc_v4 *a;
	int ret;

	k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
			       BTREE_ITER_WITH_UPDATES|
			       BTREE_ITER_CACHED|
			       BTREE_ITER_INTENT);
	ret = bkey_err(k);
	if (unlikely(ret))
		return ERR_PTR(ret);

	a = bch2_alloc_to_v4_mut_inlined(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (unlikely(ret))
		goto err;
	return a;
err:
	bch2_trans_iter_exit(trans, iter);
	return ERR_PTR(ret);
}

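/*
 * A single bucket_gens key covers KEY_TYPE_BUCKET_GENS_NR buckets; these
 * helpers convert between positions in the alloc btree and positions in the
 * bucket_gens btree:
 */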
static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
{
	*offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;

	pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
	return pos;
}

static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
{
	pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
	pos.offset += offset;
	return pos;
}

static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
{
	return k.k->type == KEY_TYPE_bucket_gens
		? bkey_s_c_to_bucket_gens(k).v->gens[offset]
		: 0;
}

int bch2_bucket_gens_invalid(struct bch_fs *c, struct bkey_s_c k,
			     enum bkey_invalid_flags flags,
			     struct printbuf *err)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens), c, err,
			 bucket_gens_val_size_bad,
			 "bad val size (%zu != %zu)",
			 bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
fsck_err:
	return ret;
}

void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
		if (i)
			prt_char(out, ' ');
		prt_printf(out, "%u", g.v->gens[i]);
	}
}

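/*
 * Walk the alloc btree and build the initial bucket_gens btree, batching the
 * gens of adjacent buckets into a single key:
 */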
int bch2_bucket_gens_init(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_bucket_gens g;
	bool have_bucket_gens_key = false;
	int ret;

	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
				 BTREE_ITER_PREFETCH, k, ({
		/*
		 * Not a fsck error because this is checked/repaired by
		 * bch2_check_alloc_key() which runs later:
		 */
		if (!bch2_dev_bucket_exists(c, k.k->p))
			continue;

		struct bch_alloc_v4 a;
		u8 gen = bch2_alloc_to_v4(k, &a)->gen;
		unsigned offset;
		struct bpos pos = alloc_gens_pos(iter.pos, &offset);
		int ret2 = 0;

		if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
			ret2 = bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0) ?:
				bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
			if (ret2)
				goto iter_err;
			have_bucket_gens_key = false;
		}

		if (!have_bucket_gens_key) {
			bkey_bucket_gens_init(&g.k_i);
			g.k.p = pos;
			have_bucket_gens_key = true;
		}

		g.v.gens[offset] = gen;
iter_err:
		ret2;
	}));

	if (have_bucket_gens_key && !ret)
		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc,
			bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));

	bch2_trans_put(trans);

	bch_err_fn(c, ret);
	return ret;
}

int bch2_alloc_read(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	int ret;

	down_read(&c->gc_lock);

	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
		ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
					 BTREE_ITER_PREFETCH, k, ({
			u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
			u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;

			if (k.k->type != KEY_TYPE_bucket_gens)
				continue;

			const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;

			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!bch2_dev_exists2(c, k.k->p.inode))
				continue;

			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);

			for (u64 b = max_t(u64, ca->mi.first_bucket, start);
			     b < min_t(u64, ca->mi.nbuckets, end);
			     b++)
				*bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
			0;
		}));
	} else {
		ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
					 BTREE_ITER_PREFETCH, k, ({
			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!bch2_dev_bucket_exists(c, k.k->p))
				continue;

			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);

			struct bch_alloc_v4 a;
			*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
			0;
		}));
	}

	bch2_trans_put(trans);
	up_read(&c->gc_lock);

	bch_err_fn(c, ret);
	return ret;
}

/* Free space/discard btree: */

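/*
 * Keep the freespace and need_discard btrees in sync with a bucket's
 * allocation state: @set adds the index entry for @alloc_k, !@set removes it:
 */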
static int bch2_bucket_do_index(struct btree_trans *trans,
				struct bkey_s_c alloc_k,
				const struct bch_alloc_v4 *a,
				bool set)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
	struct btree_iter iter;
	struct bkey_s_c old;
	struct bkey_i *k;
	enum btree_id btree;
	enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
	enum bch_bkey_type new_type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (a->data_type != BCH_DATA_free &&
	    a->data_type != BCH_DATA_need_discard)
		return 0;

	k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.type = new_type;

	switch (a->data_type) {
	case BCH_DATA_free:
		btree = BTREE_ID_freespace;
		k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
		bch2_key_resize(&k->k, 1);
		break;
	case BCH_DATA_need_discard:
		btree = BTREE_ID_need_discard;
		k->k.p = alloc_k.k->p;
		break;
	default:
		return 0;
	}

	old = bch2_bkey_get_iter(trans, &iter, btree,
				 bkey_start_pos(&k->k),
				 BTREE_ITER_INTENT);
	ret = bkey_err(old);
	if (ret)
		return ret;

	if (ca->mi.freespace_initialized &&
	    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info &&
	    bch2_trans_inconsistent_on(old.k->type != old_type, trans,
			"incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
			" for %s",
			set ? "setting" : "clearing",
			bch2_btree_id_str(btree),
			iter.pos.inode,
			iter.pos.offset,
			bch2_bkey_types[old.k->type],
			bch2_bkey_types[old_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = -EIO;
		goto err;
	}

	ret = bch2_trans_update(trans, &iter, k, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}

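/* Propagate a changed bucket generation number to the bucket_gens btree: */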
static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
					   struct bpos bucket, u8 gen)
{
	struct btree_iter iter;
	unsigned offset;
	struct bpos pos = alloc_gens_pos(bucket, &offset);
	struct bkey_i_bucket_gens *g;
	struct bkey_s_c k;
	int ret;

	g = bch2_trans_kmalloc(trans, sizeof(*g));
	ret = PTR_ERR_OR_ZERO(g);
	if (ret)
		return ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
			       BTREE_ITER_INTENT|
			       BTREE_ITER_WITH_UPDATES);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (k.k->type != KEY_TYPE_bucket_gens) {
		bkey_bucket_gens_init(&g->k_i);
		g->k.p = iter.pos;
	} else {
		bkey_reassemble(&g->k_i, k);
	}

	g->v.gens[offset] = gen;

	ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

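/*
 * Trigger run on updates to alloc keys: the transactional half keeps the
 * freespace, need_discard and bucket_gens btrees and the LRUs consistent with
 * the new key; the atomic half updates in-memory accounting and kicks off
 * discards, invalidates and gc_gens as buckets change state:
 */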
int bch2_trigger_alloc(struct btree_trans *trans,
		       enum btree_id btree, unsigned level,
		       struct bkey_s_c old, struct bkey_s new,
		       unsigned flags)
{
	struct bch_fs *c = trans->c;
	int ret = 0;

	if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
				       "alloc key for invalid device or bucket"))
		return -EIO;

	struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);

	struct bch_alloc_v4 old_a_convert;
	const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);

	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;

		new_a->data_type = alloc_data_type(*new_a, new_a->data_type);

		if (bch2_bucket_sectors(*new_a) > bch2_bucket_sectors(*old_a)) {
			new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
			new_a->io_time[WRITE] = max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
			SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
		}

		if (data_type_is_empty(new_a->data_type) &&
		    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
			new_a->gen++;
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
		}

		if (old_a->data_type != new_a->data_type ||
		    (new_a->data_type == BCH_DATA_free &&
		     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
			ret = bch2_bucket_do_index(trans, old, old_a, false) ?:
				bch2_bucket_do_index(trans, new.s_c, new_a, true);
			if (ret)
				return ret;
		}

		if (new_a->data_type == BCH_DATA_cached &&
		    !new_a->io_time[READ])
			new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));

		u64 old_lru = alloc_lru_idx_read(*old_a);
		u64 new_lru = alloc_lru_idx_read(*new_a);
		if (old_lru != new_lru) {
			ret = bch2_lru_change(trans, new.k->p.inode,
					      bucket_to_u64(new.k->p),
					      old_lru, new_lru);
			if (ret)
				return ret;
		}

		new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
						bch_dev_bkey_exists(c, new.k->p.inode));
		if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
			ret = bch2_lru_change(trans,
					      BCH_LRU_FRAGMENTATION_START,
					      bucket_to_u64(new.k->p),
					      old_a->fragmentation_lru, new_a->fragmentation_lru);
			if (ret)
				return ret;
		}

		if (old_a->gen != new_a->gen) {
			ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
			if (ret)
				return ret;
		}

		/*
		 * need to know if we're getting called from the invalidate path or
		 * not:
		 */

		if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
		    old_a->cached_sectors) {
			ret = bch2_update_cached_sectors_list(trans, new.k->p.inode,
							      -((s64) old_a->cached_sectors));
			if (ret)
				return ret;
		}
	}

	if ((flags & BTREE_TRIGGER_ATOMIC) && (flags & BTREE_TRIGGER_INSERT)) {
		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
		u64 journal_seq = trans->journal_res.seq;
		u64 bucket_journal_seq = new_a->journal_seq;

		if ((flags & BTREE_TRIGGER_INSERT) &&
		    data_type_is_empty(old_a->data_type) !=
		    data_type_is_empty(new_a->data_type) &&
		    new.k->type == KEY_TYPE_alloc_v4) {
			struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v;

			/*
			 * If the btree updates referring to a bucket weren't flushed
			 * before the bucket became empty again, then we don't have
			 * to wait on a journal flush before we can reuse the bucket:
			 */
			v->journal_seq = bucket_journal_seq =
				data_type_is_empty(new_a->data_type) &&
				(journal_seq == v->journal_seq ||
				 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
				? 0 : journal_seq;
		}

		if (!data_type_is_empty(old_a->data_type) &&
		    data_type_is_empty(new_a->data_type) &&
		    bucket_journal_seq) {
			ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
					c->journal.flushed_seq_ondisk,
					new.k->p.inode, new.k->p.offset,
					bucket_journal_seq);
			if (ret) {
				bch2_fs_fatal_error(c,
					"setting bucket_needs_journal_commit: %s", bch2_err_str(ret));
				return ret;
			}
		}

		percpu_down_read(&c->mark_lock);
		if (new_a->gen != old_a->gen)
			*bucket_gen(ca, new.k->p.offset) = new_a->gen;

		bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false);
		percpu_up_read(&c->mark_lock);

#define eval_state(_a, expr)	({ const struct bch_alloc_v4 *a = _a; expr; })
#define statechange(expr)	!eval_state(old_a, expr) && eval_state(new_a, expr)
#define bucket_flushed(a)	(!a->journal_seq || a->journal_seq <= c->journal.flushed_seq_ondisk)

		if (statechange(a->data_type == BCH_DATA_free) &&
		    bucket_flushed(new_a))
			closure_wake_up(&c->freelist_wait);

		if (statechange(a->data_type == BCH_DATA_need_discard) &&
		    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
		    bucket_flushed(new_a))
			bch2_discard_one_bucket_fast(c, new.k->p);

		if (statechange(a->data_type == BCH_DATA_cached) &&
		    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
		    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
			bch2_do_invalidates(c);

		if (statechange(a->data_type == BCH_DATA_need_gc_gens))
			bch2_do_gc_gens(c);
	}

	if ((flags & BTREE_TRIGGER_GC) &&
	    (flags & BTREE_TRIGGER_BUCKET_INVALIDATE)) {
		struct bch_alloc_v4 new_a_convert;
		const struct bch_alloc_v4 *new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);

		percpu_down_read(&c->mark_lock);
		struct bucket *g = gc_bucket(ca, new.k->p.offset);

		bucket_lock(g);

		g->gen_valid = 1;
		g->gen = new_a->gen;
		g->data_type = new_a->data_type;
		g->stripe = new_a->stripe;
		g->stripe_redundancy = new_a->stripe_redundancy;
		g->dirty_sectors = new_a->dirty_sectors;
		g->cached_sectors = new_a->cached_sectors;

		bucket_unlock(g);
		percpu_up_read(&c->mark_lock);
	}

	return 0;
}

/*
 * This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
 * extents style btrees, but works on non-extents btrees:
 */
static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
{
	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);

	if (bkey_err(k))
		return k;

	if (k.k->type) {
		return k;
	} else {
		struct btree_iter iter2;
		struct bpos next;

		bch2_trans_copy_iter(&iter2, iter);

		struct btree_path *path = btree_iter_path(iter->trans, iter);
		if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
			end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));

		end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));

		/*
		 * btree node min/max is a closed interval, upto takes a half
		 * open interval:
		 */
		k = bch2_btree_iter_peek_upto(&iter2, end);
		next = iter2.pos;
		bch2_trans_iter_exit(iter->trans, &iter2);

		BUG_ON(next.offset >= iter->pos.offset + U32_MAX);

		if (bkey_err(k))
			return k;

		bkey_init(hole);
		hole->p = iter->pos;

		bch2_key_resize(hole, next.offset - iter->pos.offset);
		return (struct bkey_s_c) { hole, NULL };
	}
}

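/* Advance *bucket to the next device:bucket that actually exists, if any: */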
static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
{
	struct bch_dev *ca;

	if (bch2_dev_bucket_exists(c, *bucket))
		return true;

	if (bch2_dev_exists2(c, bucket->inode)) {
		ca = bch_dev_bkey_exists(c, bucket->inode);

		if (bucket->offset < ca->mi.first_bucket) {
			bucket->offset = ca->mi.first_bucket;
			return true;
		}

		bucket->inode++;
		bucket->offset = 0;
	}

	rcu_read_lock();
	ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
	if (ca)
		*bucket = POS(ca->dev_idx, ca->mi.first_bucket);
	rcu_read_unlock();

	return ca != NULL;
}

static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
{
	struct bch_fs *c = iter->trans->c;
	struct bkey_s_c k;
again:
	k = bch2_get_key_or_hole(iter, POS_MAX, hole);
	if (bkey_err(k))
		return k;

	if (!k.k->type) {
		struct bpos bucket = bkey_start_pos(k.k);

		if (!bch2_dev_bucket_exists(c, bucket)) {
			if (!next_bucket(c, &bucket))
				return bkey_s_c_null;

			bch2_btree_iter_set_pos(iter, bucket);
			goto again;
		}

		if (!bch2_dev_bucket_exists(c, k.k->p)) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);

			bch2_key_resize(hole, ca->mi.nbuckets - bucket.offset);
		}
	}

	return k;
}

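/*
 * Check one alloc key against the need_discard, freespace and bucket_gens
 * btrees, repairing any index entries that don't match it:
 */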
static noinline_for_stack
int bch2_check_alloc_key(struct btree_trans *trans,
			 struct bkey_s_c alloc_k,
			 struct btree_iter *alloc_iter,
			 struct btree_iter *discard_iter,
			 struct btree_iter *freespace_iter,
			 struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	unsigned discard_key_type, freespace_key_type;
	unsigned gens_offset;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
			alloc_key_to_missing_dev_bucket,
			"alloc key for invalid device:bucket %llu:%llu",
			alloc_k.k->p.inode, alloc_k.k->p.offset))
		return bch2_btree_delete_at(trans, alloc_iter, 0);

	ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
	if (!ca->mi.freespace_initialized)
		return 0;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
	k = bch2_btree_iter_peek_slot(discard_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(k.k->type != discard_key_type,
			c, need_discard_key_wrong,
			"incorrect key in need_discard btree (got %s should be %s)\n"
			" %s",
			bch2_bkey_types[k.k->type],
			bch2_bkey_types[discard_key_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type = discard_key_type;
		update->k.p = discard_iter->pos;

		ret = bch2_trans_update(trans, discard_iter, update, 0);
		if (ret)
			goto err;
	}

	freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(k.k->type != freespace_key_type,
			c, freespace_key_wrong,
			"incorrect key in freespace btree (got %s should be %s)\n"
			" %s",
			bch2_bkey_types[k.k->type],
			bch2_bkey_types[freespace_key_type],
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type = freespace_key_type;
		update->k.p = freespace_iter->pos;
		bch2_key_resize(&update->k, 1);

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(a->gen != alloc_gen(k, gens_offset),
			c, bucket_gens_key_wrong,
			"incorrect gen in bucket_gens btree (got %u should be %u)\n"
			" %s",
			alloc_gen(k, gens_offset), a->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i_bucket_gens *g =
			bch2_trans_kmalloc(trans, sizeof(*g));

		ret = PTR_ERR_OR_ZERO(g);
		if (ret)
			goto err;

		if (k.k->type == KEY_TYPE_bucket_gens) {
			bkey_reassemble(&g->k_i, k);
		} else {
			bkey_bucket_gens_init(&g->k_i);
			g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
		}

		g->v.gens[gens_offset] = a->gen;

		ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

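/*
 * A hole in the alloc btree is a range of buckets with no alloc keys, i.e.
 * free buckets: verify that the corresponding range of the freespace btree is
 * set:
 */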
static noinline_for_stack
int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
				    struct bpos start,
				    struct bpos *end,
				    struct btree_iter *freespace_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	ca = bch_dev_bkey_exists(c, start.inode);
	if (!ca->mi.freespace_initialized)
		return 0;

	bch2_btree_iter_set_pos(freespace_iter, start);

	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	*end = bkey_min(k.k->p, *end);

	if (fsck_err_on(k.k->type != KEY_TYPE_set,
			c, freespace_hole_missing,
			"hole in alloc btree missing in freespace btree\n"
			" device %llu buckets %llu-%llu",
			freespace_iter->pos.inode,
			freespace_iter->pos.offset,
			end->offset)) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type = KEY_TYPE_set;
		update->k.p = freespace_iter->pos;
		bch2_key_resize(&update->k,
				min_t(u64, U32_MAX, end->offset -
				      freespace_iter->pos.offset));

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack
int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
				      struct bpos start,
				      struct bpos *end,
				      struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	unsigned i, gens_offset, gens_end_offset;
	int ret;

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));

	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
		     alloc_gens_pos(*end, &gens_end_offset)))
		gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;

	if (k.k->type == KEY_TYPE_bucket_gens) {
		struct bkey_i_bucket_gens g;
		bool need_update = false;

		bkey_reassemble(&g.k_i, k);

		for (i = gens_offset; i < gens_end_offset; i++) {
			if (fsck_err_on(g.v.gens[i], c,
					bucket_gens_hole_wrong,
					"hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
					bucket_gens_pos_to_alloc(k.k->p, i).inode,
					bucket_gens_pos_to_alloc(k.k->p, i).offset,
					g.v.gens[i])) {
				g.v.gens[i] = 0;
				need_update = true;
			}
		}

		if (need_update) {
			struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

			ret = PTR_ERR_OR_ZERO(u);
			if (ret)
				goto err;

			memcpy(u, &g, sizeof(g));

			ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
			if (ret)
				goto err;
		}
	}

	*end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

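/*
 * Keys in the need_discard and freespace btrees encode the bucket in the low
 * 56 bits of the key offset, with the freespace generation bits above; verify
 * that each entry points at a bucket in the matching state:
 */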
static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_trans *trans,
							       struct btree_iter *iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter;
	struct bkey_s_c alloc_k;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	u64 genbits;
	struct bpos pos;
	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
		? BCH_DATA_need_discard
		: BCH_DATA_free;
	struct printbuf buf = PRINTBUF;
	int ret;

	pos = iter->pos;
	pos.offset &= ~(~0ULL << 56);
	genbits = iter->pos.offset & (~0ULL << 56);

	alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
			need_discard_freespace_key_to_invalid_dev_bucket,
			"entry in %s btree for nonexistent dev:bucket %llu:%llu",
			bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset))
		goto delete;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (fsck_err_on(a->data_type != state ||
			(state == BCH_DATA_free &&
			 genbits != alloc_freespace_genbits(*a)), c,
			need_discard_freespace_key_bad,
			"%s\n incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
			bch2_btree_id_str(iter->btree_id),
			iter->pos.inode,
			iter->pos.offset,
			a->data_type == state,
			genbits >> 56, alloc_freespace_genbits(*a) >> 56))
		goto delete;
out:
fsck_err:
	set_btree_iter_dontneed(&alloc_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
delete:
	ret = bch2_btree_delete_extent_at(trans, iter,
			iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_TRANS_COMMIT_no_enospc);
	goto out;
}

/*
 * We've already checked that generation numbers in the bucket_gens btree are
 * valid for buckets that exist; this just checks for keys for nonexistent
 * buckets.
 */
static noinline_for_stack
int bch2_check_bucket_gens_key(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_i_bucket_gens g;
	struct bch_dev *ca;
	u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
	u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
	u64 b;
	bool need_update = false, dev_exists;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
	bkey_reassemble(&g.k_i, k);

	/* if no bch_dev, skip out whether we repair or not */
	dev_exists = bch2_dev_exists2(c, k.k->p.inode);
	if (!dev_exists) {
		if (fsck_err_on(!dev_exists, c,
				bucket_gens_to_invalid_dev,
				"bucket_gens key for invalid device:\n %s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
			ret = bch2_btree_delete_at(trans, iter, 0);
		}
		goto out;
	}

	ca = bch_dev_bkey_exists(c, k.k->p.inode);
	if (fsck_err_on(end <= ca->mi.first_bucket ||
			start >= ca->mi.nbuckets, c,
			bucket_gens_to_invalid_buckets,
			"bucket_gens key for invalid buckets:\n %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	for (b = start; b < ca->mi.first_bucket; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
				bucket_gens_nonzero_for_invalid_buckets,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	for (b = ca->mi.nbuckets; b < end; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
				bucket_gens_nonzero_for_invalid_buckets,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	if (need_update) {
		struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto out;

		memcpy(u, &g, sizeof(g));
		ret = bch2_trans_update(trans, iter, u, 0);
	}
out:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_alloc_info(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
	struct bkey hole;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
			     BTREE_ITER_PREFETCH);
	bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
			     BTREE_ITER_PREFETCH);
	bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_PREFETCH);
	bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
			     BTREE_ITER_PREFETCH);

	while (1) {
		struct bpos next;

		bch2_trans_begin(trans);

		k = bch2_get_key_or_real_bucket_hole(&iter, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (!k.k)
			break;

		if (k.k->type) {
			next = bpos_nosnap_successor(k.k->p);

			ret = bch2_check_alloc_key(trans,
						   k, &iter,
						   &discard_iter,
						   &freespace_iter,
						   &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		} else {
			next = k.k->p;

			ret = bch2_check_alloc_hole_freespace(trans,
							      bkey_start_pos(k.k),
							      &next,
							      &freespace_iter) ?:
				bch2_check_alloc_hole_bucket_gens(trans,
								  bkey_start_pos(k.k),
								  &next,
								  &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		}

		ret = bch2_trans_commit(trans, NULL, NULL,
					BCH_TRANS_COMMIT_no_enospc);
		if (ret)
			goto bkey_err;

		bch2_btree_iter_set_pos(&iter, next);
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &bucket_gens_iter);
	bch2_trans_iter_exit(trans, &freespace_iter);
	bch2_trans_iter_exit(trans, &discard_iter);
	bch2_trans_iter_exit(trans, &iter);

	if (ret < 0)
		goto err;

	ret = for_each_btree_key(trans, iter,
				 BTREE_ID_need_discard, POS_MIN,
				 BTREE_ITER_PREFETCH, k,
		bch2_check_discard_freespace_key(trans, &iter));
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_PREFETCH);
	while (1) {
		bch2_trans_begin(trans);
		k = bch2_btree_iter_peek(&iter);
		if (!k.k)
			break;

		ret = bkey_err(k) ?:
			bch2_check_discard_freespace_key(trans, &iter);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
			ret = 0;
			continue;
		}
		if (ret) {
			struct printbuf buf = PRINTBUF;
			bch2_bkey_val_to_text(&buf, c, k);

			bch_err(c, "while checking %s", buf.buf);
			printbuf_exit(&buf);
			break;
		}

		bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
	}
	bch2_trans_iter_exit(trans, &iter);
	if (ret)
		goto err;

	ret = for_each_btree_key_commit(trans, iter,
			BTREE_ID_bucket_gens, POS_MIN,
			BTREE_ITER_PREFETCH, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		bch2_check_bucket_gens_key(trans, &iter, k));
err:
	bch2_trans_put(trans);
	bch_err_fn(c, ret);
	return ret;
}

static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
				       struct btree_iter *alloc_iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter lru_iter;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	struct bkey_s_c alloc_k, lru_k;
	struct printbuf buf = PRINTBUF;
	int ret;

	alloc_k = bch2_btree_iter_peek(alloc_iter);
	if (!alloc_k.k)
		return 0;

	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (a->data_type != BCH_DATA_cached)
		return 0;

	if (fsck_err_on(!a->io_time[READ], c,
			alloc_key_cached_but_read_time_zero,
			"cached bucket with read_time 0\n"
			" %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i_alloc_v4 *a_mut =
			bch2_alloc_to_v4_mut(trans, alloc_k);
		ret = PTR_ERR_OR_ZERO(a_mut);
		if (ret)
			goto err;

		a_mut->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
		ret = bch2_trans_update(trans, alloc_iter,
					&a_mut->k_i, BTREE_TRIGGER_NORUN);
		if (ret)
			goto err;

		a = &a_mut->v;
	}

	lru_k = bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
				   lru_pos(alloc_k.k->p.inode,
					   bucket_to_u64(alloc_k.k->p),
					   a->io_time[READ]), 0);
	ret = bkey_err(lru_k);
	if (ret)
		return ret;

	if (fsck_err_on(lru_k.k->type != KEY_TYPE_set, c,
			alloc_key_to_missing_lru_entry,
			"missing lru entry\n"
			" %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = bch2_lru_set(trans,
				   alloc_k.k->p.inode,
				   bucket_to_u64(alloc_k.k->p),
				   a->io_time[READ]);
		if (ret)
			goto err;
	}
err:
fsck_err:
	bch2_trans_iter_exit(trans, &lru_iter);
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
				POS_MIN, BTREE_ITER_PREFETCH, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			bch2_check_alloc_to_lru_ref(trans, &iter)));
	bch_err_fn(c, ret);
	return ret;
}

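/*
 * Track buckets with a discard currently in flight; adding a bucket that's
 * already present fails with -EEXIST:
 */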
static int discard_in_flight_add(struct bch_fs *c, struct bpos bucket)
{
	int ret;

	mutex_lock(&c->discard_buckets_in_flight_lock);
	darray_for_each(c->discard_buckets_in_flight, i)
		if (bkey_eq(*i, bucket)) {
			ret = -EEXIST;
			goto out;
		}

	ret = darray_push(&c->discard_buckets_in_flight, bucket);
out:
	mutex_unlock(&c->discard_buckets_in_flight_lock);
	return ret;
}

static void discard_in_flight_remove(struct bch_fs *c, struct bpos bucket)
{
	mutex_lock(&c->discard_buckets_in_flight_lock);
	darray_for_each(c->discard_buckets_in_flight, i)
		if (bkey_eq(*i, bucket)) {
			darray_remove_item(&c->discard_buckets_in_flight, i);
			goto found;
		}
	BUG();
found:
	mutex_unlock(&c->discard_buckets_in_flight_lock);
}

struct discard_buckets_state {
	u64 seen;
	u64 open;
	u64 need_journal_commit;
	u64 discarded;
	struct bch_dev *ca;
	u64 need_journal_commit_this_dev;
};

static void discard_buckets_next_dev(struct bch_fs *c, struct discard_buckets_state *s, struct bch_dev *ca)
{
	if (s->ca == ca)
		return;

	if (s->ca && s->need_journal_commit_this_dev >
	    bch2_dev_usage_read(s->ca).d[BCH_DATA_free].buckets)
		bch2_journal_flush_async(&c->journal, NULL);

	if (s->ca)
		percpu_ref_put(&s->ca->ref);
	if (ca)
		percpu_ref_get(&ca->ref);
	s->ca = ca;
	s->need_journal_commit_this_dev = 0;
}

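/*
 * Process one entry from the need_discard btree: if the bucket is safe to
 * discard, issue the discard and clear the bucket's need_discard flag:
 */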
static int bch2_discard_one_bucket(struct btree_trans *trans,
				   struct btree_iter *need_discard_iter,
				   struct bpos *discard_pos_done,
				   struct discard_buckets_state *s)
{
	struct bch_fs *c = trans->c;
	struct bpos pos = need_discard_iter->pos;
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	struct bch_dev *ca;
	struct bkey_i_alloc_v4 *a;
	struct printbuf buf = PRINTBUF;
	bool discard_locked = false;
	int ret = 0;

	ca = bch_dev_bkey_exists(c, pos.inode);

	if (!percpu_ref_tryget(&ca->io_ref)) {
		bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
		return 0;
	}

	discard_buckets_next_dev(c, s, ca);

	if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
		s->open++;
		goto out;
	}

	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
			c->journal.flushed_seq_ondisk,
			pos.inode, pos.offset)) {
		s->need_journal_commit++;
		s->need_journal_commit_this_dev++;
		goto out;
	}

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
			       need_discard_iter->pos,
			       BTREE_ITER_CACHED);
	ret = bkey_err(k);
	if (ret)
		goto out;

	a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	if (a->v.dirty_sectors) {
		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
					       trans, "attempting to discard bucket with dirty data\n%s",
					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = -EIO;
		goto out;
	}

	if (a->v.data_type != BCH_DATA_need_discard) {
		if (data_type_is_empty(a->v.data_type) &&
		    BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
			a->v.gen++;
			SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
			goto write;
		}

		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
					       trans, "bucket incorrectly set in need_discard btree\n"
					       "%s",
					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = -EIO;
		goto out;
	}

	if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
					       trans, "clearing need_discard but journal_seq %llu > flushed_seq %llu\n%s",
					       a->v.journal_seq,
					       c->journal.flushed_seq_ondisk,
					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = -EIO;
		goto out;
	}

	if (discard_in_flight_add(c, SPOS(iter.pos.inode, iter.pos.offset, true)))
		goto out;

	discard_locked = true;

	if (!bkey_eq(*discard_pos_done, iter.pos) &&
	    ca->mi.discard && !c->opts.nochanges) {
		/*
		 * This works without any other locks because this is the only
		 * thread that removes items from the need_discard tree
		 */
		bch2_trans_unlock_long(trans);
		blkdev_issue_discard(ca->disk_sb.bdev,
				     k.k->p.offset * ca->mi.bucket_size,
				     ca->mi.bucket_size,
				     GFP_KERNEL);
		*discard_pos_done = iter.pos;

		ret = bch2_trans_relock_notrace(trans);
		if (ret)
			goto out;
	}

	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
	a->v.data_type = alloc_data_type(a->v, a->v.data_type);
write:
	ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_WATERMARK_btree|
				  BCH_TRANS_COMMIT_no_enospc);
	if (ret)
		goto out;

	count_event(c, bucket_discard);
	s->discarded++;
out:
	if (discard_locked)
		discard_in_flight_remove(c, iter.pos);
	s->seen++;
	bch2_trans_iter_exit(trans, &iter);
	percpu_ref_put(&ca->io_ref);
	printbuf_exit(&buf);
	return ret;
}

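/*
 * Worker that walks the entire need_discard btree and discards each bucket
 * that's ready; queued from bch2_do_discards() with a BCH_WRITE_REF_discard
 * write ref held.
 */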
static void bch2_do_discards_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
	struct discard_buckets_state s = {};
	struct bpos discard_pos_done = POS_MAX;
	int ret;

	/*
	 * We're doing the commit in bch2_discard_one_bucket instead of using
	 * for_each_btree_key_commit() so that we can increment counters after
	 * successful commit:
	 */
	ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter,
				   BTREE_ID_need_discard, POS_MIN, 0, k,
			bch2_discard_one_bucket(trans, &iter, &discard_pos_done, &s)));

	discard_buckets_next_dev(c, &s, NULL);

	trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
			      bch2_err_str(ret));

	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}

void bch2_do_discards(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_discard) &&
	    !queue_work(c->write_ref_wq, &c->discard_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}

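/*
 * Clear the need_discard flag on a single alloc key; used by the fast path
 * below once the discard itself has been issued.
 */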
static int bch2_clear_bucket_needs_discard(struct btree_trans *trans, struct bpos bucket)
{
	struct btree_iter iter;
	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, BTREE_ITER_INTENT);
	struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
	int ret = bkey_err(k);
	if (ret)
		goto err;

	struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto err;

	BUG_ON(a->v.dirty_sectors);
	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
	a->v.data_type = alloc_data_type(a->v, a->v.data_type);

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

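/*
 * Fast-path discard worker: repeatedly pulls a bucket off the in-flight list
 * (entries with the snapshot field set are already being processed and are
 * skipped), issues the discard, then clears need_discard in the alloc btree.
 */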
static void bch2_do_discards_fast_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, discard_fast_work);

	while (1) {
		bool got_bucket = false;
		struct bpos bucket;
		struct bch_dev *ca;

		mutex_lock(&c->discard_buckets_in_flight_lock);
		darray_for_each(c->discard_buckets_in_flight, i) {
			if (i->snapshot)
				continue;

			ca = bch_dev_bkey_exists(c, i->inode);

			if (!percpu_ref_tryget(&ca->io_ref)) {
				darray_remove_item(&c->discard_buckets_in_flight, i);
				continue;
			}

			got_bucket = true;
			bucket = *i;
			i->snapshot = true;
			break;
		}
		mutex_unlock(&c->discard_buckets_in_flight_lock);

		if (!got_bucket)
			break;

		if (ca->mi.discard && !c->opts.nochanges)
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket.offset * ca->mi.bucket_size,
					     ca->mi.bucket_size,
					     GFP_KERNEL);

		int ret = bch2_trans_do(c, NULL, NULL,
					BCH_WATERMARK_btree|
					BCH_TRANS_COMMIT_no_enospc,
					bch2_clear_bucket_needs_discard(trans, bucket));
		bch_err_fn(c, ret);

		percpu_ref_put(&ca->io_ref);
		discard_in_flight_remove(c, bucket);

		if (ret)
			break;
	}

	bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}

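/*
 * Queue a single bucket for discard via the fast path, bypassing the
 * need_discard btree scan: add it to the in-flight list and kick the fast
 * discard worker.
 */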
static void bch2_discard_one_bucket_fast(struct bch_fs *c, struct bpos bucket)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);

	if (!percpu_ref_is_dying(&ca->io_ref) &&
	    !discard_in_flight_add(c, bucket) &&
	    bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast) &&
	    !queue_work(c->write_ref_wq, &c->discard_fast_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}

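/*
 * Invalidate the cached data in a single bucket, taken from the front of the
 * LRU: bump the bucket gen (invalidating any live pointers into it) and reset
 * it to empty. Returns 1 to stop iterating once *nr_to_invalidate buckets
 * have been invalidated.
 */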
static int invalidate_one_bucket(struct btree_trans *trans,
				 struct btree_iter *lru_iter,
				 struct bkey_s_c lru_k,
				 s64 *nr_to_invalidate)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter = { NULL };
	struct bkey_i_alloc_v4 *a = NULL;
	struct printbuf buf = PRINTBUF;
	struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
	unsigned cached_sectors;
	int ret = 0;

	if (*nr_to_invalidate <= 0)
		return 1;

	if (!bch2_dev_bucket_exists(c, bucket)) {
		prt_str(&buf, "lru entry points to invalid bucket");
		goto err;
	}

	if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
		return 0;

	a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	/* We expect harmless races here due to the btree write buffer: */
	if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
		goto out;

	BUG_ON(a->v.data_type != BCH_DATA_cached);
	BUG_ON(a->v.dirty_sectors);

	if (!a->v.cached_sectors)
		bch_err(c, "invalidating empty bucket, confused");

	cached_sectors = a->v.cached_sectors;

	SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
	a->v.gen++;
	a->v.data_type		= 0;
	a->v.dirty_sectors	= 0;
	a->v.cached_sectors	= 0;
	a->v.io_time[READ]	= atomic64_read(&c->io_clock[READ].now);
	a->v.io_time[WRITE]	= atomic64_read(&c->io_clock[WRITE].now);

	ret =   bch2_trans_update(trans, &alloc_iter, &a->k_i,
				  BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_WATERMARK_btree|
				  BCH_TRANS_COMMIT_no_enospc);
	if (ret)
		goto out;

	trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
	--*nr_to_invalidate;
out:
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
err:
	prt_str(&buf, "\n  lru key: ");
	bch2_bkey_val_to_text(&buf, c, lru_k);

	prt_str(&buf, "\n  lru entry: ");
	bch2_lru_pos_to_text(&buf, lru_iter->pos);

	prt_str(&buf, "\n  alloc key: ");
	if (!a)
		bch2_bpos_to_text(&buf, bucket);
	else
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));

	bch_err(c, "%s", buf.buf);
	if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_lrus) {
		bch2_inconsistent_error(c);
		ret = -EINVAL;
	}

	goto out;
}

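/*
 * Worker that walks each device's LRU and invalidates cached-data buckets
 * until the device has as many free buckets as should_invalidate_buckets()
 * asks for; the btree write buffer is flushed first so the LRU is current.
 */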
static void bch2_do_invalidates_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;

	ret = bch2_btree_write_buffer_tryflush(trans);
	if (ret)
		goto err;

	for_each_member_device(c, ca) {
		s64 nr_to_invalidate =
			should_invalidate_buckets(ca, bch2_dev_usage_read(ca));

		ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
				lru_pos(ca->dev_idx, 0, 0),
				lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
				BTREE_ITER_INTENT, k,
			invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));

		if (ret < 0) {
			percpu_ref_put(&ca->ref);
			break;
		}
	}
err:
	bch2_trans_put(trans);
	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}

void bch2_do_invalidates(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate) &&
	    !queue_work(c->write_ref_wq, &c->invalidate_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}

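/*
 * Build the freespace btree (and need_discard/need_gc_gens entries) for
 * buckets [bucket_start, bucket_end) on @ca from its alloc keys: live keys
 * are indexed one at a time, while runs of empty keys become single
 * KEY_TYPE_set extents in the freespace btree. Progress is logged every ten
 * seconds since this can take a while on large devices.
 */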
int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
			    u64 bucket_start, u64 bucket_end)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey hole;
	struct bpos end = POS(ca->dev_idx, bucket_end);
	struct bch_member *m;
	unsigned long last_updated = jiffies;
	int ret;

	BUG_ON(bucket_start > bucket_end);
	BUG_ON(bucket_end > ca->mi.nbuckets);

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
		POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
		BTREE_ITER_PREFETCH);
	/*
	 * Scan the alloc btree for every bucket on @ca, and add buckets to the
	 * freespace/need_discard/need_gc_gens btrees as needed:
	 */
	while (1) {
		if (last_updated + HZ * 10 < jiffies) {
			bch_info(ca, "%s: currently at %llu/%llu",
				 __func__, iter.pos.offset, ca->mi.nbuckets);
			last_updated = jiffies;
		}

		bch2_trans_begin(trans);

		if (bkey_ge(iter.pos, end)) {
			ret = 0;
			break;
		}

		k = bch2_get_key_or_hole(&iter, end, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (k.k->type) {
			/*
			 * We process live keys in the alloc btree one at a
			 * time:
			 */
			struct bch_alloc_v4 a_convert;
			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

			ret =   bch2_bucket_do_index(trans, k, a, true) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_advance(&iter);
		} else {
			struct bkey_i *freespace;

			freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
			ret = PTR_ERR_OR_ZERO(freespace);
			if (ret)
				goto bkey_err;

			bkey_init(&freespace->k);
			freespace->k.type	= KEY_TYPE_set;
			freespace->k.p		= k.k->p;
			freespace->k.size	= k.k->size;

			ret =   bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_set_pos(&iter, k.k->p);
		}
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);

	if (ret < 0) {
		bch_err_msg(ca, ret, "initializing free space");
		return ret;
	}

	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
	mutex_unlock(&c->sb_lock);

	return 0;
}

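/*
 * Run freespace init for any device that hasn't completed it yet, then
 * persist the freespace_initialized member flags via bch2_write_super().
 */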
int bch2_fs_freespace_init(struct bch_fs *c)
{
	int ret = 0;
	bool doing_init = false;

	/*
	 * We can crash during the device add path, so we need to check this on
	 * every mount:
	 */

	for_each_member_device(c, ca) {
		if (ca->mi.freespace_initialized)
			continue;

		if (!doing_init) {
			bch_info(c, "initializing freespace");
			doing_init = true;
		}

		ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
		if (ret) {
			percpu_ref_put(&ca->ref);
			bch_err_fn(c, ret);
			return ret;
		}
	}

	if (doing_init) {
		mutex_lock(&c->sb_lock);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
		bch_verbose(c, "done initializing freespace");
	}

	return 0;
}

/* Bucket IO clocks: */

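/*
 * Stamp a bucket's read or write IO time with the current IO clock, skipping
 * the btree update when it's already current; the LRU position of cached
 * buckets is derived from the read time.
 */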
int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	u64 now;
	int ret = 0;

	a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ret;

	now = atomic64_read(&c->io_clock[rw].now);
	if (a->v.io_time[rw] == now)
		goto out;

	a->v.io_time[rw] = now;

	ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/* Startup/shutdown (ro/rw): */

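/*
 * Recompute usable filesystem capacity from the currently-rw devices:
 * per-device reserves (btree, copygc and write points) are subtracted from
 * raw capacity, and readahead is scaled to the devices' combined ra_pages.
 */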
void bch2_recalc_capacity(struct bch_fs *c)
{
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(c, ca) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(c, ca) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */

		dev_reserve += ca->nr_btree_reserve * 2;
		dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up in case someone was waiting for buckets: */
	closure_wake_up(&c->freelist_wait);
}

u64 bch2_min_rw_member_capacity(struct bch_fs *c)
{
	u64 ret = U64_MAX;

	for_each_rw_member(c, ca)
		ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
	return ret;
}

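/*
 * Returns true if any open bucket on @ca still belongs to a write point,
 * i.e. there may be writes in flight to the device.
 */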
static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	bch2_open_buckets_stop(c, ca, false);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}

void bch2_fs_allocator_background_exit(struct bch_fs *c)
{
	darray_exit(&c->discard_buckets_in_flight);
}

void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
	mutex_init(&c->discard_buckets_in_flight_lock);
	INIT_WORK(&c->discard_work, bch2_do_discards_work);
	INIT_WORK(&c->discard_fast_work, bch2_do_discards_fast_work);
	INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);
}