// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"
#include "features.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

extern bool bcache_is_reboot;

/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};
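
/*
 * These mode strings are what userspace reads and writes through sysfs;
 * a typical invocation (assuming a backing device registered as bcache0,
 * per Documentation/admin-guide/bcache.rst) would be:
 *
 *	echo writeback > /sys/block/bcache0/bcache/cache_mode
 *
 * Reading the attribute back prints the list with the active entry
 * bracketed, e.g. "writethrough [writeback] writearound none".
 */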

static const char * const bch_reada_cache_policies[] = {
	"all",
	"meta-only",
	NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};
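
/*
 * Rough semantics (a summary, not authoritative; the real handling lives
 * in the cache-set failure paths in super.c): "always" stops the bcache
 * device whenever its cache set fails, while "auto" only stops it when
 * stopping is needed to avoid data loss, e.g. a writeback device that
 * still has dirty data in the failed cache.
 */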

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};
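
/*
 * Action taken when a cache set sees too many I/O errors: "unregister"
 * shuts the cache set down, "panic" calls panic(). (Summary only; see
 * bch_cache_set_error() for the actual handling.)
 */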

write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
read_attribute(backing_dev_name);
read_attribute(backing_dev_uuid);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);
read_attribute(feature_compat);
read_attribute(feature_ro_compat);
read_attribute(feature_incompat);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(reclaimed_journal_buckets);
read_attribute(flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
read_attribute(cutoff_writeback);
read_attribute(cutoff_writeback_sync);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(readahead_cache_policy);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);
rw_attribute(writeback_consider_fragment);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_fp_term_low);
rw_attribute(writeback_rate_fp_term_mid);
rw_attribute(writeback_rate_fp_term_high);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(idle_max_writeback_rate);
rw_attribute(gc_after_writeback);
rw_attribute(size);

static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += scnprintf(out, buf + size - out,
				 i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}
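
/*
 * For example, bch_snprint_string_list(buf, PAGE_SIZE, bch_cache_modes, 1)
 * yields "writethrough [writeback] writearound none\n"; the trailing space
 * written after the last entry is overwritten with the newline.
 */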

SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat)	(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_readahead_cache_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_reada_cache_policies,
					       dc->cache_readahead_policy);

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);


	sysfs_printf(data_csum,	"%i", dc->disk.data_csum);
	var_printf(verify, "%i");
	var_printf(bypass_torture_test, "%i");
	var_printf(writeback_metadata, "%i");
	var_printf(writeback_running, "%i");
	var_printf(writeback_consider_fragment, "%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_printf(io_errors, "%i", atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit, "%i", dc->error_limit);
	sysfs_printf(io_disable, "%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_fp_term_low);
	var_print(writeback_rate_fp_term_mid);
	var_print(writeback_rate_fp_term_high);
	var_print(writeback_rate_minimum);

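	/*
	 * Note on units: sector counts are converted to bytes with "<< 9"
	 * (512-byte sectors) throughout this function, and writeback_rate
	 * is reported as 0 whenever writeback is not running.
	 */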
	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, other values should
		 * be 0 if writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
					 NSEC_PER_MSEC) : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive, "%u");

	var_hprint(sequential_cutoff);

	sysfs_print(running, atomic_read(&dc->running));
	sysfs_print(state, states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		/* the label is at most SB_LABEL_SIZE bytes; terminate it */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_name) {
		snprintf(buf, BDEVNAME_SIZE + 1, "%pg", dc->bdev);
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_uuid) {
		/* convert binary uuid into 36-byte string plus '\0' */
		snprintf(buf, 36 + 1, "%pU", dc->sb.uuid);
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)
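	/*
	 * Each helper above expands to an "if (attr == &sysfs_<var>)"
	 * parse-and-assign step, so d_strtoul(verify) below is shorthand
	 * for sysfs_strtoul(verify, dc->verify); see sysfs.h for the
	 * underlying macros.
	 */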

	sysfs_strtoul(data_csum, dc->disk.data_csum);
	d_strtoul(verify);
	sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
	sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
	sysfs_strtoul_bool(writeback_running, dc->writeback_running);
	sysfs_strtoul_bool(writeback_consider_fragment,
			   dc->writeback_consider_fragment);
	sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
			    0, bch_cutoff_writeback);

	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
			    dc->writeback_rate_i_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse,
			    1, UINT_MAX);
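	/*
	 * The three fp_term clamps below maintain the invariant
	 * low < mid < high: each value is clamped against the current
	 * values of its neighbours, so a write can never break the
	 * ordering.
	 */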
	sysfs_strtoul_clamp(writeback_rate_fp_term_low,
			    dc->writeback_rate_fp_term_low,
			    1, dc->writeback_rate_fp_term_mid - 1);
	sysfs_strtoul_clamp(writeback_rate_fp_term_mid,
			    dc->writeback_rate_fp_term_mid,
			    dc->writeback_rate_fp_term_low + 1,
			    dc->writeback_rate_fp_term_high - 1);
	sysfs_strtoul_clamp(writeback_rate_fp_term_high,
			    dc->writeback_rate_fp_term_high,
			    dc->writeback_rate_fp_term_mid + 1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_minimum,
			    dc->writeback_rate_minimum,
			    1, UINT_MAX);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	sysfs_strtoul_clamp(sequential_cutoff,
			    dc->sequential_cutoff,
			    0, UINT_MAX);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf)) {
		v = bch_cached_dev_run(dc);
		if (v)
			return v;
	}

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_readahead_cache_policy) {
		v = __sysfs_match_string(bch_reada_cache_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != dc->cache_readahead_policy)
			dc->cache_readahead_policy = v;
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE,
				   env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		uint8_t set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found\n", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running) {
		/* dc->writeback_running changed in __cached_dev_store() */
		if (IS_ERR_OR_NULL(dc->writeback_thread)) {
			/*
			 * Reject setting writeback_running to 1 via sysfs
			 * if the writeback kthread has not been created yet.
			 */
			if (dc->writeback_running) {
				dc->writeback_running = false;
				pr_err("%s: failed to run non-existent writeback thread\n",
				       dc->disk.disk->disk_name);
			}
		} else
			/*
			 * The writeback kthread itself checks whether
			 * dc->writeback_running is true or false, so just
			 * queue it here.
			 */
			bch_writeback_queue(dc);
	}

	/*
	 * Only set BCACHE_DEV_WB_RUNNING when the cached device is attached
	 * to a cache set; otherwise it doesn't make sense.
	 */
	if (attr == &sysfs_writeback_percent)
		if ((dc->disk.c != NULL) &&
		    (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
			schedule_delayed_work(&dc->writeback_rate_update,
					      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_attrs[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_readahead_cache_policy,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_consider_fragment,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_fp_term_low,
	&sysfs_writeback_rate_fp_term_mid,
	&sysfs_writeback_rate_fp_term_high,
	&sysfs_writeback_rate_minimum,
	&sysfs_writeback_rate_debug,
	&sysfs_io_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	&sysfs_backing_dev_name,
	&sysfs_backing_dev_uuid,
	NULL
};
ATTRIBUTE_GROUPS(bch_cached_dev);
KTYPE(bch_cached_dev);

SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum, "%i", d->data_csum);
	sysfs_hprint(size, u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		/* the label is at most SB_LABEL_SIZE bytes; terminate it */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	sysfs_strtoul(data_csum, d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_attrs[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
ATTRIBUTE_GROUPS(bch_flash_dev);
KTYPE(bch_flash_dev);

struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:		%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:			%zu\n"
			"failed:			%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}

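/*
 * Report how full the root node is, as a percentage of btree node bytes.
 * The lock_root retry loop below re-reads c->root after taking the read
 * lock, since the root may have been replaced (e.g. by a split) while we
 * slept on the lock.
 */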
static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		ret = max(ret, hlist_count_nodes(h));
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous, CACHE_SYNC(&c->cache->sb));
	sysfs_print(journal_delay_ms, c->journal_delay_ms);
	sysfs_hprint(bucket_size, bucket_bytes(c->cache));
	sysfs_hprint(block_size, block_bytes(c->cache));
	sysfs_print(tree_depth, c->root->level);
	sysfs_print(root_usage_percent, bch_root_usage(c));

	sysfs_hprint(btree_cache_size, bch_cache_size(c));
	sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
	sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);

	sysfs_print(btree_used_percent, bch_btree_used(c));
	sysfs_print(btree_nodes, c->gc_stats.nodes);
	sysfs_hprint(average_key_size, bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(reclaimed_journal_buckets,
		    atomic_long_read(&c->reclaimed_journal_buckets));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife, c->error_decay * 88);
	sysfs_print(io_error_limit, c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(cutoff_writeback, bch_cutoff_writeback);
	sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);

	sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
	sysfs_printf(verify, "%i", c->verify);
	sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
	sysfs_printf(idle_max_writeback_rate, "%i",
		     c->idle_max_writeback_rate_enabled);
	sysfs_printf(gc_after_writeback, "%i", c->gc_after_writeback);
	sysfs_printf(io_disable, "%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	if (attr == &sysfs_feature_compat)
		return bch_print_cache_set_feature_compat(c, buf, PAGE_SIZE);
	if (attr == &sysfs_feature_ro_compat)
		return bch_print_cache_set_feature_ro_compat(c, buf, PAGE_SIZE);
	if (attr == &sysfs_feature_incompat)
		return bch_print_cache_set_feature_incompat(c, buf, PAGE_SIZE);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->cache->sb)) {
			SET_CACHE_SYNC(&c->cache->sb, sync);
			bcache_write_super(c);
		}
	}

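	/*
	 * flash_vol_create takes a human-readable size; a hypothetical
	 * invocation (set UUID made up for illustration):
	 *
	 *	echo 100G > /sys/fs/bcache/<set-uuid>/flash_vol_create
	 *
	 * creates a flash-only volume of that size backed by this cache set.
	 */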
	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done, 0);
		atomic_long_set(&c->writeback_keys_failed, 0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		force_wake_up_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		if (c->shrink)
			c->shrink->scan_objects(c->shrink, &sc);
	}

	sysfs_strtoul_clamp(congested_read_threshold_us,
			    c->congested_read_threshold_us,
			    0, UINT_MAX);
	sysfs_strtoul_clamp(congested_write_threshold_us,
			    c->congested_write_threshold_us,
			    0, UINT_MAX);

	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife) {
		unsigned long v = 0;
		ssize_t ret;

		ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
		if (!ret) {
			c->error_decay = v / 88;
			return size;
		}
		return ret;
	}

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set\n");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared\n");
		}
	}

	sysfs_strtoul_clamp(journal_delay_ms,
			    c->journal_delay_ms,
			    0, USHRT_MAX);
	sysfs_strtoul_bool(verify, c->verify);
	sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
	sysfs_strtoul_bool(gc_always_rewrite, c->gc_always_rewrite);
	sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
	sysfs_strtoul_bool(copy_gc_enabled, c->copy_gc_enabled);
	sysfs_strtoul_bool(idle_max_writeback_rate,
			   c->idle_max_writeback_rate_enabled);

	/*
	 * Writing gc_after_writeback here may overwrite an already set
	 * BCH_DO_AUTO_GC flag; that doesn't matter, because the flag will
	 * simply be set again at the next opportunity.
	 */
	sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_attrs[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
ATTRIBUTE_GROUPS(bch_cache_set);
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_attrs[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_reclaimed_journal_buckets,
	&sysfs_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_idle_max_writeback_rate,
	&sysfs_gc_after_writeback,
	&sysfs_io_disable,
	&sysfs_cutoff_writeback,
	&sysfs_cutoff_writeback_sync,
	&sysfs_feature_compat,
	&sysfs_feature_ro_compat,
	&sysfs_feature_incompat,
	NULL
};
ATTRIBUTE_GROUPS(bch_cache_set_internal);
KTYPE(bch_cache_set_internal);

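/*
 * Comparator for sort(): orders bucket priorities in descending order.
 * The cond_resched() keeps the sort preemption-friendly, since nbuckets
 * can be large.
 */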
static int __bch_cache_cmp(const void *l, const void *r)
{
	cond_resched();
	return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size, bucket_bytes(ca));
	sysfs_hprint(block_size, block_bytes(ca));
	sysfs_print(nbuckets, ca->sb.nbuckets);
	sysfs_print(discard, ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO) {
			cached++;
			n--;
		}

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			sum = div64_u64(sum, n);

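		/*
		 * Take 31 evenly spaced samples from the sorted (descending)
		 * priority array; INITIAL_PRIO - prio is, roughly, how long
		 * ago the bucket was last used.
		 */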
		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
						     (ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = sysfs_emit(buf,
				 "Unused:		%zu%%\n"
				 "Clean:		%zu%%\n"
				 "Dirty:		%zu%%\n"
				 "Metadata:	%zu%%\n"
				 "Average:	%llu\n"
				 "Sectors per Q:	%zu\n"
				 "Quantiles:	[",
				 unused * 100 / (size_t) ca->sb.nbuckets,
				 available * 100 / (size_t) ca->sb.nbuckets,
				 dirty * 100 / (size_t) ca->sb.nbuckets,
				 meta * 100 / (size_t) ca->sb.nbuckets, sum,
				 n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += sysfs_emit_at(buf, ret, "%u ", q[i]);
		ret--;

		ret += sysfs_emit_at(buf, ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (bdev_max_discard_sectors(ca->bdev))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_attrs[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
ATTRIBUTE_GROUPS(bch_cache);
KTYPE(bch_cache);