// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;
static struct kmem_cache *io_end_vec_cachep;

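/*
 * Create the slab caches used for io_end and io_end_vec allocations.
 */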
int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;

	io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
	if (io_end_vec_cachep == NULL) {
		kmem_cache_destroy(io_end_cachep);
		return -ENOMEM;
	}
	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_end_vec_cachep);
}

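/*
 * Allocate a new io_end_vec, add it to the tail of the io_end's vector
 * list and return it, or ERR_PTR(-ENOMEM) on allocation failure.
 */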
struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec;

	io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
	if (!io_end_vec)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&io_end_vec->list);
	list_add_tail(&io_end_vec->list, &io_end->list_vec);
	return io_end_vec;
}

static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec, *tmp;

	if (list_empty(&io_end->list_vec))
		return;
	list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
		list_del(&io_end_vec->list);
		kmem_cache_free(io_end_vec_cachep, io_end_vec);
	}
}

struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
{
	BUG_ON(list_empty(&io_end->list_vec));
	return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
}

/*
 * Print a buffer I/O error compatible with fs/buffer.c. This provides
 * compatibility with dmesg scrapers that look for a specific buffer
 * I/O error message. We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
			   bh->b_bdev,
			   (unsigned long long)bh->b_blocknr);
}

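/*
 * Finish page writeback for every folio covered by the bio: clear the
 * async_write flag on the buffers this bio wrote, record any I/O error,
 * and, once no buffer in a folio is still under I/O, free the fscrypt
 * bounce page (if any) and end writeback on the folio.
 */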
static void ext4_finish_bio(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;
		struct folio *io_folio = NULL;
		struct buffer_head *bh, *head;
		size_t bio_start = fi.offset;
		size_t bio_end = bio_start + fi.length;
		unsigned under_io = 0;
		unsigned long flags;

		if (fscrypt_is_bounce_folio(folio)) {
			io_folio = folio;
			folio = fscrypt_pagecache_folio(folio);
		}

		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);
			folio_set_error(folio);
			mapping_set_error(folio->mapping, err);
		}
		bh = head = folio_buffers(folio);
		/*
		 * We check all buffers in the folio under b_uptodate_lock
		 * to avoid races with other end io clearing async_write flags
		 */
		spin_lock_irqsave(&head->b_uptodate_lock, flags);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_status) {
				set_buffer_write_io_error(bh);
				buffer_io_error(bh);
			}
		} while ((bh = bh->b_this_page) != head);
		spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
		if (!under_io) {
			fscrypt_free_bounce_page(&io_folio->page);
			folio_end_writeback(folio);
		}
	}
}

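/*
 * Finish and drop any bios still chained to the io_end, free its
 * io_end_vec list and return the structure to its slab cache.
 */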
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	ext4_free_io_end_vec(io_end);
	kmem_cache_free(io_end_cachep, io_end);
}

/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching the same part of the extent tree by
 * the fact that truncate code waits for all DIO to finish (thus exclusion from
 * direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (happens from ext4_free_ioend()).
 */
static int ext4_end_io_end(ext4_io_end_t *io_end)
{
	struct inode *inode = io_end->inode;
	handle_t *handle = io_end->handle;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io_end 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io_end, inode->i_ino, io_end->list.next, io_end->list.prev);

	io_end->handle = NULL;	/* Following call will use up the handle */
	ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
	if (ret < 0 && !ext4_forced_shutdown(inode->i_sb)) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss! "
			 "(inode %lu, error %d)", inode->i_ino, ret);
	}
	ext4_clear_io_unwritten_flag(io_end);
	ext4_release_io_end(io_end);
	return ret;
}

static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io_end, *io_end0, *io_end1;

	if (list_empty(head))
		return;

	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
	list_for_each_entry(io_end, head, list) {
		cur = &io_end->list;
		before = cur->prev;
		io_end0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io_end1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			   io_end, inode->i_ino, io_end0, io_end1);
	}
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
	struct workqueue_struct *wq;
	unsigned long flags;

	/* Only reserved conversions from writeback should enter here */
	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
	WARN_ON(!io_end->handle && sbi->s_journal);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	wq = sbi->rsv_conversion_wq;
	if (list_empty(&ei->i_rsv_conversion_list))
		queue_work(wq, &ei->i_rsv_conversion_work);
	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

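/*
 * Pull all io_ends off the inode's completed-IO list and perform the
 * unwritten extent conversion for each of them. Returns the first error
 * encountered, if any.
 */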
static int ext4_do_flush_completed_IO(struct inode *inode,
				      struct list_head *head)
{
	ext4_io_end_t *io_end;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode, head);
	list_replace_init(head, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io_end = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
		list_del_init(&io_end->list);

		err = ext4_end_io_end(io_end);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * Work on completed IO, to convert unwritten extents to written extents.
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_rsv_conversion_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

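/*
 * Allocate a zeroed io_end for the inode with a reference count of one.
 * The caller drops that reference with ext4_put_io_end() or
 * ext4_put_io_end_defer().
 */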
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);

	if (io_end) {
		io_end->inode = inode;
		INIT_LIST_HEAD(&io_end->list);
		INIT_LIST_HEAD(&io_end->list_vec);
		refcount_set(&io_end->count, 1);
	}
	return io_end;
}

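/*
 * Drop a reference to an io_end. On the last reference, either free it
 * directly (nothing to convert) or queue it on the inode's completed-IO
 * list so the conversion workqueue finishes it.
 */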
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (refcount_dec_and_test(&io_end->count)) {
		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
		    list_empty(&io_end->list_vec)) {
			ext4_release_io_end(io_end);
			return;
		}
		ext4_add_complete_io(io_end);
	}
}

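/*
 * Drop a reference to an io_end. On the last reference, perform any
 * pending unwritten extent conversion synchronously and free the io_end.
 * Returns the conversion error, if any.
 */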
int ext4_put_io_end(ext4_io_end_t *io_end)
{
	int err = 0;

	if (refcount_dec_and_test(&io_end->count)) {
		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
			err = ext4_convert_unwritten_io_end_vec(io_end->handle,
								io_end);
			io_end->handle = NULL;
			ext4_clear_io_unwritten_flag(io_end);
		}
		ext4_release_io_end(io_end);
	}
	return err;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	refcount_inc(&io_end->count);
	return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
	ext4_io_end_t *io_end = bio->bi_private;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	if (WARN_ONCE(!io_end, "io_end is NULL: %pg: sector %Lu len %u err %d\n",
		      bio->bi_bdev,
		      (long long) bio->bi_iter.bi_sector,
		      (unsigned) bio_sectors(bio),
		      bio->bi_status)) {
		ext4_finish_bio(bio);
		bio_put(bio);
		return;
	}
	bio->bi_end_io = NULL;

	if (bio->bi_status) {
		struct inode *inode = io_end->inode;

		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "starting block %llu)",
			     bio->bi_status, inode->i_ino,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
		mapping_set_error(inode->i_mapping,
				  blk_status_to_errno(bio->bi_status));
	}

	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
		/*
		 * Link bio into list hanging from io_end. We have to do it
		 * atomically as bio completions can be racing against each
		 * other.
		 */
		bio->bi_private = xchg(&io_end->bio, bio);
		ext4_put_io_end_defer(io_end);
	} else {
		/*
		 * Drop io_end reference early. Inode can get freed once
		 * we finish the bio.
		 */
		ext4_put_io_end_defer(io_end);
		ext4_finish_bio(bio);
		bio_put(bio);
	}
}

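/*
 * Submit the bio currently attached to the io_submit context, if any,
 * marking it REQ_SYNC for WB_SYNC_ALL writeback, and clear io->io_bio.
 */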
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		if (io->io_wbc->sync_mode == WB_SYNC_ALL)
			io->io_bio->bi_opf |= REQ_SYNC;
		submit_bio(io->io_bio);
	}
	io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_wbc = wbc;
	io->io_bio = NULL;
	io->io_end = NULL;
}

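/*
 * Allocate a new write bio targeting the block of the buffer_head,
 * attach the fscrypt context, the completion handler and an io_end
 * reference, and make it the current bio of the io_submit context.
 */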
static void io_submit_init_bio(struct ext4_io_submit *io,
			       struct buffer_head *bh)
{
	struct bio *bio;

	/*
	 * bio_alloc will _always_ be able to allocate a bio if
	 * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
	 */
	bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	wbc_init_bio(io->io_wbc, bio);
}

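/*
 * Add the buffer to the current bio, starting a new bio (and submitting
 * the previous one) when the buffer is not contiguous with it, cannot
 * share its encryption context, or the bio is already full.
 */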
static void io_submit_add_bh(struct ext4_io_submit *io,
			     struct inode *inode,
			     struct folio *folio,
			     struct folio *io_folio,
			     struct buffer_head *bh)
{
	if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
			   !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL)
		io_submit_init_bio(io, bh);
	if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
		goto submit_and_retry;
	wbc_account_cgroup_owner(io->io_wbc, &folio->page, bh->b_size);
	io->io_next_block++;
}

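/*
 * Write out the dirty, mapped buffers covering the first len bytes of
 * the folio. Buffers that cannot be written here (delayed, unwritten,
 * or journalled-dirty) may leave the folio redirtied so that a later
 * writeback pass picks them up. For inodes using fscrypt fs-layer
 * crypto the data is first encrypted into a bounce page and the bio is
 * built from that page instead.
 */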
int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
			 size_t len)
{
	struct folio *io_folio = folio;
	struct inode *inode = folio->mapping->host;
	unsigned block_start;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_to_submit = 0;
	struct writeback_control *wbc = io->io_wbc;
	bool keep_towrite = false;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(folio_test_writeback(folio));

	folio_clear_error(folio);

	/*
	 * Comments copied from block_write_full_folio:
	 *
	 * The folio straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < folio_size(folio))
		folio_zero_segment(folio, len, folio_size(folio));
	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the folio before submitting so that
	 * folio_end_writeback() cannot be called from ext4_end_bio() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = folio_buffers(folio);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			/*
			 * Keeping dirty some buffer we cannot write? Make sure
			 * to redirty the folio and keep TOWRITE tag so that
			 * racing WB_SYNC_ALL writeback does not skip the folio.
			 * This happens e.g. when doing writeout for
			 * transaction commit or when journalled data is not
			 * yet committed.
			 */
			if (buffer_dirty(bh) ||
			    (buffer_jbd(bh) && buffer_jbddirty(bh))) {
				if (!folio_test_dirty(folio))
					folio_redirty_for_writepage(wbc, folio);
				keep_towrite = true;
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		set_buffer_async_write(bh);
		clear_buffer_dirty(bh);
		nr_to_submit++;
	} while ((bh = bh->b_this_page) != head);

	/* Nothing to submit? Just unlock the folio... */
	if (!nr_to_submit)
		return 0;

	bh = head = folio_buffers(folio);

	/*
	 * If any blocks are being written to an encrypted file, encrypt them
	 * into a bounce page. For simplicity, just encrypt until the last
	 * block which might be needed. This may cause some unneeded blocks
	 * (e.g. holes) to be unnecessarily encrypted, but this is rare and
	 * can't happen in the common case of blocksize == PAGE_SIZE.
	 */
	if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		gfp_t gfp_flags = GFP_NOFS;
		unsigned int enc_bytes = round_up(len, i_blocksize(inode));
		struct page *bounce_page;

		/*
		 * Since bounce page allocation uses a mempool, we can only use
		 * a waiting mask (i.e. request guaranteed allocation) on the
		 * first page of the bio. Otherwise it can deadlock.
		 */
		if (io->io_bio)
			gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
	retry_encrypt:
		bounce_page = fscrypt_encrypt_pagecache_blocks(&folio->page,
							       enc_bytes, 0, gfp_flags);
		if (IS_ERR(bounce_page)) {
			ret = PTR_ERR(bounce_page);
			if (ret == -ENOMEM &&
			    (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
				gfp_t new_gfp_flags = GFP_NOFS;
				if (io->io_bio)
					ext4_io_submit(io);
				else
					new_gfp_flags |= __GFP_NOFAIL;
				memalloc_retry_wait(gfp_flags);
				gfp_flags = new_gfp_flags;
				goto retry_encrypt;
			}

			printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
			folio_redirty_for_writepage(wbc, folio);
			do {
				if (buffer_async_write(bh)) {
					clear_buffer_async_write(bh);
					set_buffer_dirty(bh);
				}
				bh = bh->b_this_page;
			} while (bh != head);

			return ret;
		}
		io_folio = page_folio(bounce_page);
	}

	__folio_start_writeback(folio, keep_towrite);

	/* Now submit buffers to write */
	do {
		if (!buffer_async_write(bh))
			continue;
		io_submit_add_bh(io, inode, folio, io_folio, bh);
	} while ((bh = bh->b_this_page) != head);

	return 0;
}