// SPDX-License-Identifier: GPL-2.0
/*
 * Data verification functions, i.e. hooks for ->readahead()
 *
 * Copyright 2019 Google LLC
 */

#include "fsverity_private.h"

#include <crypto/hash.h>
#include <linux/bio.h>

static struct workqueue_struct *fsverity_read_workqueue;

/*
 * Returns true if the hash block with index @hblock_idx in the tree, located
 * in @hpage, has already been verified.
 */
static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage,
				   unsigned long hblock_idx)
{
	unsigned int blocks_per_page;
	unsigned int i;

	/*
	 * When the Merkle tree block size and page size are the same, then the
	 * ->hash_block_verified bitmap isn't allocated, and we use PG_checked
	 * to directly indicate whether the page's block has been verified.
	 *
	 * Using PG_checked also guarantees that we re-verify hash pages that
	 * get evicted and re-instantiated from the backing storage, as new
	 * pages always start out with PG_checked cleared.
	 */
	if (!vi->hash_block_verified)
		return PageChecked(hpage);

	/*
	 * When the Merkle tree block size and page size differ, we use a
	 * bitmap to indicate whether each hash block has been verified.
	 *
	 * However, we still need to ensure that hash pages that get evicted
	 * and re-instantiated from the backing storage are re-verified.  To
	 * do this, we use PG_checked again, but now it doesn't really mean
	 * "checked".  Instead, now it just serves as an indicator for whether
	 * the hash page is newly instantiated or not.  If the page is new, as
	 * indicated by PG_checked=0, we clear the bitmap bits for the page's
	 * blocks since they are untrustworthy, then set PG_checked=1.
	 * Otherwise we return the bitmap bit for the requested block.
	 *
	 * Multiple threads may execute this code concurrently on the same
	 * page.  This is safe because we use memory barriers to ensure that
	 * if a thread sees PG_checked=1, then it also sees the associated
	 * bitmap clearing to have occurred.  Also, all writes and their
	 * corresponding reads are atomic, and all writes are safe to repeat
	 * in the event that multiple threads get into the PG_checked=0
	 * section.  (Clearing a bitmap bit again at worst causes a hash block
	 * to be verified redundantly.  That event should be very rare, so
	 * it's not worth using a lock to avoid.  Setting PG_checked again has
	 * no effect.)
	 */
	if (PageChecked(hpage)) {
		/*
		 * A read memory barrier is needed here to give ACQUIRE
		 * semantics to the above PageChecked() test.
		 */
		smp_rmb();
		return test_bit(hblock_idx, vi->hash_block_verified);
	}
	blocks_per_page = vi->tree_params.blocks_per_page;
	hblock_idx = round_down(hblock_idx, blocks_per_page);
	for (i = 0; i < blocks_per_page; i++)
		clear_bit(hblock_idx + i, vi->hash_block_verified);
	/*
	 * A write memory barrier is needed here to give RELEASE semantics to
	 * the below SetPageChecked() operation.
	 */
	smp_wmb();
	SetPageChecked(hpage);
	return false;
}
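
/*
 * Illustrative example (numbers assumed, not from the code above): with
 * 4 KiB pages and a 1 KiB Merkle tree block size, blocks_per_page = 4.  A
 * query for hblock_idx 6 on a newly instantiated hash page rounds down to 4
 * and clears bitmap bits 4-7 before setting PG_checked, so all four blocks
 * in that page get re-verified after an eviction.
 */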

/*
 * Verify a single data block against the file's Merkle tree.
 *
 * In principle, we need to verify the entire path to the root node.  However,
 * for efficiency the filesystem may cache the hash blocks.  Therefore we need
 * only ascend the tree until an already-verified hash block is seen, and then
 * verify the path to that block.
 *
 * Return: %true if the data block is valid, else %false.
 */
static bool
verify_data_block(struct inode *inode, struct fsverity_info *vi,
		  const void *data, u64 data_pos, unsigned long max_ra_pages)
{
	const struct merkle_tree_params *params = &vi->tree_params;
	const unsigned int hsize = params->digest_size;
	int level;
	u8 _want_hash[FS_VERITY_MAX_DIGEST_SIZE];
	const u8 *want_hash;
	u8 real_hash[FS_VERITY_MAX_DIGEST_SIZE];
	/* The hash blocks that are traversed, indexed by level */
	struct {
		/* Page containing the hash block */
		struct page *page;
		/* Mapped address of the hash block (will be within @page) */
		const void *addr;
		/* Index of the hash block in the tree overall */
		unsigned long index;
		/* Byte offset of the wanted hash relative to @addr */
		unsigned int hoffset;
	} hblocks[FS_VERITY_MAX_LEVELS];
	/*
	 * The index of the previous level's block within that level; also the
	 * index of that block's hash within the current level.
	 */
	u64 hidx = data_pos >> params->log_blocksize;

	/* Up to 1 + FS_VERITY_MAX_LEVELS pages may be mapped at once */
	BUILD_BUG_ON(1 + FS_VERITY_MAX_LEVELS > KM_MAX_IDX);

	if (unlikely(data_pos >= inode->i_size)) {
		/*
		 * This can happen in the data page spanning EOF when the
		 * Merkle tree block size is less than the page size.  The
		 * Merkle tree doesn't cover data blocks fully past EOF.  But
		 * the entire page spanning EOF can be visible to userspace
		 * via a mmap, and any part past EOF should be all zeroes.
		 * Therefore, we need to verify that any data blocks fully
		 * past EOF are all zeroes.
		 */
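		/*
		 * For instance (illustrative numbers): with i_size = 5000, a
		 * 1 KiB block size, and 4 KiB pages, the page spanning EOF
		 * covers bytes 4096-8191; the blocks at data_pos 5120, 6144,
		 * and 7168 lie fully past EOF and must be all zeroes, while
		 * the block at 4096 is still covered by the Merkle tree.
		 */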
		if (memchr_inv(data, 0, params->block_size)) {
			fsverity_err(inode,
				     "FILE CORRUPTED! Data past EOF is not zeroed");
			return false;
		}
		return true;
	}

	/*
	 * Starting at the leaf level, ascend the tree saving hash blocks
	 * along the way until we find a hash block that has already been
	 * verified, or until we reach the root.
	 */
	for (level = 0; level < params->num_levels; level++) {
		unsigned long next_hidx;
		unsigned long hblock_idx;
		pgoff_t hpage_idx;
		unsigned int hblock_offset_in_page;
		unsigned int hoffset;
		struct page *hpage;
		const void *haddr;

		/*
		 * The index of the block in the current level; also the index
		 * of that block's hash within the next level.
		 */
		next_hidx = hidx >> params->log_arity;

		/* Index of the hash block in the tree overall */
		hblock_idx = params->level_start[level] + next_hidx;

		/* Index of the hash page in the tree overall */
		hpage_idx = hblock_idx >> params->log_blocks_per_page;

		/* Byte offset of the hash block within the page */
		hblock_offset_in_page =
			(hblock_idx << params->log_blocksize) & ~PAGE_MASK;

		/* Byte offset of the hash within the block */
		hoffset = (hidx << params->log_digestsize) &
			  (params->block_size - 1);
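
		/*
		 * Worked example (illustrative): with SHA-256 (32-byte
		 * digests) and 4 KiB blocks, each hash block holds
		 * 4096 / 32 = 128 hashes, so log_arity = 7 and
		 * log_digestsize = 5.  For hidx = 130 at the leaf level,
		 * next_hidx = 130 >> 7 = 1 and
		 * hoffset = (130 << 5) & 4095 = 64.
		 */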

		hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode,
				hpage_idx, level == 0 ? min(max_ra_pages,
					params->tree_pages - hpage_idx) : 0);
		if (IS_ERR(hpage)) {
			fsverity_err(inode,
				     "Error %ld reading Merkle tree page %lu",
				     PTR_ERR(hpage), hpage_idx);
			goto error;
		}
		haddr = kmap_local_page(hpage) + hblock_offset_in_page;
		if (is_hash_block_verified(vi, hpage, hblock_idx)) {
			memcpy(_want_hash, haddr + hoffset, hsize);
			want_hash = _want_hash;
			kunmap_local(haddr);
			put_page(hpage);
			goto descend;
		}
		hblocks[level].page = hpage;
		hblocks[level].addr = haddr;
		hblocks[level].index = hblock_idx;
		hblocks[level].hoffset = hoffset;
		hidx = next_hidx;
	}

	want_hash = vi->root_hash;
descend:
	/* Descend the tree verifying hash blocks. */
	for (; level > 0; level--) {
		struct page *hpage = hblocks[level - 1].page;
		const void *haddr = hblocks[level - 1].addr;
		unsigned long hblock_idx = hblocks[level - 1].index;
		unsigned int hoffset = hblocks[level - 1].hoffset;

		if (fsverity_hash_block(params, inode, haddr, real_hash) != 0)
			goto error;
		if (memcmp(want_hash, real_hash, hsize) != 0)
			goto corrupted;
		/*
		 * Mark the hash block as verified.  This must be atomic and
		 * idempotent, as the same hash block might be verified by
		 * multiple threads concurrently.
		 */
		if (vi->hash_block_verified)
			set_bit(hblock_idx, vi->hash_block_verified);
		else
			SetPageChecked(hpage);
		memcpy(_want_hash, haddr + hoffset, hsize);
		want_hash = _want_hash;
		kunmap_local(haddr);
		put_page(hpage);
	}

	/* Finally, verify the data block. */
	if (fsverity_hash_block(params, inode, data, real_hash) != 0)
		goto error;
	if (memcmp(want_hash, real_hash, hsize) != 0)
		goto corrupted;
	return true;

corrupted:
	fsverity_err(inode,
		     "FILE CORRUPTED! pos=%llu, level=%d, want_hash=%s:%*phN, real_hash=%s:%*phN",
		     data_pos, level - 1,
		     params->hash_alg->name, hsize, want_hash,
		     params->hash_alg->name, hsize, real_hash);
error:
	for (; level > 0; level--) {
		kunmap_local(hblocks[level - 1].addr);
		put_page(hblocks[level - 1].page);
	}
	return false;
}

static bool
verify_data_blocks(struct folio *data_folio, size_t len, size_t offset,
		   unsigned long max_ra_pages)
{
	struct inode *inode = data_folio->mapping->host;
	struct fsverity_info *vi = inode->i_verity_info;
	const unsigned int block_size = vi->tree_params.block_size;
	u64 pos = (u64)data_folio->index << PAGE_SHIFT;

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offset, block_size)))
		return false;
	if (WARN_ON_ONCE(!folio_test_locked(data_folio) ||
			 folio_test_uptodate(data_folio)))
		return false;
	do {
		void *data;
		bool valid;

		data = kmap_local_folio(data_folio, offset);
		valid = verify_data_block(inode, vi, data, pos + offset,
					  max_ra_pages);
		kunmap_local(data);
		if (!valid)
			return false;
		offset += block_size;
		len -= block_size;
	} while (len);
	return true;
}

/**
 * fsverity_verify_blocks() - verify data in a folio
 * @folio: the folio containing the data to verify
 * @len: the length of the data to verify in the folio
 * @offset: the offset of the data to verify in the folio
 *
 * Verify data that has just been read from a verity file.  The data must be
 * located in a pagecache folio that is still locked and not yet uptodate.  The
 * length and offset of the data must be Merkle tree block size aligned.
 *
 * Return: %true if the data is valid, else %false.
 */
bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset)
{
	return verify_data_blocks(folio, len, offset, 0);
}
EXPORT_SYMBOL_GPL(fsverity_verify_blocks);
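
/*
 * Hypothetical usage sketch (not part of this file): a filesystem's read
 * path might call fsverity_verify_blocks() roughly like this after filling
 * a locked, not-yet-uptodate folio:
 *
 *	if (fsverity_active(inode) &&
 *	    !fsverity_verify_blocks(folio, folio_size(folio), 0))
 *		return -EIO;	// treat verification failure as a read error
 *	folio_mark_uptodate(folio);
 *	folio_unlock(folio);
 */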

#ifdef CONFIG_BLOCK
/**
 * fsverity_verify_bio() - verify a 'read' bio that has just completed
 * @bio: the bio to verify
 *
 * Verify the bio's data against the file's Merkle tree.  All bio data segments
 * must be aligned to the file's Merkle tree block size.  If any data fails
 * verification, then bio->bi_status is set to an error status.
 *
 * This is a helper function for use by the ->readahead() method of filesystems
 * that issue bios to read data directly into the page cache.  Filesystems that
 * populate the page cache without issuing bios (e.g. non block-based
 * filesystems) must instead call fsverity_verify_page() directly on each page.
 * All filesystems must also call fsverity_verify_page() on holes.
 */
void fsverity_verify_bio(struct bio *bio)
{
	struct folio_iter fi;
	unsigned long max_ra_pages = 0;

	if (bio->bi_opf & REQ_RAHEAD) {
		/*
		 * If this bio is for data readahead, then we also do readahead
		 * of the first (largest) level of the Merkle tree.  Namely,
		 * when a Merkle tree page is read, we also try to piggy-back
		 * on some additional pages -- up to 1/4 the number of data
		 * pages.
		 *
		 * This improves sequential read performance, as it greatly
		 * reduces the number of I/O requests made to the Merkle tree.
		 */
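		/*
		 * Example (assuming 4 KiB pages, i.e. PAGE_SHIFT = 12): a
		 * 1 MiB readahead bio gives
		 * max_ra_pages = 1048576 >> 14 = 64 Merkle tree pages.
		 */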
		max_ra_pages = bio->bi_iter.bi_size >> (PAGE_SHIFT + 2);
	}

	bio_for_each_folio_all(fi, bio) {
		if (!verify_data_blocks(fi.folio, fi.length, fi.offset,
					max_ra_pages)) {
			bio->bi_status = BLK_STS_IOERR;
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(fsverity_verify_bio);
#endif /* CONFIG_BLOCK */

/**
 * fsverity_enqueue_verify_work() - enqueue work on the fs-verity workqueue
 * @work: the work to enqueue
 *
 * Enqueue verification work for asynchronous processing.
 */
void fsverity_enqueue_verify_work(struct work_struct *work)
{
	queue_work(fsverity_read_workqueue, work);
}
EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);
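
/*
 * Hypothetical usage sketch (not part of this file): filesystems typically
 * defer verification out of the bio completion (interrupt) context onto this
 * workqueue.  The myfs_* names below are made up for illustration.
 *
 *	static void myfs_verity_work(struct work_struct *work)
 *	{
 *		struct myfs_bio_ctx *ctx =
 *			container_of(work, struct myfs_bio_ctx, work);
 *
 *		fsverity_verify_bio(ctx->bio);
 *		myfs_finish_read(ctx->bio);
 *	}
 *
 *	// from the bio's end_io handler:
 *	INIT_WORK(&ctx->work, myfs_verity_work);
 *	fsverity_enqueue_verify_work(&ctx->work);
 */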

void __init fsverity_init_workqueue(void)
{
	/*
	 * Use a high-priority workqueue to prioritize verification work,
	 * which blocks reads from completing, over regular application tasks.
	 *
	 * For performance reasons, don't use an unbound workqueue.  Using an
	 * unbound workqueue for crypto operations causes excessive scheduler
	 * latency on ARM64.
	 */
	fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
						  WQ_HIGHPRI,
						  num_online_cpus());
	if (!fsverity_read_workqueue)
		panic("failed to allocate fsverity_read_queue");
}