// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_LOG_PRIV_H__
#define __XFS_LOG_PRIV_H__

#include "xfs_extent_busy.h"	/* for struct xfs_busy_extents */

struct xfs_buf;
struct xlog;
struct xlog_ticket;
struct xfs_mount;

/*
 * get client id from packed copy.
 *
 * this hack is here because the xlog_pack code copies four bytes
 * of xlog_op_header containing the fields oh_clientid, oh_flags
 * and oh_res2 into the packed copy.
 *
 * later on this four byte chunk is treated as an int and the
 * client id is pulled out.
 *
 * this has endian issues, of course.
 */
static inline uint xlog_get_client_id(__be32 i)
{
	return be32_to_cpu(i) >> 24;
}

/*
 * In core log state
 */
enum xlog_iclog_state {
	XLOG_STATE_ACTIVE,	/* Current IC log being written to */
	XLOG_STATE_WANT_SYNC,	/* Want to sync this iclog; no more writes */
	XLOG_STATE_SYNCING,	/* This IC log is syncing */
	XLOG_STATE_DONE_SYNC,	/* Done syncing to disk */
	XLOG_STATE_CALLBACK,	/* Callback functions now */
	XLOG_STATE_DIRTY,	/* Dirty IC log, not ready for ACTIVE status */
};

#define XLOG_STATE_STRINGS \
	{ XLOG_STATE_ACTIVE,	"XLOG_STATE_ACTIVE" }, \
	{ XLOG_STATE_WANT_SYNC,	"XLOG_STATE_WANT_SYNC" }, \
	{ XLOG_STATE_SYNCING,	"XLOG_STATE_SYNCING" }, \
	{ XLOG_STATE_DONE_SYNC,	"XLOG_STATE_DONE_SYNC" }, \
	{ XLOG_STATE_CALLBACK,	"XLOG_STATE_CALLBACK" }, \
	{ XLOG_STATE_DIRTY,	"XLOG_STATE_DIRTY" }

/*
 * In core log flags
 */
#define XLOG_ICL_NEED_FLUSH	(1u << 0)	/* iclog needs REQ_PREFLUSH */
#define XLOG_ICL_NEED_FUA	(1u << 1)	/* iclog needs REQ_FUA */

#define XLOG_ICL_STRINGS \
	{ XLOG_ICL_NEED_FLUSH,	"XLOG_ICL_NEED_FLUSH" }, \
	{ XLOG_ICL_NEED_FUA,	"XLOG_ICL_NEED_FUA" }


/*
 * Log ticket flags
 */
#define XLOG_TIC_PERM_RESERV	(1u << 0)	/* permanent reservation */

#define XLOG_TIC_FLAGS \
	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }

/*
 * Below are states for covering allocation transactions.
 * By covering, we mean changing the h_tail_lsn in the last on-disk
 * log write such that no allocation transactions will be re-done during
 * recovery after a system crash. Recovery starts at the last on-disk
 * log write.
 *
 * These states are used to insert dummy log entries to cover
 * space allocation transactions which can undo non-transactional changes
 * after a crash. Writes to a file with space already allocated do not
 * result in any transactions. Allocations might include space beyond
 * the EOF. So if we just push the EOF a little, the last transaction for
 * the file could contain the wrong size. If there is no file system
 * activity after an allocation transaction and the system crashes, the
 * allocation transaction will get replayed and the file will be
 * truncated. This could be hours/days/... after the allocation occurred.
 *
 * The fix for this is to do two dummy transactions when the
 * system is idle. We need two dummy transactions because the h_tail_lsn
 * in the log record header needs to point beyond the last possible
 * non-dummy transaction. The first dummy changes the h_tail_lsn to
 * the first transaction before the dummy. The second dummy causes
 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
 *
 * These dummy transactions get committed when everything
 * is idle (after there has been some activity).
 *
 * There are 5 states used to control this.
 *
 * IDLE -- no logging has been done on the file system or
 *		we are done covering previous transactions.
 * NEED -- logging has occurred and we need a dummy transaction
 *		when the log becomes idle.
 * DONE -- we were in the NEED state and have committed a dummy
 *		transaction.
 * NEED2 -- we detected that a dummy transaction has gone to the
 *		on disk log with no other transactions.
 * DONE2 -- we committed a dummy transaction when in the NEED2 state.
 *
 * There are two places where we switch states:
 *
 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
 *	We commit the dummy transaction and switch to DONE or DONE2,
 *	respectively. In all other states, we don't do anything.
 *
 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
 *
 *	No matter what state we are in, if this isn't the dummy
 *	transaction going out, the next state is NEED.
 *	So, if we aren't in the DONE or DONE2 states, the next state
 *	is NEED. We can't be finishing a write of the dummy record
 *	unless it was committed and the state switched to DONE or DONE2.
 *
 *	If we are in the DONE state and this was a write of the
 *	dummy transaction, we move to NEED2.
 *
 *	If we are in the DONE2 state and this was a write of the
 *	dummy transaction, we move to IDLE.
 *
 *
 * Writing only one dummy transaction can get appended to
 * one file space allocation. When this happens, the log recovery
 * code replays the space allocation and a file could be truncated.
 * This is why we have the NEED2 and DONE2 states before going idle.
 */

#define XLOG_STATE_COVER_IDLE	0
#define XLOG_STATE_COVER_NEED	1
#define XLOG_STATE_COVER_DONE	2
#define XLOG_STATE_COVER_NEED2	3
#define XLOG_STATE_COVER_DONE2	4

#define XLOG_COVER_OPS		5
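
/*
 * Illustrative sketch only, not the kernel's implementation: the covering
 * state transitions described above, applied when a log write completes.
 * The helper name and the "was_dummy" flag are hypothetical; "was_dummy"
 * says whether the record that just reached the disk was one of the
 * covering dummy transactions.
 */
static inline int
xlog_next_covered_state_sketch(int cur_state, bool was_dummy)
{
	if (!was_dummy)
		return XLOG_STATE_COVER_NEED;	/* real work went out, cover again */
	if (cur_state == XLOG_STATE_COVER_DONE)
		return XLOG_STATE_COVER_NEED2;	/* first dummy is now on disk */
	if (cur_state == XLOG_STATE_COVER_DONE2)
		return XLOG_STATE_COVER_IDLE;	/* second dummy is now on disk */
	return XLOG_STATE_COVER_NEED;		/* can't be finishing a dummy here */
}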

typedef struct xlog_ticket {
	struct list_head	t_queue;	/* reserve/write queue */
	struct task_struct	*t_task;	/* task that owns this ticket */
	xlog_tid_t		t_tid;		/* transaction identifier */
	atomic_t		t_ref;		/* ticket reference count */
	int			t_curr_res;	/* current reservation */
	int			t_unit_res;	/* unit reservation */
	char			t_ocnt;		/* original unit count */
	char			t_cnt;		/* current unit count */
	uint8_t			t_flags;	/* properties of reservation */
	int			t_iclog_hdrs;	/* iclog hdrs in t_curr_res */
} xlog_ticket_t;

/*
 * - A log record header is 512 bytes. There is plenty of room to grow the
 *   xlog_rec_header_t into the reserved space.
 * - ic_data follows, so a write to disk can start at the beginning of
 *   the iclog.
 * - ic_force_wait is used to implement synchronous forcing of the iclog to
 *   disk.
 * - ic_next is the pointer to the next iclog in the ring.
 * - ic_log is a pointer back to the global log structure.
 * - ic_size is the full size of the log buffer, minus the cycle headers.
 * - ic_offset is the current number of bytes written to in this iclog.
 * - ic_refcnt is bumped when someone is writing to the log.
 * - ic_state is the state of the iclog.
 *
 * Because of cacheline contention on large machines, we need to separate
 * various resources onto different cachelines. To start with, make the
 * structure cacheline aligned. The following fields can be contended on
 * by independent processes:
 *
 *	- ic_callbacks
 *	- ic_refcnt
 *	- fields protected by the global l_icloglock
 *
 * so we need to ensure that these fields are located in separate cachelines.
 * We'll put all the read-only and l_icloglock fields in the first cacheline,
 * and move everything else out to subsequent cachelines.
 */
typedef struct xlog_in_core {
	wait_queue_head_t	ic_force_wait;
	wait_queue_head_t	ic_write_wait;
	struct xlog_in_core	*ic_next;
	struct xlog_in_core	*ic_prev;
	struct xlog		*ic_log;
	u32			ic_size;
	u32			ic_offset;
	enum xlog_iclog_state	ic_state;
	unsigned int		ic_flags;
	void			*ic_datap;	/* pointer to iclog data */
	struct list_head	ic_callbacks;

	/* reference counts need their own cacheline */
	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
	xlog_in_core_2_t	*ic_data;
#define ic_header	ic_data->hic_header
#ifdef DEBUG
	bool			ic_fail_crc : 1;
#endif
	struct semaphore	ic_sema;
	struct work_struct	ic_end_io_work;
	struct bio		ic_bio;
	struct bio_vec		ic_bvec[];
} xlog_in_core_t;

/*
 * The CIL context is used to aggregate per-transaction details and is then
 * passed to the iclog for checkpoint post-commit processing. After being
 * passed to the iclog, another context needs to be allocated for tracking the
 * next set of transactions to be aggregated into a checkpoint.
 */
struct xfs_cil;

struct xfs_cil_ctx {
	struct xfs_cil		*cil;
	xfs_csn_t		sequence;	/* chkpt sequence # */
	xfs_lsn_t		start_lsn;	/* first LSN of chkpt commit */
	xfs_lsn_t		commit_lsn;	/* chkpt commit record lsn */
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*ticket;	/* chkpt ticket */
	atomic_t		space_used;	/* aggregate size of regions */
	struct xfs_busy_extents	busy_extents;
	struct list_head	log_items;	/* log items in chkpt */
	struct list_head	lv_chain;	/* logvecs being pushed */
	struct list_head	iclog_entry;
	struct list_head	committing;	/* ctx committing list */
	struct work_struct	push_work;
	atomic_t		order_id;

	/*
	 * CPUs that could have added items to the percpu CIL data. Access is
	 * coordinated with xc_ctx_lock.
	 */
	struct cpumask		cil_pcpmask;
};

/*
 * Per-cpu CIL tracking items
 */
struct xlog_cil_pcp {
	int32_t			space_used;
	uint32_t		space_reserved;
	struct list_head	busy_extents;
	struct list_head	log_items;
};

/*
 * Committed Item List structure
 *
 * This structure is used to track log items that have been committed but not
 * yet written into the log. It is used only when the delayed logging mount
 * option is enabled.
 *
 * This structure tracks the list of committing checkpoint contexts so
 * we can avoid the problem of having to hold out new transactions during a
 * flush until we have the commit record LSN of the checkpoint. We can
 * traverse the list of committing contexts in xlog_cil_force_seq() to find a
 * sequence match and extract the commit LSN directly from there. If the
 * checkpoint is still in the process of committing, we can block waiting for
 * the commit LSN to be determined as well. This should make synchronous
 * operations almost as efficient as the old logging methods.
 */
struct xfs_cil {
	struct xlog		*xc_log;
	unsigned long		xc_flags;
	atomic_t		xc_iclog_hdrs;
	struct workqueue_struct	*xc_push_wq;

	struct rw_semaphore	xc_ctx_lock ____cacheline_aligned_in_smp;
	struct xfs_cil_ctx	*xc_ctx;

	spinlock_t		xc_push_lock ____cacheline_aligned_in_smp;
	xfs_csn_t		xc_push_seq;
	bool			xc_push_commit_stable;
	struct list_head	xc_committing;
	wait_queue_head_t	xc_commit_wait;
	wait_queue_head_t	xc_start_wait;
	xfs_csn_t		xc_current_sequence;
	wait_queue_head_t	xc_push_wait;	/* background push throttle */

	void __percpu		*xc_pcp;	/* percpu CIL structures */
} ____cacheline_aligned_in_smp;

/* xc_flags bit values */
#define XLOG_CIL_EMPTY		1
#define XLOG_CIL_PCP_SPACE	2
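
/*
 * Illustrative sketch only, with locking and blocking elided: how a lookup
 * over the committing list described above can find a checkpoint's commit
 * record LSN by sequence number. The helper name is hypothetical; the real
 * logic, including waiting for an in-progress commit, lives in
 * xlog_cil_force_seq().
 */
static inline xfs_lsn_t
xlog_cil_find_commit_lsn_sketch(struct xfs_cil *cil, xfs_csn_t sequence)
{
	struct xfs_cil_ctx	*ctx;

	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		if (ctx->sequence == sequence)
			return ctx->commit_lsn;	/* may not be set yet */
	}
	return 0;
}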

/*
 * The amount of log space we allow the CIL to aggregate is difficult to size.
 * Whatever we choose, we have to make sure we can get a reservation for the
 * log space effectively, that it is large enough to capture sufficient
 * relogging to reduce log buffer IO significantly, but that it is not too
 * large for the log and does not induce too much latency when writing out
 * through the iclogs. We track both space consumed and the number of vectors
 * in the checkpoint context, so we need to decide which to use for limiting.
 *
 * Every log buffer we write out during a push needs a header reserved, which
 * is at least one sector and more for v2 logs. Hence we need a reservation of
 * at least 512 bytes per 32k of log space just for the LR headers. That means
 * 16KB of reservation per megabyte of delayed logging space we will consume,
 * plus various headers. The number of headers will vary based on the number
 * of io vectors, so limiting on a specific number of vectors is going to
 * result in transactions of varying size. IOWs, it is more consistent to
 * track and limit space consumed in the log rather than by the number of
 * objects being logged in order to prevent checkpoint ticket overruns.
 *
 * Further, use of static reservations through the log grant mechanism is
 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
 * grant) and a significant deadlock potential because regranting write space
 * can block on log pushes. Hence if we have to regrant log space during a log
 * push, we can deadlock.
 *
 * However, we can avoid this by use of a dynamic "reservation stealing"
 * technique during transaction commit whereby unused reservation space in the
 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
 * space needed by the checkpoint transaction. This means that we never need to
 * specifically reserve space for the CIL checkpoint transaction, nor do we
 * need to regrant space once the checkpoint completes. This also means the
 * checkpoint transaction ticket is specific to the checkpoint context, rather
 * than the CIL itself.
 *
 * With dynamic reservations, we can effectively make up arbitrary limits for
 * the checkpoint size so long as they don't violate any other size rules.
 * Recovery imposes a rule that no transaction exceed half the log, so we are
 * limited by that. Furthermore, the log transaction reservation subsystem
 * tries to keep 25% of the log free, so we need to keep below that limit or we
 * risk running out of free log space to start any new transactions.
 *
 * In order to keep background CIL push efficient, we only need to ensure the
 * CIL is large enough to maintain sufficient in-memory relogging to avoid
 * repeated physical writes of frequently modified metadata. If we allow the
 * CIL to grow to a substantial fraction of the log, then we may be pinning
 * hundreds of megabytes of metadata in memory until the CIL flushes. This can
 * cause issues when we are running low on memory - pinned memory cannot be
 * reclaimed, and the CIL consumes a lot of memory. Hence we need to set an
 * upper physical size limit for the CIL that limits the maximum amount of
 * memory pinned by the CIL but does not limit performance by reducing
 * relogging efficiency significantly.
 *
 * As such, the CIL push threshold ends up being the smaller of two thresholds:
 * - a threshold large enough that it allows the CIL to be pushed and progress
 *   to be made without excessive blocking of incoming transaction commits.
 *   This is defined to be 12.5% of the log space - half the 25% push threshold
 *   of the AIL.
 * - small enough that it doesn't pin excessive amounts of memory but maintains
 *   close to peak relogging efficiency. This is defined to be 16x the iclog
 *   buffer window (32MB) as measurements have shown this to be roughly the
 *   point of diminishing performance increases under highly concurrent
 *   modification workloads.
 *
 * To prevent the CIL from overflowing upper commit size bounds, we introduce a
 * new threshold at which we block committing transactions until the background
 * CIL commit commences and switches to a new context. While this is not a hard
 * limit, it forces the process committing a transaction to the CIL to block
 * and yield the CPU, giving the CIL push work a chance to be scheduled and
 * start work. This prevents a process running lots of transactions from
 * overfilling the CIL because it is not yielding the CPU. We set the blocking
 * limit at twice the background push space threshold so we keep in line with
 * the AIL push thresholds.
 *
 * Note: this is not a -hard- limit as blocking is applied after the
 * transaction is inserted into the CIL and the push has been triggered. It is
 * largely a throttling mechanism that allows the CIL push to be scheduled and
 * run. A hard limit will be difficult to implement without introducing global
 * serialisation in the CIL commit fast path, and it's not at all clear that we
 * actually need such hard limits given the ~7 years we've run without a hard
 * limit before finding the first situation where a checkpoint size overflow
 * actually occurred. Hence the simple throttle, and an ASSERT check to tell us
 * that we've overrun the max size.
 */
#define XLOG_CIL_SPACE_LIMIT(log)	\
	min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4)

#define XLOG_CIL_BLOCKING_SPACE_LIMIT(log)	\
	(XLOG_CIL_SPACE_LIMIT(log) * 2)
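
/*
 * Minimal worked sketch, hypothetical numbers only and not kernel code: how
 * the two limits above relate, ignoring the iclog-window term of
 * XLOG_CIL_SPACE_LIMIT() for simplicity. The background push starts at 1/8th
 * of the log size and transaction commits start blocking at twice that.
 */
static inline void
xlog_cil_limit_sketch(void)
{
	int	log_size = 64 * 1024 * 1024;	/* hypothetical 64MB log */
	int	push_limit = log_size >> 3;	/* 8MB: background push starts */
	int	block_limit = push_limit * 2;	/* 16MB: commits start blocking */

	(void)block_limit;
}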

/*
 * Ticket grant locks, queues and accounting have their own cachelines
 * as these are quite hot and can be operated on concurrently.
 */
struct xlog_grant_head {
	spinlock_t		lock ____cacheline_aligned_in_smp;
	struct list_head	waiters;
	atomic64_t		grant;
};

/*
 * The reservation head lsn is not made up of a cycle number and block number.
 * Instead, it uses a cycle number and byte number. Logs don't expect to
 * overflow 31 bits worth of byte offset, so using a byte number will mean
 * that round off problems won't occur when releasing partial reservations.
 */
struct xlog {
	/* The following fields don't need locking */
	struct xfs_mount	*l_mp;		/* mount point */
	struct xfs_ail		*l_ailp;	/* AIL log is working with */
	struct xfs_cil		*l_cilp;	/* CIL log is working with */
	struct xfs_buftarg	*l_targ;	/* buftarg of log */
	struct workqueue_struct	*l_ioend_workqueue; /* for I/O completions */
	struct delayed_work	l_work;		/* background flush work */
	long			l_opstate;	/* operational state */
	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
	struct list_head	*l_buf_cancel_table;
	struct list_head	r_dfops;	/* recovered log intent items */
	int			l_iclog_hsize;	/* size of iclog header */
	int			l_iclog_heads;	/* # of iclog header sectors */
	uint			l_sectBBsize;	/* sector size in BBs (2^n) */
	int			l_iclog_size;	/* size of iclog buffers in bytes */
	int			l_iclog_bufs;	/* number of iclog buffers */
	xfs_daddr_t		l_logBBstart;	/* start block of log */
	int			l_logsize;	/* size of log in bytes */
	int			l_logBBsize;	/* size of log in BB chunks */

	/* The following block of fields are changed while holding icloglock */
	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
						/* waiting for iclog flush */
	int			l_covered_state;/* state of "covering disk
						 * log entries" */
	xlog_in_core_t		*l_iclog;	/* head log queue */
	spinlock_t		l_icloglock;	/* grab to change iclog state */
	int			l_curr_cycle;	/* Cycle number of log writes */
	int			l_prev_cycle;	/* Cycle number before last
						 * block increment */
	int			l_curr_block;	/* current logical log block */
	int			l_prev_block;	/* previous logical log block */

	/*
	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
	 * read without needing to hold specific locks. To avoid operations
	 * contending with other hot objects, place each of them on a separate
	 * cacheline.
	 */
	/* lsn of last LR on disk */
	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
	/* lsn of 1st LR with unflushed buffers */
	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;

	struct xlog_grant_head	l_reserve_head;
	struct xlog_grant_head	l_write_head;

	struct xfs_kobj		l_kobj;

	/* log recovery lsn tracking (for buffer submission) */
	xfs_lsn_t		l_recovery_lsn;

	uint32_t		l_iclog_roundoff; /* padding roundoff */

	/* Users of log incompat features should take a read lock. */
	struct rw_semaphore	l_incompat_users;
};

/*
 * Bits for operational state
 */
#define XLOG_ACTIVE_RECOVERY	0	/* in the middle of recovery */
#define XLOG_RECOVERY_NEEDED	1	/* log was recovered */
#define XLOG_IO_ERROR		2	/* log hit an I/O error, and being
					   shutdown */
#define XLOG_TAIL_WARN		3	/* log tail verify warning issued */

static inline bool
xlog_recovery_needed(struct xlog *log)
{
	return test_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
}

static inline bool
xlog_in_recovery(struct xlog *log)
{
	return test_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
}

static inline bool
xlog_is_shutdown(struct xlog *log)
{
	return test_bit(XLOG_IO_ERROR, &log->l_opstate);
}

/*
 * Wait until the xlog_force_shutdown() has marked the log as shut down
 * so xlog_is_shutdown() will always return true.
 */
static inline void
xlog_shutdown_wait(
	struct xlog	*log)
{
	wait_var_event(&log->l_opstate, xlog_is_shutdown(log));
}

/* common routines */
extern int
xlog_recover(
	struct xlog	*log);
extern int
xlog_recover_finish(
	struct xlog	*log);
extern void
xlog_recover_cancel(struct xlog *);

extern __le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
			    char *dp, int size);

extern struct kmem_cache *xfs_log_ticket_cache;
struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes,
		int count, bool permanent);

void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
void	xlog_print_trans(struct xfs_trans *);
int	xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx,
		struct list_head *lv_chain, struct xlog_ticket *tic,
		uint32_t len);
void	xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
void	xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);

void	xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
		int eventual_size);
int	xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
		struct xlog_ticket *ticket);

/*
 * When we crack an atomic LSN, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from. This should always
 * be used to sample and crack LSNs that are stored and updated in atomic
 * variables.
 */
static inline void
xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
{
	xfs_lsn_t val = atomic64_read(lsn);

	*cycle = CYCLE_LSN(val);
	*block = BLOCK_LSN(val);
}

/*
 * Calculate and assign a value to an atomic LSN variable from component pieces.
 */
static inline void
xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
{
	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
}

/*
 * When we crack the grant head, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from.
 */
static inline void
xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;
	*space = val & 0xffffffff;
}

static inline void
xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
{
	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
}

static inline int64_t
xlog_assign_grant_head_val(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;
}

static inline void
xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
{
	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
}
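
/*
 * Worked example (illustrative only): packing cycle 5 with 4096 bytes of
 * grant space gives ((int64_t)5 << 32) | 4096 == 0x0000000500001000, and
 * cracking that value yields cycle 5 and space 4096 again.
 */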

/*
 * Committed Item List interfaces
 */
int	xlog_cil_init(struct xlog *log);
void	xlog_cil_init_post_recovery(struct xlog *log);
void	xlog_cil_destroy(struct xlog *log);
bool	xlog_cil_empty(struct xlog *log);
void	xlog_cil_commit(struct xlog *log, struct xfs_trans *tp,
			xfs_csn_t *commit_seq, bool regrant);
void	xlog_cil_set_ctx_write_state(struct xfs_cil_ctx *ctx,
			struct xlog_in_core *iclog);


/*
 * CIL force routines
 */
void xlog_cil_flush(struct xlog *log);
xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence);

static inline void
xlog_cil_force(struct xlog *log)
{
	xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence);
}

/*
 * Wrapper function for waiting on a wait queue serialised against wakeups
 * by a spinlock. This matches the semantics of all the wait queues used in the
 * log code.
 */
static inline void
xlog_wait(
	struct wait_queue_head	*wq,
	struct spinlock		*lock)
		__releases(lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(wq, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
}
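
/*
 * Illustrative usage sketch only; the helper name is hypothetical. The caller
 * samples the condition under the spinlock that serialises the wakeup. If it
 * has to sleep, xlog_wait() drops the lock itself; otherwise the caller
 * unlocks as usual.
 */
static inline void
xlog_wait_iclog_active_sketch(struct xlog *log, struct xlog_in_core *iclog)
{
	spin_lock(&log->l_icloglock);
	if (iclog->ic_state != XLOG_STATE_ACTIVE) {
		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
		return;
	}
	spin_unlock(&log->l_icloglock);
}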

int	xlog_wait_on_iclog(struct xlog_in_core *iclog);

/*
 * The LSN is valid so long as it is behind the current LSN. If it isn't, this
 * means that the next log record that includes this metadata could have a
 * smaller LSN. In turn, this means that the modification in the log would not
 * replay.
 */
static inline bool
xlog_valid_lsn(
	struct xlog	*log,
	xfs_lsn_t	lsn)
{
	int		cur_cycle;
	int		cur_block;
	bool		valid = true;

	/*
	 * First, sample the current lsn without locking to avoid added
	 * contention from metadata I/O. The current cycle and block are
	 * updated (in xlog_state_switch_iclogs()) and read here in a
	 * particular order to avoid false negatives (e.g., thinking the
	 * metadata LSN is valid when it is not).
	 *
	 * The current block is always rewound before the cycle is bumped in
	 * xlog_state_switch_iclogs() to ensure the current LSN is never seen
	 * in a transiently forward state. Instead, we can see the LSN in a
	 * transiently behind state if we happen to race with a cycle wrap.
	 */
	cur_cycle = READ_ONCE(log->l_curr_cycle);
	smp_rmb();
	cur_block = READ_ONCE(log->l_curr_block);

	if ((CYCLE_LSN(lsn) > cur_cycle) ||
	    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
		/*
		 * If the metadata LSN appears invalid, it's possible the check
		 * above raced with a wrap to the next log cycle. Grab the lock
		 * to check for sure.
		 */
		spin_lock(&log->l_icloglock);
		cur_cycle = log->l_curr_cycle;
		cur_block = log->l_curr_block;
		spin_unlock(&log->l_icloglock);

		if ((CYCLE_LSN(lsn) > cur_cycle) ||
		    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
			valid = false;
	}

	return valid;
}

/*
 * Log vector and shadow buffers can be large, so we need to use kvmalloc() here
 * to ensure success. Unfortunately, kvmalloc() only allows GFP_KERNEL contexts
 * to fall back to vmalloc, so we can't actually do anything useful with gfp
 * flags to control the kmalloc() behaviour within kvmalloc(). Hence kmalloc()
 * will do direct reclaim and compaction in the slow path, both of which are
 * horrendously expensive. We just want kmalloc to fail fast and fall back to
 * vmalloc if it can't get something straight away from the free lists or
 * buddy allocator. Hence we have to open code kvmalloc ourselves here.
 *
 * This assumes that the caller uses memalloc_nofs_save task context here, so
 * despite the use of GFP_KERNEL here, we are going to be doing GFP_NOFS
 * allocations. This is actually the only way to make vmalloc() do GFP_NOFS
 * allocations, so let's just all pretend this is a GFP_KERNEL context
 * operation....
 */
static inline void *
xlog_kvmalloc(
	size_t		buf_size)
{
	gfp_t		flags = GFP_KERNEL;
	void		*p;

	flags &= ~__GFP_DIRECT_RECLAIM;
	flags |= __GFP_NOWARN | __GFP_NORETRY;
	do {
		p = kmalloc(buf_size, flags);
		if (!p)
			p = vmalloc(buf_size);
	} while (!p);

	return p;
}
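
/*
 * Note: memory returned by xlog_kvmalloc() may come from either kmalloc() or
 * vmalloc(), so callers free it with kvfree() rather than kfree().
 */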

#endif	/* __XFS_LOG_PRIV_H__ */