1 | /* Copyright (C) 1993-2024 Free Software Foundation, Inc. |
2 | This file is part of the GNU C Library. |
3 | |
4 | The GNU C Library is free software; you can redistribute it and/or |
5 | modify it under the terms of the GNU Lesser General Public |
6 | License as published by the Free Software Foundation; either |
7 | version 2.1 of the License, or (at your option) any later version. |
8 | |
9 | The GNU C Library is distributed in the hope that it will be useful, |
10 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
12 | Lesser General Public License for more details. |
13 | |
14 | You should have received a copy of the GNU Lesser General Public |
15 | License along with the GNU C Library; if not, see |
16 | <https://www.gnu.org/licenses/>. |
17 | |
18 | As a special exception, if you link the code in this file with |
19 | files compiled with a GNU compiler to produce an executable, |
20 | that does not cause the resulting executable to be covered by |
21 | the GNU Lesser General Public License. This exception does not |
22 | however invalidate any other reasons why the executable file |
23 | might be covered by the GNU Lesser General Public License. |
24 | This exception applies to code released by its copyright holders |
25 | in files containing the exception. */ |
26 | |
27 | /* Generic or default I/O operations. */ |
28 | |
29 | #include "libioP.h" |
30 | #include <stdlib.h> |
31 | #include <string.h> |
32 | #include <stdbool.h> |
33 | #include <sched.h> |
34 | |
35 | #ifdef _IO_MTSAFE_IO |
36 | static _IO_lock_t list_all_lock = _IO_lock_initializer; |
37 | #endif |
38 | |
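/* Stream currently locked by one of the list-walking operations below;
   the flush_cleanup handler unlocks it if the walk is interrupted.  */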
39 | static FILE *run_fp; |
40 | |
41 | #ifdef _IO_MTSAFE_IO |
42 | static void |
43 | flush_cleanup (void *not_used) |
44 | { |
45 | if (run_fp != NULL) |
46 | _IO_funlockfile (run_fp); |
47 | _IO_lock_unlock (list_all_lock); |
48 | } |
49 | #endif |
50 | |
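/* Remove FP from the global list of open streams (_IO_list_all).  */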
51 | void |
52 | _IO_un_link (struct _IO_FILE_plus *fp) |
53 | { |
54 | if (fp->file._flags & _IO_LINKED) |
55 | { |
56 | FILE **f; |
57 | #ifdef _IO_MTSAFE_IO |
58 | _IO_cleanup_region_start_noarg (flush_cleanup); |
59 | _IO_lock_lock (list_all_lock); |
60 | run_fp = (FILE *) fp; |
61 | _IO_flockfile ((FILE *) fp); |
62 | #endif |
63 | if (_IO_list_all == NULL) |
64 | ; |
65 | else if (fp == _IO_list_all) |
66 | _IO_list_all = (struct _IO_FILE_plus *) _IO_list_all->file._chain; |
67 | else |
68 | for (f = &_IO_list_all->file._chain; *f; f = &(*f)->_chain) |
69 | if (*f == (FILE *) fp) |
70 | { |
71 | *f = fp->file._chain; |
72 | break; |
73 | } |
74 | fp->file._flags &= ~_IO_LINKED; |
75 | #ifdef _IO_MTSAFE_IO |
76 | _IO_funlockfile ((FILE *) fp); |
77 | run_fp = NULL; |
78 | _IO_lock_unlock (list_all_lock); |
79 | _IO_cleanup_region_end (0); |
80 | #endif |
81 | } |
82 | } |
83 | libc_hidden_def (_IO_un_link) |
84 | |
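/* Add FP to the head of _IO_list_all unless it is already linked.  */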
85 | void |
86 | _IO_link_in (struct _IO_FILE_plus *fp) |
87 | { |
88 | if ((fp->file._flags & _IO_LINKED) == 0) |
89 | { |
90 | fp->file._flags |= _IO_LINKED; |
91 | #ifdef _IO_MTSAFE_IO |
92 | _IO_cleanup_region_start_noarg (flush_cleanup); |
93 | _IO_lock_lock (list_all_lock); |
94 | run_fp = (FILE *) fp; |
95 | _IO_flockfile ((FILE *) fp); |
96 | #endif |
97 | fp->file._chain = (FILE *) _IO_list_all; |
98 | _IO_list_all = fp; |
99 | #ifdef _IO_MTSAFE_IO |
100 | _IO_funlockfile ((FILE *) fp); |
101 | run_fp = NULL; |
102 | _IO_lock_unlock (list_all_lock); |
103 | _IO_cleanup_region_end (0); |
104 | #endif |
105 | } |
106 | } |
107 | libc_hidden_def (_IO_link_in) |
108 | |
/* Return the minimum of the markers' _pos values and
   END_P - _IO_read_base.  Assumes the current get area is the main
   get area.  */
111 | ssize_t _IO_least_marker (FILE *fp, char *end_p); |
112 | |
113 | ssize_t |
114 | _IO_least_marker (FILE *fp, char *end_p) |
115 | { |
116 | ssize_t least_so_far = end_p - fp->_IO_read_base; |
117 | struct _IO_marker *mark; |
118 | for (mark = fp->_markers; mark != NULL; mark = mark->_next) |
119 | if (mark->_pos < least_so_far) |
120 | least_so_far = mark->_pos; |
121 | return least_so_far; |
122 | } |
123 | |
124 | /* Switch current get area from backup buffer to (start of) main get area. */ |
125 | |
126 | void |
127 | _IO_switch_to_main_get_area (FILE *fp) |
128 | { |
129 | char *tmp; |
130 | fp->_flags &= ~_IO_IN_BACKUP; |
131 | /* Swap _IO_read_end and _IO_save_end. */ |
132 | tmp = fp->_IO_read_end; |
133 | fp->_IO_read_end = fp->_IO_save_end; |
fp->_IO_save_end = tmp;
135 | /* Swap _IO_read_base and _IO_save_base. */ |
136 | tmp = fp->_IO_read_base; |
137 | fp->_IO_read_base = fp->_IO_save_base; |
138 | fp->_IO_save_base = tmp; |
139 | /* Set _IO_read_ptr. */ |
140 | fp->_IO_read_ptr = fp->_IO_read_base; |
141 | } |
142 | |
143 | /* Switch current get area from main get area to (end of) backup area. */ |
144 | |
145 | void |
146 | _IO_switch_to_backup_area (FILE *fp) |
147 | { |
148 | char *tmp; |
149 | fp->_flags |= _IO_IN_BACKUP; |
150 | /* Swap _IO_read_end and _IO_save_end. */ |
151 | tmp = fp->_IO_read_end; |
152 | fp->_IO_read_end = fp->_IO_save_end; |
153 | fp->_IO_save_end = tmp; |
154 | /* Swap _IO_read_base and _IO_save_base. */ |
155 | tmp = fp->_IO_read_base; |
156 | fp->_IO_read_base = fp->_IO_save_base; |
157 | fp->_IO_save_base = tmp; |
158 | /* Set _IO_read_ptr. */ |
159 | fp->_IO_read_ptr = fp->_IO_read_end; |
160 | } |
161 | |
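/* Flush any pending output and switch FP from put mode to get mode:
   the get area is extended to cover the bytes just written and the put
   area is collapsed to an empty region at the current position.
   Return EOF if the flush fails, otherwise 0.  */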
162 | int |
163 | _IO_switch_to_get_mode (FILE *fp) |
164 | { |
165 | if (fp->_IO_write_ptr > fp->_IO_write_base) |
166 | if (_IO_OVERFLOW (fp, EOF) == EOF) |
167 | return EOF; |
168 | if (_IO_in_backup (fp)) |
169 | fp->_IO_read_base = fp->_IO_backup_base; |
170 | else |
171 | { |
172 | fp->_IO_read_base = fp->_IO_buf_base; |
173 | if (fp->_IO_write_ptr > fp->_IO_read_end) |
174 | fp->_IO_read_end = fp->_IO_write_ptr; |
175 | } |
176 | fp->_IO_read_ptr = fp->_IO_write_ptr; |
177 | |
178 | fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end = fp->_IO_read_ptr; |
179 | |
180 | fp->_flags &= ~_IO_CURRENTLY_PUTTING; |
181 | return 0; |
182 | } |
183 | libc_hidden_def (_IO_switch_to_get_mode) |
184 | |
185 | void |
186 | _IO_free_backup_area (FILE *fp) |
187 | { |
188 | if (_IO_in_backup (fp)) |
189 | _IO_switch_to_main_get_area (fp); /* Just in case. */ |
free (fp->_IO_save_base);
191 | fp->_IO_save_base = NULL; |
192 | fp->_IO_save_end = NULL; |
193 | fp->_IO_backup_base = NULL; |
194 | } |
195 | libc_hidden_def (_IO_free_backup_area) |
196 | |
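/* Exported overflow entry point: give F a byte orientation if it has
   none yet, then call the stream's overflow method with CH.  */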
197 | int |
198 | __overflow (FILE *f, int ch) |
199 | { |
200 | /* This is a single-byte stream. */ |
201 | if (f->_mode == 0) |
202 | _IO_fwide (f, -1); |
203 | return _IO_OVERFLOW (f, ch); |
204 | } |
205 | libc_hidden_def (__overflow) |
206 | |
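/* Copy the bytes between _IO_read_base and END_P, together with any
   older backup bytes still referenced by markers, into the backup
   buffer, enlarging it if necessary, and shift all marker positions
   accordingly.  Return 0 on success, EOF on allocation failure.  */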
207 | static int |
208 | save_for_backup (FILE *fp, char *end_p) |
209 | { |
210 | /* Append [_IO_read_base..end_p] to backup area. */ |
211 | ssize_t least_mark = _IO_least_marker (fp, end_p); |
212 | /* needed_size is how much space we need in the backup area. */ |
213 | size_t needed_size = (end_p - fp->_IO_read_base) - least_mark; |
214 | /* FIXME: Dubious arithmetic if pointers are NULL */ |
215 | size_t current_Bsize = fp->_IO_save_end - fp->_IO_save_base; |
216 | size_t avail; /* Extra space available for future expansion. */ |
217 | ssize_t delta; |
218 | struct _IO_marker *mark; |
219 | if (needed_size > current_Bsize) |
220 | { |
221 | char *new_buffer; |
222 | avail = 100; |
new_buffer = (char *) malloc (avail + needed_size);
224 | if (new_buffer == NULL) |
225 | return EOF; /* FIXME */ |
226 | if (least_mark < 0) |
227 | { |
228 | __mempcpy (__mempcpy (new_buffer + avail, |
229 | fp->_IO_save_end + least_mark, |
230 | -least_mark), |
231 | fp->_IO_read_base, |
232 | end_p - fp->_IO_read_base); |
233 | } |
234 | else |
235 | memcpy (new_buffer + avail, |
236 | fp->_IO_read_base + least_mark, |
237 | needed_size); |
free (fp->_IO_save_base);
239 | fp->_IO_save_base = new_buffer; |
240 | fp->_IO_save_end = new_buffer + avail + needed_size; |
241 | } |
242 | else |
243 | { |
244 | avail = current_Bsize - needed_size; |
245 | if (least_mark < 0) |
246 | { |
247 | memmove (fp->_IO_save_base + avail, |
248 | fp->_IO_save_end + least_mark, |
249 | -least_mark); |
250 | memcpy (fp->_IO_save_base + avail - least_mark, |
251 | fp->_IO_read_base, |
252 | end_p - fp->_IO_read_base); |
253 | } |
254 | else if (needed_size > 0) |
255 | memcpy (fp->_IO_save_base + avail, |
256 | fp->_IO_read_base + least_mark, |
257 | needed_size); |
258 | } |
259 | fp->_IO_backup_base = fp->_IO_save_base + avail; |
/* Adjust all the stream markers.  */
261 | delta = end_p - fp->_IO_read_base; |
262 | for (mark = fp->_markers; mark != NULL; mark = mark->_next) |
263 | mark->_pos -= delta; |
264 | return 0; |
265 | } |
266 | |
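/* Refill the get area if necessary and return the next available
   character without consuming it, or EOF.  */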
267 | int |
268 | __underflow (FILE *fp) |
269 | { |
270 | if (_IO_vtable_offset (fp) == 0 && _IO_fwide (fp, -1) != -1) |
271 | return EOF; |
272 | |
273 | if (fp->_mode == 0) |
274 | _IO_fwide (fp, -1); |
275 | if (_IO_in_put_mode (fp)) |
276 | if (_IO_switch_to_get_mode (fp) == EOF) |
277 | return EOF; |
278 | if (fp->_IO_read_ptr < fp->_IO_read_end) |
279 | return *(unsigned char *) fp->_IO_read_ptr; |
280 | if (_IO_in_backup (fp)) |
281 | { |
282 | _IO_switch_to_main_get_area (fp); |
283 | if (fp->_IO_read_ptr < fp->_IO_read_end) |
284 | return *(unsigned char *) fp->_IO_read_ptr; |
285 | } |
286 | if (_IO_have_markers (fp)) |
287 | { |
if (save_for_backup (fp, fp->_IO_read_end))
289 | return EOF; |
290 | } |
291 | else if (_IO_have_backup (fp)) |
292 | _IO_free_backup_area (fp); |
293 | return _IO_UNDERFLOW (fp); |
294 | } |
295 | libc_hidden_def (__underflow) |
296 | |
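/* Like __underflow, but the returned character is consumed: the read
   pointer is advanced past it.  */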
297 | int |
298 | __uflow (FILE *fp) |
299 | { |
300 | if (_IO_vtable_offset (fp) == 0 && _IO_fwide (fp, -1) != -1) |
301 | return EOF; |
302 | |
303 | if (fp->_mode == 0) |
304 | _IO_fwide (fp, -1); |
305 | if (_IO_in_put_mode (fp)) |
306 | if (_IO_switch_to_get_mode (fp) == EOF) |
307 | return EOF; |
308 | if (fp->_IO_read_ptr < fp->_IO_read_end) |
309 | return *(unsigned char *) fp->_IO_read_ptr++; |
310 | if (_IO_in_backup (fp)) |
311 | { |
312 | _IO_switch_to_main_get_area (fp); |
313 | if (fp->_IO_read_ptr < fp->_IO_read_end) |
314 | return *(unsigned char *) fp->_IO_read_ptr++; |
315 | } |
316 | if (_IO_have_markers (fp)) |
317 | { |
if (save_for_backup (fp, fp->_IO_read_end))
319 | return EOF; |
320 | } |
321 | else if (_IO_have_backup (fp)) |
322 | _IO_free_backup_area (fp); |
323 | return _IO_UFLOW (fp); |
324 | } |
325 | libc_hidden_def (__uflow) |
326 | |
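/* Install B..EB as F's buffer, freeing any previous buffer owned by
   libio.  A nonzero A means libio owns the new buffer and may free it
   later; zero marks it as user supplied (_IO_USER_BUF).  */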
327 | void |
328 | _IO_setb (FILE *f, char *b, char *eb, int a) |
329 | { |
330 | if (f->_IO_buf_base && !(f->_flags & _IO_USER_BUF)) |
free (f->_IO_buf_base);
332 | f->_IO_buf_base = b; |
333 | f->_IO_buf_end = eb; |
334 | if (a) |
335 | f->_flags &= ~_IO_USER_BUF; |
336 | else |
337 | f->_flags |= _IO_USER_BUF; |
338 | } |
339 | libc_hidden_def (_IO_setb) |
340 | |
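/* Make sure FP has a buffer, asking the stream's doallocate method for
   one unless the stream is an unbuffered byte stream; fall back to the
   one-byte _shortbuf if no buffer was allocated.  */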
341 | void |
342 | _IO_doallocbuf (FILE *fp) |
343 | { |
344 | if (fp->_IO_buf_base) |
345 | return; |
346 | if (!(fp->_flags & _IO_UNBUFFERED) || fp->_mode > 0) |
347 | if (_IO_DOALLOCATE (fp) != EOF) |
348 | return; |
_IO_setb (fp, fp->_shortbuf, fp->_shortbuf+1, 0);
350 | } |
351 | libc_hidden_def (_IO_doallocbuf) |
352 | |
353 | int |
354 | _IO_default_underflow (FILE *fp) |
355 | { |
356 | return EOF; |
357 | } |
358 | |
359 | int |
360 | _IO_default_uflow (FILE *fp) |
361 | { |
362 | int ch = _IO_UNDERFLOW (fp); |
363 | if (ch == EOF) |
364 | return EOF; |
365 | return *(unsigned char *) fp->_IO_read_ptr++; |
366 | } |
367 | libc_hidden_def (_IO_default_uflow) |
368 | |
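/* Default xsputn method: copy N bytes from DATA into the put area,
   invoking the overflow method whenever the put area fills up.  Return
   the number of bytes actually written.  */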
369 | size_t |
370 | _IO_default_xsputn (FILE *f, const void *data, size_t n) |
371 | { |
372 | const char *s = (char *) data; |
373 | size_t more = n; |
374 | if (more <= 0) |
375 | return 0; |
376 | for (;;) |
377 | { |
378 | /* Space available. */ |
379 | if (f->_IO_write_ptr < f->_IO_write_end) |
380 | { |
381 | size_t count = f->_IO_write_end - f->_IO_write_ptr; |
382 | if (count > more) |
383 | count = more; |
384 | if (count > 20) |
385 | { |
386 | f->_IO_write_ptr = __mempcpy (f->_IO_write_ptr, s, count); |
387 | s += count; |
388 | } |
389 | else if (count) |
390 | { |
391 | char *p = f->_IO_write_ptr; |
392 | ssize_t i; |
393 | for (i = count; --i >= 0; ) |
394 | *p++ = *s++; |
395 | f->_IO_write_ptr = p; |
396 | } |
397 | more -= count; |
398 | } |
399 | if (more == 0 || _IO_OVERFLOW (f, (unsigned char) *s++) == EOF) |
400 | break; |
401 | more--; |
402 | } |
403 | return n - more; |
404 | } |
405 | libc_hidden_def (_IO_default_xsputn) |
406 | |
407 | size_t |
408 | _IO_sgetn (FILE *fp, void *data, size_t n) |
409 | { |
410 | /* FIXME handle putback buffer here! */ |
411 | return _IO_XSGETN (fp, data, n); |
412 | } |
413 | libc_hidden_def (_IO_sgetn) |
414 | |
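/* Default xsgetn method: copy up to N bytes from the get area into
   DATA, refilling via __underflow as needed.  Return the number of
   bytes actually read.  */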
415 | size_t |
416 | _IO_default_xsgetn (FILE *fp, void *data, size_t n) |
417 | { |
418 | size_t more = n; |
419 | char *s = (char*) data; |
420 | for (;;) |
421 | { |
422 | /* Data available. */ |
423 | if (fp->_IO_read_ptr < fp->_IO_read_end) |
424 | { |
425 | size_t count = fp->_IO_read_end - fp->_IO_read_ptr; |
426 | if (count > more) |
427 | count = more; |
428 | if (count > 20) |
429 | { |
430 | s = __mempcpy (s, fp->_IO_read_ptr, count); |
431 | fp->_IO_read_ptr += count; |
432 | } |
433 | else if (count) |
434 | { |
435 | char *p = fp->_IO_read_ptr; |
436 | int i = (int) count; |
437 | while (--i >= 0) |
438 | *s++ = *p++; |
439 | fp->_IO_read_ptr = p; |
440 | } |
441 | more -= count; |
442 | } |
443 | if (more == 0 || __underflow (fp) == EOF) |
444 | break; |
445 | } |
446 | return n - more; |
447 | } |
448 | libc_hidden_def (_IO_default_xsgetn) |
449 | |
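/* Default setbuf method: sync FP (returning NULL on failure), then
   switch to the caller-supplied buffer P of LEN bytes, or to unbuffered
   mode using the one-byte _shortbuf when P is NULL or LEN is zero.  */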
450 | FILE * |
451 | _IO_default_setbuf (FILE *fp, char *p, ssize_t len) |
452 | { |
453 | if (_IO_SYNC (fp) == EOF) |
454 | return NULL; |
455 | if (p == NULL || len == 0) |
456 | { |
457 | fp->_flags |= _IO_UNBUFFERED; |
_IO_setb (fp, fp->_shortbuf, fp->_shortbuf+1, 0);
459 | } |
460 | else |
461 | { |
462 | fp->_flags &= ~_IO_UNBUFFERED; |
_IO_setb (fp, p, p+len, 0);
464 | } |
465 | fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end = 0; |
466 | fp->_IO_read_base = fp->_IO_read_ptr = fp->_IO_read_end = 0; |
467 | return fp; |
468 | } |
469 | |
470 | off64_t |
471 | _IO_default_seekpos (FILE *fp, off64_t pos, int mode) |
472 | { |
473 | return _IO_SEEKOFF (fp, pos, 0, mode); |
474 | } |
475 | |
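/* Default doallocate method: allocate a BUFSIZ-byte buffer with malloc
   and install it.  Return EOF on allocation failure, 1 on success.  */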
476 | int |
477 | _IO_default_doallocate (FILE *fp) |
478 | { |
479 | char *buf; |
480 | |
buf = malloc (BUFSIZ);
482 | if (__glibc_unlikely (buf == NULL)) |
483 | return EOF; |
484 | |
_IO_setb (fp, buf, buf+BUFSIZ, 1);
486 | return 1; |
487 | } |
488 | libc_hidden_def (_IO_default_doallocate) |
489 | |
490 | void |
491 | _IO_init_internal (FILE *fp, int flags) |
492 | { |
493 | _IO_no_init (fp, flags, -1, NULL, NULL); |
494 | } |
495 | |
496 | void |
497 | _IO_init (FILE *fp, int flags) |
498 | { |
IO_set_accept_foreign_vtables (&_IO_vtable_check);
500 | _IO_init_internal (fp, flags); |
501 | } |
502 | |
503 | static int stdio_needs_locking; |
504 | |
505 | /* In a single-threaded process most stdio locks can be omitted. After |
506 | _IO_enable_locks is called, locks are not optimized away any more. |
507 | It must be first called while the process is still single-threaded. |
508 | |
509 | This lock optimization can be disabled on a per-file basis by setting |
510 | _IO_FLAGS2_NEED_LOCK, because a file can have user-defined callbacks |
511 | or can be locked with flockfile and then a thread may be created |
512 | between a lock and unlock, so omitting the lock is not valid. |
513 | |
514 | Here we have to make sure that the flag is set on all existing files |
515 | and files created later. */ |
516 | void |
517 | _IO_enable_locks (void) |
518 | { |
519 | _IO_ITER i; |
520 | |
521 | if (stdio_needs_locking) |
522 | return; |
523 | stdio_needs_locking = 1; |
524 | for (i = _IO_iter_begin (); i != _IO_iter_end (); i = _IO_iter_next (i)) |
525 | _IO_iter_file (i)->_flags2 |= _IO_FLAGS2_NEED_LOCK; |
526 | } |
527 | libc_hidden_def (_IO_enable_locks) |
528 | |
529 | void |
530 | _IO_old_init (FILE *fp, int flags) |
531 | { |
532 | fp->_flags = _IO_MAGIC|flags; |
533 | fp->_flags2 = 0; |
534 | if (stdio_needs_locking) |
535 | fp->_flags2 |= _IO_FLAGS2_NEED_LOCK; |
536 | fp->_IO_buf_base = NULL; |
537 | fp->_IO_buf_end = NULL; |
538 | fp->_IO_read_base = NULL; |
539 | fp->_IO_read_ptr = NULL; |
540 | fp->_IO_read_end = NULL; |
541 | fp->_IO_write_base = NULL; |
542 | fp->_IO_write_ptr = NULL; |
543 | fp->_IO_write_end = NULL; |
544 | fp->_chain = NULL; /* Not necessary. */ |
545 | |
546 | fp->_IO_save_base = NULL; |
547 | fp->_IO_backup_base = NULL; |
548 | fp->_IO_save_end = NULL; |
549 | fp->_markers = NULL; |
550 | fp->_cur_column = 0; |
551 | #if _IO_JUMPS_OFFSET |
552 | fp->_vtable_offset = 0; |
553 | #endif |
554 | #ifdef _IO_MTSAFE_IO |
555 | if (fp->_lock != NULL) |
556 | _IO_lock_init (*fp->_lock); |
557 | #endif |
558 | } |
559 | |
560 | void |
561 | _IO_no_init (FILE *fp, int flags, int orientation, |
562 | struct _IO_wide_data *wd, const struct _IO_jump_t *jmp) |
563 | { |
564 | _IO_old_init (fp, flags); |
565 | fp->_mode = orientation; |
566 | if (orientation >= 0) |
567 | { |
568 | fp->_wide_data = wd; |
569 | fp->_wide_data->_IO_buf_base = NULL; |
570 | fp->_wide_data->_IO_buf_end = NULL; |
571 | fp->_wide_data->_IO_read_base = NULL; |
572 | fp->_wide_data->_IO_read_ptr = NULL; |
573 | fp->_wide_data->_IO_read_end = NULL; |
574 | fp->_wide_data->_IO_write_base = NULL; |
575 | fp->_wide_data->_IO_write_ptr = NULL; |
576 | fp->_wide_data->_IO_write_end = NULL; |
577 | fp->_wide_data->_IO_save_base = NULL; |
578 | fp->_wide_data->_IO_backup_base = NULL; |
579 | fp->_wide_data->_IO_save_end = NULL; |
580 | |
581 | fp->_wide_data->_wide_vtable = jmp; |
582 | } |
583 | else |
584 | /* Cause predictable crash when a wide function is called on a byte |
585 | stream. */ |
586 | fp->_wide_data = (struct _IO_wide_data *) -1L; |
587 | fp->_freeres_list = NULL; |
588 | } |
589 | |
590 | int |
591 | _IO_default_sync (FILE *fp) |
592 | { |
593 | return 0; |
594 | } |
595 | |
/* Because of the way the C++ classes are mapped onto the C functions
   in the current implementation, this function can get called twice!  */
598 | |
599 | void |
600 | _IO_default_finish (FILE *fp, int dummy) |
601 | { |
602 | struct _IO_marker *mark; |
603 | if (fp->_IO_buf_base && !(fp->_flags & _IO_USER_BUF)) |
604 | { |
free (fp->_IO_buf_base);
606 | fp->_IO_buf_base = fp->_IO_buf_end = NULL; |
607 | } |
608 | |
609 | for (mark = fp->_markers; mark != NULL; mark = mark->_next) |
610 | mark->_sbuf = NULL; |
611 | |
612 | if (fp->_IO_save_base) |
613 | { |
free (fp->_IO_save_base);
615 | fp->_IO_save_base = NULL; |
616 | } |
617 | |
_IO_un_link ((struct _IO_FILE_plus *) fp);
619 | |
620 | #ifdef _IO_MTSAFE_IO |
621 | if (fp->_lock != NULL) |
622 | _IO_lock_fini (*fp->_lock); |
623 | #endif |
624 | } |
625 | libc_hidden_def (_IO_default_finish) |
626 | |
627 | off64_t |
628 | _IO_default_seekoff (FILE *fp, off64_t offset, int dir, int mode) |
629 | { |
630 | return _IO_pos_BAD; |
631 | } |
632 | |
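/* Push C back onto FP: back up the read pointer if C matches the byte
   just read, otherwise let the stream's pbackfail method handle it.
   Clear the EOF flag on success.  */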
633 | int |
634 | _IO_sputbackc (FILE *fp, int c) |
635 | { |
636 | int result; |
637 | |
638 | if (fp->_IO_read_ptr > fp->_IO_read_base |
639 | && (unsigned char)fp->_IO_read_ptr[-1] == (unsigned char)c) |
640 | { |
641 | fp->_IO_read_ptr--; |
642 | result = (unsigned char) c; |
643 | } |
644 | else |
645 | result = _IO_PBACKFAIL (fp, c); |
646 | |
647 | if (result != EOF) |
648 | fp->_flags &= ~_IO_EOF_SEEN; |
649 | |
650 | return result; |
651 | } |
652 | libc_hidden_def (_IO_sputbackc) |
653 | |
654 | int |
655 | _IO_sungetc (FILE *fp) |
656 | { |
657 | int result; |
658 | |
659 | if (fp->_IO_read_ptr > fp->_IO_read_base) |
660 | { |
661 | fp->_IO_read_ptr--; |
662 | result = (unsigned char) *fp->_IO_read_ptr; |
663 | } |
664 | else |
665 | result = _IO_PBACKFAIL (fp, EOF); |
666 | |
667 | if (result != EOF) |
668 | fp->_flags &= ~_IO_EOF_SEEN; |
669 | |
670 | return result; |
671 | } |
672 | |
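/* Return the output column after writing COUNT bytes of LINE starting
   at column START: the number of bytes following the last newline in
   LINE, or START + COUNT if LINE contains no newline.  */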
673 | unsigned |
674 | _IO_adjust_column (unsigned start, const char *line, int count) |
675 | { |
676 | const char *ptr = line + count; |
677 | while (ptr > line) |
678 | if (*--ptr == '\n') |
679 | return line + count - ptr - 1; |
680 | return start + count; |
681 | } |
682 | libc_hidden_def (_IO_adjust_column) |
683 | |
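/* Flush every stream on _IO_list_all that has pending narrow or wide
   output.  Return EOF if any flush fails, zero otherwise.  */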
684 | int |
685 | _IO_flush_all (void) |
686 | { |
687 | int result = 0; |
688 | FILE *fp; |
689 | |
690 | #ifdef _IO_MTSAFE_IO |
691 | _IO_cleanup_region_start_noarg (flush_cleanup); |
692 | _IO_lock_lock (list_all_lock); |
693 | #endif |
694 | |
695 | for (fp = (FILE *) _IO_list_all; fp != NULL; fp = fp->_chain) |
696 | { |
697 | run_fp = fp; |
698 | _IO_flockfile (fp); |
699 | |
700 | if (((fp->_mode <= 0 && fp->_IO_write_ptr > fp->_IO_write_base) |
701 | || (_IO_vtable_offset (fp) == 0 |
702 | && fp->_mode > 0 && (fp->_wide_data->_IO_write_ptr |
703 | > fp->_wide_data->_IO_write_base)) |
704 | ) |
705 | && _IO_OVERFLOW (fp, EOF) == EOF) |
706 | result = EOF; |
707 | |
708 | _IO_funlockfile (fp); |
709 | run_fp = NULL; |
710 | } |
711 | |
712 | #ifdef _IO_MTSAFE_IO |
713 | _IO_lock_unlock (list_all_lock); |
714 | _IO_cleanup_region_end (0); |
715 | #endif |
716 | |
717 | return result; |
718 | } |
719 | libc_hidden_def (_IO_flush_all) |
720 | |
721 | void |
722 | _IO_flush_all_linebuffered (void) |
723 | { |
724 | FILE *fp; |
725 | |
726 | #ifdef _IO_MTSAFE_IO |
727 | _IO_cleanup_region_start_noarg (flush_cleanup); |
728 | _IO_lock_lock (list_all_lock); |
729 | #endif |
730 | |
731 | for (fp = (FILE *) _IO_list_all; fp != NULL; fp = fp->_chain) |
732 | { |
733 | run_fp = fp; |
734 | _IO_flockfile (fp); |
735 | |
736 | if ((fp->_flags & _IO_NO_WRITES) == 0 && fp->_flags & _IO_LINE_BUF) |
737 | _IO_OVERFLOW (fp, EOF); |
738 | |
739 | _IO_funlockfile (fp); |
740 | run_fp = NULL; |
741 | } |
742 | |
743 | #ifdef _IO_MTSAFE_IO |
744 | _IO_lock_unlock (list_all_lock); |
745 | _IO_cleanup_region_end (0); |
746 | #endif |
747 | } |
748 | libc_hidden_def (_IO_flush_all_linebuffered) |
749 | weak_alias (_IO_flush_all_linebuffered, _flushlbf) |
750 | |
751 | |
/* The following is a bit tricky.  In general, we want to unbuffer the
   streams so that all output which follows is seen.  If we are not
   looking for memory leaks it does not make much sense to free the
   actual buffers because this will happen anyway once the program
   terminates.  If we do want to look for memory leaks we have to free
   the buffers.  Whether something is freed is determined by the
   functions called by __libc_freeres (unlike _IO_cleanup, those are not
   run as part of the atexit machinery).  The problem is that we do not
   know whether the freeres code or _IO_cleanup is called first.  If the
   former is the case, we set the dealloc_buffers variable to true and
   _IO_unbuffer_all will take care of the rest.  If _IO_unbuffer_all is
   called first we add the streams to a list which the freeres function
   can later walk through.  */
765 | static void _IO_unbuffer_all (void); |
766 | |
767 | static bool dealloc_buffers; |
768 | static FILE *freeres_list; |
769 | |
770 | static void |
771 | _IO_unbuffer_all (void) |
772 | { |
773 | FILE *fp; |
774 | |
775 | #ifdef _IO_MTSAFE_IO |
776 | _IO_cleanup_region_start_noarg (flush_cleanup); |
777 | _IO_lock_lock (list_all_lock); |
778 | #endif |
779 | |
780 | for (fp = (FILE *) _IO_list_all; fp; fp = fp->_chain) |
781 | { |
782 | int legacy = 0; |
783 | |
784 | run_fp = fp; |
785 | _IO_flockfile (fp); |
786 | |
787 | #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_1) |
788 | if (__glibc_unlikely (_IO_vtable_offset (fp) != 0)) |
789 | legacy = 1; |
790 | #endif |
791 | |
792 | if (! (fp->_flags & _IO_UNBUFFERED) |
793 | /* Iff stream is un-orientated, it wasn't used. */ |
794 | && (legacy || fp->_mode != 0)) |
795 | { |
796 | if (! legacy && ! dealloc_buffers && !(fp->_flags & _IO_USER_BUF)) |
797 | { |
798 | fp->_flags |= _IO_USER_BUF; |
799 | |
800 | fp->_freeres_list = freeres_list; |
801 | freeres_list = fp; |
802 | fp->_freeres_buf = fp->_IO_buf_base; |
803 | } |
804 | |
805 | _IO_SETBUF (fp, NULL, 0); |
806 | |
807 | if (! legacy && fp->_mode > 0) |
808 | _IO_wsetb (fp, NULL, NULL, 0); |
809 | } |
810 | |
811 | /* Make sure that never again the wide char functions can be |
812 | used. */ |
813 | if (! legacy) |
814 | fp->_mode = -1; |
815 | |
816 | _IO_funlockfile (fp); |
817 | run_fp = NULL; |
818 | } |
819 | |
820 | #ifdef _IO_MTSAFE_IO |
821 | _IO_lock_unlock (list_all_lock); |
822 | _IO_cleanup_region_end (0); |
823 | #endif |
824 | } |
825 | |
826 | void |
827 | __libio_freemem (void) |
828 | { |
829 | dealloc_buffers = true; |
830 | |
831 | while (freeres_list != NULL) |
832 | { |
free (freeres_list->_freeres_buf);
834 | |
835 | freeres_list = freeres_list->_freeres_list; |
836 | } |
837 | } |
838 | |
839 | |
840 | int |
841 | _IO_cleanup (void) |
842 | { |
843 | int result = _IO_flush_all (); |
844 | |
845 | /* We currently don't have a reliable mechanism for making sure that |
846 | C++ static destructors are executed in the correct order. |
847 | So it is possible that other static destructors might want to |
848 | write to cout - and they're supposed to be able to do so. |
849 | |
850 | The following will make the standard streambufs be unbuffered, |
851 | which forces any output from late destructors to be written out. */ |
852 | _IO_unbuffer_all (); |
853 | |
854 | return result; |
855 | } |
856 | |
857 | |
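/* Record FP's current read position in MARKER and link MARKER onto
   FP's marker chain.  */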
858 | void |
859 | _IO_init_marker (struct _IO_marker *marker, FILE *fp) |
860 | { |
861 | marker->_sbuf = fp; |
862 | if (_IO_in_put_mode (fp)) |
863 | _IO_switch_to_get_mode (fp); |
864 | if (_IO_in_backup (fp)) |
865 | marker->_pos = fp->_IO_read_ptr - fp->_IO_read_end; |
866 | else |
867 | marker->_pos = fp->_IO_read_ptr - fp->_IO_read_base; |
868 | |
869 | /* Should perhaps sort the chain? */ |
870 | marker->_next = fp->_markers; |
871 | fp->_markers = marker; |
872 | } |
873 | |
874 | void |
875 | _IO_remove_marker (struct _IO_marker *marker) |
876 | { |
877 | /* Unlink from sb's chain. */ |
878 | struct _IO_marker **ptr = &marker->_sbuf->_markers; |
879 | for (; ; ptr = &(*ptr)->_next) |
880 | { |
881 | if (*ptr == NULL) |
882 | break; |
883 | else if (*ptr == marker) |
884 | { |
885 | *ptr = marker->_next; |
886 | return; |
887 | } |
888 | } |
889 | /* FIXME: if _sbuf has a backup area that is no longer needed, |
890 | should we delete it now, or wait until the next underflow? */ |
891 | } |
892 | |
893 | #define BAD_DELTA EOF |
894 | |
895 | int |
896 | _IO_marker_difference (struct _IO_marker *mark1, struct _IO_marker *mark2) |
897 | { |
898 | return mark1->_pos - mark2->_pos; |
899 | } |
900 | |
901 | /* Return difference between MARK and current position of MARK's stream. */ |
902 | int |
903 | _IO_marker_delta (struct _IO_marker *mark) |
904 | { |
905 | int cur_pos; |
906 | if (mark->_sbuf == NULL) |
907 | return BAD_DELTA; |
908 | if (_IO_in_backup (mark->_sbuf)) |
909 | cur_pos = mark->_sbuf->_IO_read_ptr - mark->_sbuf->_IO_read_end; |
910 | else |
911 | cur_pos = mark->_sbuf->_IO_read_ptr - mark->_sbuf->_IO_read_base; |
912 | return mark->_pos - cur_pos; |
913 | } |
914 | |
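/* Reposition FP's read pointer to the position saved in MARK,
   switching between the main get area and the backup area as needed.
   DELTA is ignored.  Return EOF if MARK does not belong to FP.  */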
915 | int |
916 | _IO_seekmark (FILE *fp, struct _IO_marker *mark, int delta) |
917 | { |
918 | if (mark->_sbuf != fp) |
919 | return EOF; |
920 | if (mark->_pos >= 0) |
921 | { |
922 | if (_IO_in_backup (fp)) |
923 | _IO_switch_to_main_get_area (fp); |
924 | fp->_IO_read_ptr = fp->_IO_read_base + mark->_pos; |
925 | } |
926 | else |
927 | { |
928 | if (!_IO_in_backup (fp)) |
929 | _IO_switch_to_backup_area (fp); |
930 | fp->_IO_read_ptr = fp->_IO_read_end + mark->_pos; |
931 | } |
932 | return 0; |
933 | } |
934 | |
935 | void |
936 | _IO_unsave_markers (FILE *fp) |
937 | { |
938 | struct _IO_marker *mark = fp->_markers; |
939 | if (mark) |
940 | { |
941 | fp->_markers = 0; |
942 | } |
943 | |
944 | if (_IO_have_backup (fp)) |
945 | _IO_free_backup_area (fp); |
946 | } |
947 | libc_hidden_def (_IO_unsave_markers) |
948 | |
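/* Default pbackfail method: push C back onto FP, switching to the
   backup area (allocating or growing it as needed) when the character
   cannot simply be rewound in the main get area.  */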
949 | int |
950 | _IO_default_pbackfail (FILE *fp, int c) |
951 | { |
952 | if (fp->_IO_read_ptr > fp->_IO_read_base && !_IO_in_backup (fp) |
953 | && (unsigned char) fp->_IO_read_ptr[-1] == c) |
954 | --fp->_IO_read_ptr; |
955 | else |
956 | { |
957 | /* Need to handle a filebuf in write mode (switch to read mode). FIXME!*/ |
958 | if (!_IO_in_backup (fp)) |
959 | { |
960 | /* We need to keep the invariant that the main get area |
961 | logically follows the backup area. */ |
962 | if (fp->_IO_read_ptr > fp->_IO_read_base && _IO_have_backup (fp)) |
963 | { |
if (save_for_backup (fp, fp->_IO_read_ptr))
965 | return EOF; |
966 | } |
967 | else if (!_IO_have_backup (fp)) |
968 | { |
969 | /* No backup buffer: allocate one. */ |
/* Use the _shortbuf, if unused?  (Probably not.)  FIXME */
971 | int backup_size = 128; |
char *bbuf = (char *) malloc (backup_size);
973 | if (bbuf == NULL) |
974 | return EOF; |
975 | fp->_IO_save_base = bbuf; |
976 | fp->_IO_save_end = fp->_IO_save_base + backup_size; |
977 | fp->_IO_backup_base = fp->_IO_save_end; |
978 | } |
979 | fp->_IO_read_base = fp->_IO_read_ptr; |
980 | _IO_switch_to_backup_area (fp); |
981 | } |
982 | else if (fp->_IO_read_ptr <= fp->_IO_read_base) |
983 | { |
984 | /* Increase size of existing backup buffer. */ |
985 | size_t new_size; |
986 | size_t old_size = fp->_IO_read_end - fp->_IO_read_base; |
987 | char *new_buf; |
988 | new_size = 2 * old_size; |
new_buf = (char *) malloc (new_size);
990 | if (new_buf == NULL) |
991 | return EOF; |
992 | memcpy (new_buf + (new_size - old_size), fp->_IO_read_base, |
993 | old_size); |
free (fp->_IO_read_base);
995 | _IO_setg (fp, new_buf, new_buf + (new_size - old_size), |
996 | new_buf + new_size); |
997 | fp->_IO_backup_base = fp->_IO_read_ptr; |
998 | } |
999 | |
1000 | *--fp->_IO_read_ptr = c; |
1001 | } |
1002 | return (unsigned char) c; |
1003 | } |
1004 | libc_hidden_def (_IO_default_pbackfail) |
1005 | |
1006 | off64_t |
1007 | _IO_default_seek (FILE *fp, off64_t offset, int dir) |
1008 | { |
1009 | return _IO_pos_BAD; |
1010 | } |
1011 | |
1012 | int |
1013 | _IO_default_stat (FILE *fp, void *st) |
1014 | { |
1015 | return EOF; |
1016 | } |
1017 | |
1018 | ssize_t |
1019 | _IO_default_read (FILE *fp, void *data, ssize_t n) |
1020 | { |
1021 | return -1; |
1022 | } |
1023 | |
1024 | ssize_t |
1025 | _IO_default_write (FILE *fp, const void *data, ssize_t n) |
1026 | { |
1027 | return 0; |
1028 | } |
1029 | |
1030 | int |
1031 | _IO_default_showmanyc (FILE *fp) |
1032 | { |
1033 | return -1; |
1034 | } |
1035 | |
1036 | void |
1037 | _IO_default_imbue (FILE *fp, void *locale) |
1038 | { |
1039 | } |
1040 | |
1041 | _IO_ITER |
1042 | _IO_iter_begin (void) |
1043 | { |
1044 | return (_IO_ITER) _IO_list_all; |
1045 | } |
1046 | libc_hidden_def (_IO_iter_begin) |
1047 | |
1048 | _IO_ITER |
1049 | _IO_iter_end (void) |
1050 | { |
1051 | return NULL; |
1052 | } |
1053 | libc_hidden_def (_IO_iter_end) |
1054 | |
1055 | _IO_ITER |
1056 | _IO_iter_next (_IO_ITER iter) |
1057 | { |
1058 | return iter->_chain; |
1059 | } |
1060 | libc_hidden_def (_IO_iter_next) |
1061 | |
1062 | FILE * |
1063 | _IO_iter_file (_IO_ITER iter) |
1064 | { |
1065 | return iter; |
1066 | } |
1067 | libc_hidden_def (_IO_iter_file) |
1068 | |
1069 | void |
1070 | _IO_list_lock (void) |
1071 | { |
1072 | #ifdef _IO_MTSAFE_IO |
1073 | _IO_lock_lock (list_all_lock); |
1074 | #endif |
1075 | } |
1076 | libc_hidden_def (_IO_list_lock) |
1077 | |
1078 | void |
1079 | _IO_list_unlock (void) |
1080 | { |
1081 | #ifdef _IO_MTSAFE_IO |
1082 | _IO_lock_unlock (list_all_lock); |
1083 | #endif |
1084 | } |
1085 | libc_hidden_def (_IO_list_unlock) |
1086 | |
1087 | void |
1088 | _IO_list_resetlock (void) |
1089 | { |
1090 | #ifdef _IO_MTSAFE_IO |
1091 | _IO_lock_init (list_all_lock); |
1092 | #endif |
1093 | } |
1094 | libc_hidden_def (_IO_list_resetlock) |
1095 | |