1use crate::fd::{AsFd, BorrowedFd, OwnedFd};
2use crate::ffi::{CStr, CString};
3use crate::fs::{
4 fcntl_getfl, fstat, fstatfs, fstatvfs, openat, FileType, Mode, OFlags, Stat, StatFs, StatVfs,
5};
6use crate::io;
7#[cfg(feature = "process")]
8use crate::process::fchdir;
9use crate::utils::as_ptr;
10use alloc::borrow::ToOwned;
11use alloc::vec::Vec;
12use core::fmt;
13use core::mem::size_of;
14use linux_raw_sys::general::{linux_dirent64, SEEK_SET};
15
/// `DIR*`
///
/// An open directory stream. Entries are read in batches from the backend
/// `getdents` syscall wrapper into `buf` and parsed one at a time by `read`.
pub struct Dir {
    /// The `OwnedFd` that we read directory entries from.
    fd: OwnedFd,

    /// Have we seen any errors in this iteration? Once set, `read` returns
    /// `None` on all subsequent calls until `rewind` clears it.
    any_errors: bool,

    /// Should we rewind the stream on the next iteration?
    rewind: bool,

    /// The buffer for `linux_dirent64` entries.
    buf: Vec<u8>,

    /// Where we are in the buffer.
    pos: usize,
}
33
34impl Dir {
    /// Construct a `Dir` that reads entries from the given directory
    /// file descriptor.
    ///
    /// This does not take ownership of `fd`; instead, `_read_from` opens a
    /// fresh file descriptor for the same directory, so iterating doesn't
    /// perturb the caller's descriptor.
    #[inline]
    pub fn read_from<Fd: AsFd>(fd: Fd) -> io::Result<Self> {
        Self::_read_from(fd.as_fd())
    }
41
42 #[inline]
43 fn _read_from(fd: BorrowedFd<'_>) -> io::Result<Self> {
44 let flags = fcntl_getfl(fd)?;
45 let fd_for_dir = openat(fd, cstr!("."), flags | OFlags::CLOEXEC, Mode::empty())?;
46
47 Ok(Self {
48 fd: fd_for_dir,
49 any_errors: false,
50 rewind: false,
51 buf: Vec::new(),
52 pos: 0,
53 })
54 }
55
56 /// `rewinddir(self)`
57 #[inline]
58 pub fn rewind(&mut self) {
59 self.any_errors = false;
60 self.rewind = true;
61 self.pos = self.buf.len();
62 }
63
    /// `readdir(self)`, where `None` means the end of the directory.
    ///
    /// Returns `Some(Ok(entry))` for each entry, `Some(Err(_))` on failure,
    /// and `None` at the end of the directory or after a previous error.
    pub fn read(&mut self) -> Option<io::Result<DirEntry>> {
        // If we've seen errors, don't continue to try to read anything further.
        if self.any_errors {
            return None;
        }

        // If a rewind was requested, seek to the beginning.
        if self.rewind {
            self.rewind = false;
            match io::retry_on_intr(|| {
                crate::backend::fs::syscalls::_seek(self.fd.as_fd(), 0, SEEK_SET)
            }) {
                Ok(_) => (),
                Err(err) => {
                    self.any_errors = true;
                    return Some(Err(err));
                }
            }
        }

        // Compute `linux_dirent64` field offsets at runtime by subtracting
        // field addresses of a zeroed instance from its base address.
        let z = linux_dirent64 {
            d_ino: 0_u64,
            d_off: 0_i64,
            d_type: 0_u8,
            d_reclen: 0_u16,
            d_name: Default::default(),
        };
        let base = as_ptr(&z) as usize;
        let offsetof_d_reclen = (as_ptr(&z.d_reclen) as usize) - base;
        let offsetof_d_name = (as_ptr(&z.d_name) as usize) - base;
        let offsetof_d_ino = (as_ptr(&z.d_ino) as usize) - base;
        let offsetof_d_type = (as_ptr(&z.d_type) as usize) - base;

        // Test if we need more entries, and if so, read more. `read_more`
        // returning `None` (end of directory) propagates out via `?`.
        if self.buf.len() - self.pos < size_of::<linux_dirent64>() {
            match self.read_more()? {
                Ok(()) => (),
                Err(err) => return Some(Err(err)),
            }
        }

        // We successfully read an entry. Extract the fields.
        let pos = self.pos;

        // Do an unaligned u16 load.
        let d_reclen = u16::from_ne_bytes([
            self.buf[pos + offsetof_d_reclen],
            self.buf[pos + offsetof_d_reclen + 1],
        ]);
        assert!(self.buf.len() - pos >= d_reclen as usize);
        // Advance past this record; `d_reclen` is the full record length.
        self.pos += d_reclen as usize;

        // Read the NUL-terminated name from the `d_name` field. Without
        // `unsafe`, we need to scan for the NUL twice: once to obtain a size
        // for the slice, and then once within `CStr::from_bytes_with_nul`.
        let name_start = pos + offsetof_d_name;
        let name_len = self.buf[name_start..]
            .iter()
            .position(|x| *x == b'\0')
            .unwrap();
        let name = CStr::from_bytes_with_nul(&self.buf[name_start..][..=name_len]).unwrap();
        let name = name.to_owned();
        assert!(name.as_bytes().len() <= self.buf.len() - name_start);

        // Do an unaligned u64 load.
        let d_ino = u64::from_ne_bytes([
            self.buf[pos + offsetof_d_ino],
            self.buf[pos + offsetof_d_ino + 1],
            self.buf[pos + offsetof_d_ino + 2],
            self.buf[pos + offsetof_d_ino + 3],
            self.buf[pos + offsetof_d_ino + 4],
            self.buf[pos + offsetof_d_ino + 5],
            self.buf[pos + offsetof_d_ino + 6],
            self.buf[pos + offsetof_d_ino + 7],
        ]);

        let d_type = self.buf[pos + offsetof_d_type];

        // Check that our types correspond to the `linux_dirent64` types.
        // This is a compile-time type check only; the value is discarded.
        let _ = linux_dirent64 {
            d_ino,
            d_off: 0,
            d_type,
            d_reclen,
            d_name: Default::default(),
        };

        Some(Ok(DirEntry {
            d_ino,
            d_type,
            name,
        }))
    }
159
    /// Refill `self.buf` with entries from the kernel.
    ///
    /// Returns `None` at the end of the directory (or on `NOENT`),
    /// `Some(Ok(()))` when more entries were read into the buffer, and
    /// `Some(Err(_))` on failure. On failure, `any_errors` is set so that
    /// subsequent `read` calls return `None`.
    fn read_more(&mut self) -> Option<io::Result<()>> {
        // The first few times we're called, we allocate a relatively small
        // buffer, because many directories are small. If we're called more,
        // use progressively larger allocations, up to a fixed maximum.
        //
        // The specific sizes and policy here have not been tuned in detail yet
        // and may need to be adjusted. In doing so, we should be careful to
        // avoid unbounded buffer growth. This buffer only exists to share the
        // cost of a `getdents` call over many entries, so if it gets too big,
        // cache and heap usage will outweigh the benefit. And ultimately,
        // directories can contain more entries than we can allocate contiguous
        // memory for, so we'll always need to cap the size at some point.
        if self.buf.len() < 1024 * size_of::<linux_dirent64>() {
            self.buf.reserve(32 * size_of::<linux_dirent64>());
        }
        // Expose the buffer's full capacity to the `getdents` call.
        self.buf.resize(self.buf.capacity(), 0);
        let nread = match io::retry_on_intr(|| {
            crate::backend::fs::syscalls::getdents(self.fd.as_fd(), &mut self.buf)
        }) {
            Ok(nread) => nread,
            // `NOENT` is mapped to end-of-stream rather than reported as an
            // error — NOTE(review): presumably because the directory was
            // removed while iterating; confirm against getdents64 semantics.
            Err(io::Errno::NOENT) => {
                self.any_errors = true;
                return None;
            }
            Err(err) => {
                self.any_errors = true;
                return Some(Err(err));
            }
        };
        // Trim down to the bytes actually read and restart parsing at the
        // beginning of the new data.
        self.buf.resize(nread, 0);
        self.pos = 0;
        if nread == 0 {
            // Zero bytes read means we've reached the end of the directory.
            None
        } else {
            Some(Ok(()))
        }
    }
197
198 /// `fstat(self)`
199 #[inline]
200 pub fn stat(&self) -> io::Result<Stat> {
201 fstat(&self.fd)
202 }
203
204 /// `fstatfs(self)`
205 #[inline]
206 pub fn statfs(&self) -> io::Result<StatFs> {
207 fstatfs(&self.fd)
208 }
209
210 /// `fstatvfs(self)`
211 #[inline]
212 pub fn statvfs(&self) -> io::Result<StatVfs> {
213 fstatvfs(&self.fd)
214 }
215
216 /// `fchdir(self)`
217 #[cfg(feature = "process")]
218 #[cfg_attr(doc_cfg, doc(cfg(feature = "process")))]
219 #[inline]
220 pub fn chdir(&self) -> io::Result<()> {
221 fchdir(&self.fd)
222 }
223}
224
225impl Iterator for Dir {
226 type Item = io::Result<DirEntry>;
227
228 #[inline]
229 fn next(&mut self) -> Option<Self::Item> {
230 Self::read(self)
231 }
232}
233
234impl fmt::Debug for Dir {
235 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
236 f.debug_struct("Dir").field(name:"fd", &self.fd).finish()
237 }
238}
239
/// `struct dirent`
///
/// A single directory entry, with fields copied out of the raw
/// `linux_dirent64` buffer so it owns its data.
#[derive(Debug)]
pub struct DirEntry {
    /// The entry's inode number (`d_ino`).
    d_ino: u64,
    /// The entry's raw file-type byte (`d_type`).
    d_type: u8,
    /// The entry's owned, NUL-terminated file name (from `d_name`).
    name: CString,
}
247
248impl DirEntry {
249 /// Returns the file name of this directory entry.
250 #[inline]
251 pub fn file_name(&self) -> &CStr {
252 &self.name
253 }
254
255 /// Returns the type of this directory entry.
256 #[inline]
257 pub fn file_type(&self) -> FileType {
258 FileType::from_dirent_d_type(self.d_type)
259 }
260
261 /// Return the inode number of this directory entry.
262 #[inline]
263 pub fn ino(&self) -> u64 {
264 self.d_ino
265 }
266}
267
#[test]
fn dir_iterator_handles_io_errors() {
    // Create a dir and keep an FD to it, so that we can later sabotage the
    // descriptor that the `Dir` iterator reads from.
    let tmp = tempfile::tempdir().unwrap();
    let fd = crate::fs::openat(
        crate::fs::CWD,
        tmp.path(),
        crate::fs::OFlags::RDONLY | crate::fs::OFlags::CLOEXEC,
        crate::fs::Mode::empty(),
    )
    .unwrap();

    // Create a regular file to swap in below. (`tmp.path()` is an absolute
    // path, so the `&fd` dirfd argument is not actually consulted here.)
    let file_fd = crate::fs::openat(
        &fd,
        tmp.path().join("test.txt"),
        crate::fs::OFlags::WRONLY | crate::fs::OFlags::CREATE,
        crate::fs::Mode::RWXU,
    )
    .unwrap();

    let mut dir = Dir::read_from(&fd).unwrap();

    // Reach inside the `Dir` and replace its directory with a file, which
    // will cause the subsequent `getdents64` to fail.
    crate::io::dup2(&file_fd, &mut dir.fd).unwrap();

    // The first `next` reports the error; after that, the sticky
    // `any_errors` flag makes the iterator return `None`.
    assert!(matches!(dir.next(), Some(Err(_))));
    assert!(matches!(dir.next(), None));
}
297