//! Miscellaneous helpers for running commands

use std::{
    borrow::Cow,
    collections::hash_map,
    ffi::OsString,
    fmt::Display,
    fs,
    hash::Hasher,
    io::{self, Read, Write},
    path::Path,
    process::{Child, ChildStderr, Command, Stdio},
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    },
};

use crate::{Error, ErrorKind, Object};

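/// Controls how output from spawned tools is surfaced: cargo metadata lines,
/// `cargo:warning=` lines, debug logging, and what happens to the child's stdout.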
#[derive(Clone, Debug)]
pub(crate) struct CargoOutput {
    pub(crate) metadata: bool,
    pub(crate) warnings: bool,
    pub(crate) debug: bool,
    pub(crate) output: OutputKind,
    checked_dbg_var: Arc<AtomicBool>,
}

/// Different strategies for handling compiler output (to stdout)
#[derive(Clone, Debug)]
pub(crate) enum OutputKind {
    /// Forward the output to this process' stdout ([`Stdio::inherit()`])
    Forward,
    /// Discard the output ([`Stdio::null()`])
    Discard,
    /// Capture the result ([`Stdio::piped()`])
    Capture,
}

impl CargoOutput {
    pub(crate) fn new() -> Self {
        #[allow(clippy::disallowed_methods)]
        Self {
            metadata: true,
            warnings: true,
            output: OutputKind::Forward,
            debug: match std::env::var_os("CC_ENABLE_DEBUG_OUTPUT") {
                Some(v) => v != "0" && v != "false" && v != "",
                None => false,
            },
            checked_dbg_var: Arc::new(AtomicBool::new(false)),
        }
    }

    pub(crate) fn print_metadata(&self, s: &dyn Display) {
        if self.metadata {
            println!("{}", s);
        }
    }

    pub(crate) fn print_warning(&self, arg: &dyn Display) {
        if self.warnings {
            println!("cargo:warning={}", arg);
        }
    }

    pub(crate) fn print_debug(&self, arg: &dyn Display) {
        if self.metadata && !self.checked_dbg_var.load(Ordering::Relaxed) {
            self.checked_dbg_var.store(true, Ordering::Relaxed);
            println!("cargo:rerun-if-env-changed=CC_ENABLE_DEBUG_OUTPUT");
        }
        if self.debug {
            println!("{}", arg);
        }
    }

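    /// Stdio to attach to the child's stderr: piped when warnings are
    /// forwarded as `cargo:warning=` lines, discarded otherwise.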
    fn stdio_for_warnings(&self) -> Stdio {
        if self.warnings {
            Stdio::piped()
        } else {
            Stdio::null()
        }
    }

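    /// Stdio to attach to the child's stdout, according to [`OutputKind`].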
    fn stdio_for_output(&self) -> Stdio {
        match self.output {
            OutputKind::Capture => Stdio::piped(),
            OutputKind::Forward => Stdio::inherit(),
            OutputKind::Discard => Stdio::null(),
        }
    }
}

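/// Incrementally forwards a child process' stderr to this process' stdout as
/// `cargo:warning=` lines, buffering partial lines until a newline arrives.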
pub(crate) struct StderrForwarder {
    inner: Option<(ChildStderr, Vec<u8>)>,
    #[cfg(feature = "parallel")]
    is_non_blocking: bool,
    #[cfg(feature = "parallel")]
    bytes_available_failed: bool,
    /// Number of bytes buffered in `inner`
    bytes_buffered: usize,
}

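/// Minimum number of bytes to reserve in the stderr buffer before each read.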
const MIN_BUFFER_CAPACITY: usize = 100;

impl StderrForwarder {
    pub(crate) fn new(child: &mut Child) -> Self {
        Self {
            inner: child
                .stderr
                .take()
                .map(|stderr| (stderr, Vec::with_capacity(MIN_BUFFER_CAPACITY))),
            bytes_buffered: 0,
            #[cfg(feature = "parallel")]
            is_non_blocking: false,
            #[cfg(feature = "parallel")]
            bytes_available_failed: false,
        }
    }

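    /// Forward any complete lines currently buffered from the child's stderr.
    /// Returns `true` once the stream has been fully consumed (or lost), and
    /// `false` if more data may still arrive.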
    fn forward_available(&mut self) -> bool {
        if let Some((stderr, buffer)) = self.inner.as_mut() {
            loop {
                // For non-blocking we check to see if there is data available, so we should try to
                // read at least that much. For blocking, always read at least the minimum amount.
                #[cfg(not(feature = "parallel"))]
                let to_reserve = MIN_BUFFER_CAPACITY;
                #[cfg(feature = "parallel")]
                let to_reserve = if self.is_non_blocking && !self.bytes_available_failed {
                    match crate::parallel::stderr::bytes_available(stderr) {
                        #[cfg(windows)]
                        Ok(0) => break false,
                        #[cfg(unix)]
                        Ok(0) => {
                            // On Unix, depending on the implementation, we may sometimes get 0 in a
                            // loop (either there is data available or the pipe is broken), so
                            // continue with the non-blocking read anyway.
                            MIN_BUFFER_CAPACITY
                        }
                        #[cfg(windows)]
                        Err(_) => {
                            // On Windows, if we get an error then the pipe is broken, so flush
                            // the buffer and bail.
                            if !buffer.is_empty() {
                                write_warning(&buffer[..]);
                            }
                            self.inner = None;
                            break true;
                        }
                        #[cfg(unix)]
                        Err(_) => {
                            // On Unix, depending on the implementation, we may get spurious
                            // errors, so make a note not to use bytes_available again and try
                            // the non-blocking read anyway.
                            self.bytes_available_failed = true;
                            MIN_BUFFER_CAPACITY
                        }
                        #[cfg(target_family = "wasm")]
                        Err(_) => panic!("bytes_available should always succeed on wasm"),
                        Ok(bytes_available) => MIN_BUFFER_CAPACITY.max(bytes_available),
                    }
                } else {
                    MIN_BUFFER_CAPACITY
                };
                if self.bytes_buffered + to_reserve > buffer.len() {
                    buffer.resize(self.bytes_buffered + to_reserve, 0);
                }

                match stderr.read(&mut buffer[self.bytes_buffered..]) {
                    Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => {
                        // No data currently, yield back.
                        break false;
                    }
                    Err(err) if err.kind() == std::io::ErrorKind::Interrupted => {
                        // Interrupted, try again.
                        continue;
                    }
                    Ok(bytes_read) if bytes_read != 0 => {
                        self.bytes_buffered += bytes_read;
                        let mut consumed = 0;
                        for line in buffer[..self.bytes_buffered].split_inclusive(|&b| b == b'\n') {
                            // Only forward complete lines, leave the rest in the buffer.
                            if let Some((b'\n', line)) = line.split_last() {
                                consumed += line.len() + 1;
                                write_warning(line);
                            }
                        }
                        if consumed > 0 && consumed < self.bytes_buffered {
                            // Remove the consumed bytes from the buffer.
                            buffer.copy_within(consumed.., 0);
                        }
                        self.bytes_buffered -= consumed;
                    }
                    res => {
                        // End of stream: flush remaining data and bail.
                        if self.bytes_buffered > 0 {
                            write_warning(&buffer[..self.bytes_buffered]);
                        }
                        if let Err(err) = res {
                            write_warning(
                                format!("Failed to read from child stderr: {err}").as_bytes(),
                            );
                        }
                        self.inner.take();
                        break true;
                    }
                }
            }
        } else {
            true
        }
    }

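    /// Mark this forwarder as non-blocking; on Unix the underlying pipe is
    /// switched to non-blocking mode so that [`StderrForwarder::forward_available`]
    /// can be polled without blocking.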
    #[cfg(feature = "parallel")]
    pub(crate) fn set_non_blocking(&mut self) -> Result<(), Error> {
        assert!(!self.is_non_blocking);

        #[cfg(unix)]
        if let Some((stderr, _)) = self.inner.as_ref() {
            crate::parallel::stderr::set_non_blocking(stderr)?;
        }

        self.is_non_blocking = true;
        Ok(())
    }

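    /// Drain the child's stderr completely, forwarding everything as warnings.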
    #[cfg(feature = "parallel")]
    fn forward_all(&mut self) {
        while !self.forward_available() {}
    }

    #[cfg(not(feature = "parallel"))]
    fn forward_all(&mut self) {
        let forward_result = self.forward_available();
        assert!(forward_result, "Should have consumed all data");
    }
}

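/// Write a single `cargo:warning=` line containing `line` to stdout.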
fn write_warning(line: &[u8]) {
    let stdout = io::stdout();
    let mut stdout = stdout.lock();
    stdout.write_all(b"cargo:warning=").unwrap();
    stdout.write_all(line).unwrap();
    stdout.write_all(b"\n").unwrap();
}

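/// Forward the child's remaining stderr, then wait for it to exit, mapping a
/// non-zero exit status to [`ErrorKind::ToolExecError`].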
fn wait_on_child(
    cmd: &Command,
    child: &mut Child,
    cargo_output: &CargoOutput,
) -> Result<(), Error> {
    StderrForwarder::new(child).forward_all();

    let status = match child.wait() {
        Ok(s) => s,
        Err(e) => {
            return Err(Error::new(
                ErrorKind::ToolExecError,
                format!("failed to wait on spawned child process `{cmd:?}`: {e}"),
            ));
        }
    };

    cargo_output.print_debug(&status);

    if status.success() {
        Ok(())
    } else {
        Err(Error::new(
            ErrorKind::ToolExecError,
            format!("command did not execute successfully (status code {status}): {cmd:?}"),
        ))
    }
}

/// Find the destination object path for each file in the input source files,
/// and store them in the output Object.
pub(crate) fn objects_from_files(files: &[Arc<Path>], dst: &Path) -> Result<Vec<Object>, Error> {
    let mut objects = Vec::with_capacity(files.len());
    for file in files {
        let basename = file
            .file_name()
            .ok_or_else(|| {
                Error::new(
                    ErrorKind::InvalidArgument,
                    "No file_name for object file path!",
                )
            })?
            .to_string_lossy();
        let dirname = file
            .parent()
            .ok_or_else(|| {
                Error::new(
                    ErrorKind::InvalidArgument,
                    "No parent for object file path!",
                )
            })?
            .to_string_lossy();

        // Hash the dirname. This should prevent conflicts if we have multiple
        // object files with the same filename in different subfolders.
        let mut hasher = hash_map::DefaultHasher::new();

        // Make the dirname relative (if possible) to avoid full system paths influencing the
        // hash and making the output system-dependent.
        //
        // NOTE: Here we allow using std::env::var (instead of Build::getenv) because
        // CARGO_* variables always trigger a rebuild when changed.
        #[allow(clippy::disallowed_methods)]
        let dirname = if let Some(root) = std::env::var_os("CARGO_MANIFEST_DIR") {
            let root = root.to_string_lossy();
            Cow::Borrowed(dirname.strip_prefix(&*root).unwrap_or(&dirname))
        } else {
            dirname
        };

        hasher.write(dirname.as_bytes());
        if let Some(extension) = file.extension() {
            hasher.write(extension.to_string_lossy().as_bytes());
        }
        let obj = dst
            .join(format!("{:016x}-{}", hasher.finish(), basename))
            .with_extension("o");

        match obj.parent() {
            Some(s) => fs::create_dir_all(s)?,
            None => {
                return Err(Error::new(
                    ErrorKind::InvalidArgument,
                    "dst is an invalid path with no parent",
                ));
            }
        };

        objects.push(Object::new(file.to_path_buf(), obj));
    }

    Ok(objects)
}

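/// Spawn `cmd` with the given output settings and wait for it to complete.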
pub(crate) fn run(cmd: &mut Command, cargo_output: &CargoOutput) -> Result<(), Error> {
    let mut child = spawn(cmd, cargo_output)?;
    wait_on_child(cmd, &mut child, cargo_output)
}

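/// Like [`run`], but capture the child's stdout and return it as raw bytes.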
pub(crate) fn run_output(cmd: &mut Command, cargo_output: &CargoOutput) -> Result<Vec<u8>, Error> {
    // We specifically need the output to be captured, so override the default.
    let mut captured_cargo_output = cargo_output.clone();
    captured_cargo_output.output = OutputKind::Capture;
    let mut child = spawn(cmd, &captured_cargo_output)?;

    let mut stdout = vec![];
    child
        .stdout
        .take()
        .unwrap()
        .read_to_end(&mut stdout)
        .unwrap();

    // Don't care about this output, use the normal settings.
    wait_on_child(cmd, &mut child, cargo_output)?;

    Ok(stdout)
}

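/// Spawn `cmd`, wiring its stdout/stderr according to `cargo_output`, and map
/// spawn failures to [`ErrorKind::ToolNotFound`] or [`ErrorKind::ToolExecError`].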
pub(crate) fn spawn(cmd: &mut Command, cargo_output: &CargoOutput) -> Result<Child, Error> {
    struct ResetStderr<'cmd>(&'cmd mut Command);

    impl Drop for ResetStderr<'_> {
        fn drop(&mut self) {
            // Reset stderr to default to release pipe_writer so that the print thread will
            // not block forever.
            self.0.stderr(Stdio::inherit());
        }
    }

    cargo_output.print_debug(&format_args!("running: {:?}", cmd));

    let cmd = ResetStderr(cmd);
    let child = cmd
        .0
        .stderr(cargo_output.stdio_for_warnings())
        .stdout(cargo_output.stdio_for_output())
        .spawn();
    match child {
        Ok(child) => Ok(child),
        Err(ref e) if e.kind() == io::ErrorKind::NotFound => {
            let extra = if cfg!(windows) {
                " (see https://docs.rs/cc/latest/cc/#compile-time-requirements for help)"
            } else {
                ""
            };
            Err(Error::new(
                ErrorKind::ToolNotFound,
                format!("failed to find tool {:?}: {e}{extra}", cmd.0.get_program()),
            ))
        }
        Err(e) => Err(Error::new(
            ErrorKind::ToolExecError,
            format!("command `{:?}` failed to start: {e}", cmd.0),
        )),
    }
}

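/// Flags describing the toolchain in use, used by [`command_add_output_file`]
/// to pick between the MSVC-style `-Fo<path>` and the GNU-style `-o <path>`
/// output arguments.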
pub(crate) struct CmdAddOutputFileArgs {
    pub(crate) cuda: bool,
    pub(crate) is_assembler_msvc: bool,
    pub(crate) msvc: bool,
    pub(crate) clang: bool,
    pub(crate) gnu: bool,
    pub(crate) is_asm: bool,
    pub(crate) is_arm: bool,
}

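/// Append the argument that tells the compiler/assembler where to write the
/// object file `dst`.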
pub(crate) fn command_add_output_file(cmd: &mut Command, dst: &Path, args: CmdAddOutputFileArgs) {
    if args.is_assembler_msvc
        || !(!args.msvc || args.clang || args.gnu || args.cuda || (args.is_asm && args.is_arm))
    {
        let mut s = OsString::from("-Fo");
        s.push(dst);
        cmd.arg(s);
    } else {
        cmd.arg("-o").arg(dst);
    }
}

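/// Non-blocking check on a spawned child: forward any pending stderr, then see
/// whether the process has exited. Returns `Ok(Some(()))` on success,
/// `Ok(None)` if it is still running, and an error otherwise.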
#[cfg(feature = "parallel")]
pub(crate) fn try_wait_on_child(
    cmd: &Command,
    child: &mut Child,
    stdout: &mut dyn io::Write,
    stderr_forwarder: &mut StderrForwarder,
) -> Result<Option<()>, Error> {
    stderr_forwarder.forward_available();

    match child.try_wait() {
        Ok(Some(status)) => {
            stderr_forwarder.forward_all();

            let _ = writeln!(stdout, "{}", status);

            if status.success() {
                Ok(Some(()))
            } else {
                Err(Error::new(
                    ErrorKind::ToolExecError,
                    format!("command did not execute successfully (status code {status}): {cmd:?}"),
                ))
            }
        }
        Ok(None) => Ok(None),
        Err(e) => {
            stderr_forwarder.forward_all();
            Err(Error::new(
                ErrorKind::ToolExecError,
                format!("failed to wait on spawned child process `{cmd:?}`: {e}"),
            ))
        }
    }
}