//===- FuzzerFork.cpp - run fuzzing in separate subprocesses --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Spawn and orchestrate separate fuzzing processes.
//===----------------------------------------------------------------------===//

#include "FuzzerCommand.h"
#include "FuzzerFork.h"
#include "FuzzerIO.h"
#include "FuzzerInternal.h"
#include "FuzzerMerge.h"
#include "FuzzerSHA1.h"
#include "FuzzerTracePC.h"
#include "FuzzerUtil.h"

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <fstream>
#include <memory>
#include <mutex>
#include <queue>
#include <sstream>
#include <thread>

namespace fuzzer {

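// Final statistics of one child job, parsed from its log (children are run
// with -print_final_stats=1).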
struct Stats {
  size_t number_of_executed_units = 0;
  size_t peak_rss_mb = 0;
  size_t average_exec_per_sec = 0;
};

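// Extracts the values above from the "stat::" lines that a child process
// prints at exit.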
static Stats ParseFinalStatsFromLog(const std::string &LogPath) {
  std::ifstream In(LogPath);
  std::string Line;
  Stats Res;
  struct {
    const char *Name;
    size_t *Var;
  } NameVarPairs[] = {
      {"stat::number_of_executed_units:", &Res.number_of_executed_units},
      {"stat::peak_rss_mb:", &Res.peak_rss_mb},
      {"stat::average_exec_per_sec:", &Res.average_exec_per_sec},
      {nullptr, nullptr},
  };
  while (std::getline(In, Line, '\n')) {
    if (Line.find("stat::") != 0) continue;
    std::istringstream ISS(Line);
    std::string Name;
    size_t Val;
    ISS >> Name >> Val;
    for (size_t i = 0; NameVarPairs[i].Name; i++)
      if (Name == NameVarPairs[i].Name)
        *NameVarPairs[i].Var = Val;
  }
  return Res;
}

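// Describes a single child fuzzing job: the command to execute plus the
// temporary corpus/features directories and the seed/log/merge-control files,
// all of which are removed when the job is destroyed.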
struct FuzzJob {
  // Inputs.
  Command Cmd;
  std::string CorpusDir;
  std::string FeaturesDir;
  std::string LogPath;
  std::string SeedListPath;
  std::string CFPath;
  size_t JobId;

  int DftTimeInSeconds = 0;

  // Fuzzing Outputs.
  int ExitCode;

  ~FuzzJob() {
    RemoveFile(CFPath);
    RemoveFile(LogPath);
    RemoveFile(SeedListPath);
    RmDirRecursive(CorpusDir);
    RmDirRecursive(FeaturesDir);
  }
};

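// Shared state of the orchestrating (parent) process: the forwarded args,
// the main corpus, the accumulated feature/coverage sets, and counters for
// timeouts, OOMs, crashes and executed runs.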
struct GlobalEnv {
  std::vector<std::string> Args;
  std::vector<std::string> CorpusDirs;
  std::string MainCorpusDir;
  std::string TempDir;
  std::string DFTDir;
  std::string DataFlowBinary;
  std::set<uint32_t> Features, Cov;
  std::set<std::string> FilesWithDFT;
  std::vector<std::string> Files;
  std::vector<std::size_t> FilesSizes;
  Random *Rand;
  std::chrono::system_clock::time_point ProcessStartTime;
  int Verbosity = 0;
  int Group = 0;
  int NumCorpuses = 8;

  size_t NumTimeouts = 0;
  size_t NumOOMs = 0;
  size_t NumCrashes = 0;

  size_t NumRuns = 0;

  std::string StopFile() { return DirPlusFile(TempDir, "STOP"); }

  size_t secondsSinceProcessStartUp() const {
    return std::chrono::duration_cast<std::chrono::seconds>(
               std::chrono::system_clock::now() - ProcessStartTime)
        .count();
  }

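  // Builds the Command and scratch paths for one child fuzzing job. The child
  // gets its own corpus dir (C<JobId>), features dir (F<JobId>), log file,
  // merge control file and a randomly chosen seed subset; the fork/runs/
  // collect_data_flow flags are stripped from the forwarded arguments.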
  FuzzJob *CreateNewJob(size_t JobId) {
    Command Cmd(Args);
    Cmd.removeFlag("fork");
    Cmd.removeFlag("runs");
    Cmd.removeFlag("collect_data_flow");
    for (auto &C : CorpusDirs) // Remove all corpora from the args.
      Cmd.removeArgument(C);
    Cmd.addFlag("reload", "0");  // working in an isolated dir, no reload.
    Cmd.addFlag("print_final_stats", "1");
    Cmd.addFlag("print_funcs", "0");  // no need to spend time symbolizing.
    Cmd.addFlag("max_total_time", std::to_string(std::min((size_t)300, JobId)));
    Cmd.addFlag("stop_file", StopFile());
    if (!DataFlowBinary.empty()) {
      Cmd.addFlag("data_flow_trace", DFTDir);
      if (!Cmd.hasFlag("focus_function"))
        Cmd.addFlag("focus_function", "auto");
    }
    auto Job = new FuzzJob;
    std::string Seeds;
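    // Select a random subset of roughly sqrt(|corpus|) seeds for this job,
    // skewed towards the most recently added files; when corpus grouping is
    // enabled, the subset is drawn from the group this JobId maps to.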
    if (size_t CorpusSubsetSize =
            std::min(Files.size(), (size_t)sqrt(Files.size() + 2))) {
      auto Time1 = std::chrono::system_clock::now();
      if (Group) { // whether to group the corpus.
        size_t AverageCorpusSize = Files.size() / NumCorpuses + 1;
        size_t StartIndex = ((JobId - 1) % NumCorpuses) * AverageCorpusSize;
        for (size_t i = 0; i < CorpusSubsetSize; i++) {
          size_t RandNum = (*Rand)(AverageCorpusSize);
          size_t Index = RandNum + StartIndex;
          Index = Index < Files.size() ? Index
                                       : Rand->SkewTowardsLast(Files.size());
          auto &SF = Files[Index];
          Seeds += (Seeds.empty() ? "" : ",") + SF;
          CollectDFT(SF);
        }
      } else {
        for (size_t i = 0; i < CorpusSubsetSize; i++) {
          auto &SF = Files[Rand->SkewTowardsLast(Files.size())];
          Seeds += (Seeds.empty() ? "" : ",") + SF;
          CollectDFT(SF);
        }
      }
      auto Time2 = std::chrono::system_clock::now();
      auto DftTimeInSeconds = duration_cast<seconds>(Time2 - Time1).count();
      assert(DftTimeInSeconds < std::numeric_limits<int>::max());
      Job->DftTimeInSeconds = static_cast<int>(DftTimeInSeconds);
    }
    if (!Seeds.empty()) {
      Job->SeedListPath =
          DirPlusFile(TempDir, std::to_string(JobId) + ".seeds");
      WriteToFile(Seeds, Job->SeedListPath);
      Cmd.addFlag("seed_inputs", "@" + Job->SeedListPath);
    }
    Job->LogPath = DirPlusFile(TempDir, std::to_string(JobId) + ".log");
    Job->CorpusDir = DirPlusFile(TempDir, "C" + std::to_string(JobId));
    Job->FeaturesDir = DirPlusFile(TempDir, "F" + std::to_string(JobId));
    Job->CFPath = DirPlusFile(TempDir, std::to_string(JobId) + ".merge");
    Job->JobId = JobId;

    Cmd.addArgument(Job->CorpusDir);
    Cmd.addFlag("features_dir", Job->FeaturesDir);

    for (auto &D : {Job->CorpusDir, Job->FeaturesDir}) {
      RmDirRecursive(D);
      MkDir(D);
    }

    Cmd.setOutputFile(Job->LogPath);
    Cmd.combineOutAndErr();

    Job->Cmd = Cmd;

    if (Verbosity >= 2)
      Printf("Job %zd/%p Created: %s\n", JobId, Job,
             Job->Cmd.toString().c_str());
    // Start from very short runs and gradually increase them.
    return Job;
  }

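  // Folds a finished job back into the global state: accounts for the
  // executed runs, keeps only the new inputs that cover unseen features,
  // merges them into the main corpus, and updates Features/Cov.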
  void RunOneMergeJob(FuzzJob *Job) {
    auto Stats = ParseFinalStatsFromLog(Job->LogPath);
    NumRuns += Stats.number_of_executed_units;

    std::vector<SizedFile> TempFiles, MergeCandidates;
    // Read all newly created inputs and their feature sets.
    // Choose only those inputs that have new features.
    GetSizedFilesFromDir(Job->CorpusDir, &TempFiles);
    std::sort(TempFiles.begin(), TempFiles.end());
    for (auto &F : TempFiles) {
      auto FeatureFile = F.File;
      FeatureFile.replace(0, Job->CorpusDir.size(), Job->FeaturesDir);
      auto FeatureBytes = FileToVector(FeatureFile, 0, false);
      assert((FeatureBytes.size() % sizeof(uint32_t)) == 0);
      std::vector<uint32_t> NewFeatures(FeatureBytes.size() / sizeof(uint32_t));
      memcpy(NewFeatures.data(), FeatureBytes.data(), FeatureBytes.size());
      for (auto Ft : NewFeatures) {
        if (!Features.count(Ft)) {
          MergeCandidates.push_back(F);
          break;
        }
      }
    }
    // if (!FilesToAdd.empty() || Job->ExitCode != 0)
    Printf("#%zd: cov: %zd ft: %zd corp: %zd exec/s: %zd "
           "oom/timeout/crash: %zd/%zd/%zd time: %zds job: %zd dft_time: %d\n",
           NumRuns, Cov.size(), Features.size(), Files.size(),
           Stats.average_exec_per_sec, NumOOMs, NumTimeouts, NumCrashes,
           secondsSinceProcessStartUp(), Job->JobId, Job->DftTimeInSeconds);

    if (MergeCandidates.empty()) return;

    std::vector<std::string> FilesToAdd;
    std::set<uint32_t> NewFeatures, NewCov;
    bool IsSetCoverMerge =
        !Job->Cmd.getFlagValue("set_cover_merge").compare("1");
    CrashResistantMerge(Args, {}, MergeCandidates, &FilesToAdd, Features,
                        &NewFeatures, Cov, &NewCov, Job->CFPath, false,
                        IsSetCoverMerge);
    for (auto &Path : FilesToAdd) {
      auto U = FileToVector(Path);
      auto NewPath = DirPlusFile(MainCorpusDir, Hash(U));
      WriteToFile(U, NewPath);
      if (Group) { // Insert into the queue according to the size of the seed.
        size_t UnitSize = U.size();
        auto Idx =
            std::upper_bound(FilesSizes.begin(), FilesSizes.end(), UnitSize) -
            FilesSizes.begin();
        FilesSizes.insert(FilesSizes.begin() + Idx, UnitSize);
        Files.insert(Files.begin() + Idx, NewPath);
      } else {
        Files.push_back(NewPath);
      }
    }
    Features.insert(NewFeatures.begin(), NewFeatures.end());
    Cov.insert(NewCov.begin(), NewCov.end());
    for (auto Idx : NewCov)
      if (auto *TE = TPC.PCTableEntryByIdx(Idx))
        if (TPC.PcIsFuncEntry(TE))
          PrintPC("  NEW_FUNC: %p %F %L\n", "",
                  TPC.GetNextInstructionPc(TE->PC));
  }

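  // Runs the target once on InputPath to collect its data-flow trace into
  // DFTDir. Active only when a data-flow binary was supplied; each input is
  // traced at most once.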
  void CollectDFT(const std::string &InputPath) {
    if (DataFlowBinary.empty()) return;
    if (!FilesWithDFT.insert(InputPath).second) return;
    Command Cmd(Args);
    Cmd.removeFlag("fork");
    Cmd.removeFlag("runs");
    Cmd.addFlag("data_flow_trace", DFTDir);
    Cmd.addArgument(InputPath);
    for (auto &C : CorpusDirs) // Remove all corpora from the args.
      Cmd.removeArgument(C);
    Cmd.setOutputFile(DirPlusFile(TempDir, "dft.log"));
    Cmd.combineOutAndErr();
    // Printf("CollectDFT: %s\n", Cmd.toString().c_str());
    ExecuteCommand(Cmd);
  }

};

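// A thread-safe FIFO of FuzzJob pointers, used to hand work to the worker
// threads and to collect finished jobs back in the main thread.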
struct JobQueue {
  std::queue<FuzzJob *> Qu;
  std::mutex Mu;
  std::condition_variable Cv;

  void Push(FuzzJob *Job) {
    {
      std::lock_guard<std::mutex> Lock(Mu);
      Qu.push(Job);
    }
    Cv.notify_one();
  }
  FuzzJob *Pop() {
    std::unique_lock<std::mutex> Lk(Mu);
    // std::lock_guard<std::mutex> Lock(Mu);
    Cv.wait(Lk, [&]{return !Qu.empty();});
    assert(!Qu.empty());
    auto Job = Qu.front();
    Qu.pop();
    return Job;
  }
};

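// Worker loop: pops jobs from FuzzQ, executes them, and pushes the finished
// jobs onto MergeQ. A nullptr job is the signal to exit.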
void WorkerThread(JobQueue *FuzzQ, JobQueue *MergeQ) {
  while (auto Job = FuzzQ->Pop()) {
    // Printf("WorkerThread: job %p\n", Job);
    Job->ExitCode = ExecuteCommand(Job->Cmd);
    MergeQ->Push(Job);
  }
}

// This is just a skeleton of an experimental -fork=1 feature.
void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,
                  const std::vector<std::string> &Args,
                  const std::vector<std::string> &CorpusDirs, int NumJobs) {
  Printf("INFO: -fork=%d: fuzzing in separate process(s)\n", NumJobs);

  GlobalEnv Env;
  Env.Args = Args;
  Env.CorpusDirs = CorpusDirs;
  Env.Rand = &Rand;
  Env.Verbosity = Options.Verbosity;
  Env.ProcessStartTime = std::chrono::system_clock::now();
  Env.DataFlowBinary = Options.CollectDataFlow;
  Env.Group = Options.ForkCorpusGroups;

  std::vector<SizedFile> SeedFiles;
  for (auto &Dir : CorpusDirs)
    GetSizedFilesFromDir(Dir, &SeedFiles);
  std::sort(SeedFiles.begin(), SeedFiles.end());
  Env.TempDir = TempPath("FuzzWithFork", ".dir");
  Env.DFTDir = DirPlusFile(Env.TempDir, "DFT");
  RmDirRecursive(Env.TempDir); // in case there is a leftover from old runs.
  MkDir(Env.TempDir);
  MkDir(Env.DFTDir);

  if (CorpusDirs.empty())
    MkDir(Env.MainCorpusDir = DirPlusFile(Env.TempDir, "C"));
  else
    Env.MainCorpusDir = CorpusDirs[0];

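  // Build the initial in-memory corpus: either take the seed files verbatim
  // (Options.KeepSeed) or run an initial merge that keeps only inputs adding
  // new features.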
  if (Options.KeepSeed) {
    for (auto &File : SeedFiles)
      Env.Files.push_back(File.File);
  } else {
    auto CFPath = DirPlusFile(Env.TempDir, "merge.txt");
    std::set<uint32_t> NewFeatures, NewCov;
    CrashResistantMerge(Env.Args, {}, SeedFiles, &Env.Files, Env.Features,
                        &NewFeatures, Env.Cov, &NewCov, CFPath,
                        /*Verbose=*/false, /*IsSetCoverMerge=*/false);
    Env.Features.insert(NewFeatures.begin(), NewFeatures.end());
    Env.Cov.insert(NewCov.begin(), NewCov.end());
    RemoveFile(CFPath);
  }

  if (Env.Group) {
    for (auto &path : Env.Files)
      Env.FilesSizes.push_back(FileSize(path));
  }

  Printf("INFO: -fork=%d: %zd seed inputs, starting to fuzz in %s\n", NumJobs,
         Env.Files.size(), Env.TempDir.c_str());

  int ExitCode = 0;

  JobQueue FuzzQ, MergeQ;

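  // Signals shutdown: pushes a nullptr sentinel for every worker (and one for
  // the merge loop) and creates the STOP file that the child processes watch
  // via the stop_file flag.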
  auto StopJobs = [&]() {
    for (int i = 0; i < NumJobs; i++)
      FuzzQ.Push(nullptr);
    MergeQ.Push(nullptr);
    WriteToFile(Unit({1}), Env.StopFile());
  };

  size_t MergeCycle = 20;
  size_t JobExecuted = 0;
  size_t JobId = 1;
  std::vector<std::thread> Threads;
  for (int t = 0; t < NumJobs; t++) {
    Threads.push_back(std::thread(WorkerThread, &FuzzQ, &MergeQ));
    FuzzQ.Push(Env.CreateNewJob(JobId++));
  }

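  // Main orchestration loop: wait for a finished job, merge its results,
  // check the stop conditions, and queue a replacement job.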
  while (true) {
    std::unique_ptr<FuzzJob> Job(MergeQ.Pop());
    if (!Job)
      break;
    ExitCode = Job->ExitCode;
    if (ExitCode == Options.InterruptExitCode) {
      Printf("==%lu== libFuzzer: a child was interrupted; exiting\n", GetPid());
      StopJobs();
      break;
    }
    Fuzzer::MaybeExitGracefully();

    Env.RunOneMergeJob(Job.get());

    // Periodically re-merge the on-disk corpus (only when corpus grouping is
    // enabled), rebuilding Env.Files and Env.FilesSizes from scratch.
    JobExecuted++;
    if (Env.Group && JobExecuted >= MergeCycle) {
      std::vector<SizedFile> CurrentSeedFiles;
      for (auto &Dir : CorpusDirs)
        GetSizedFilesFromDir(Dir, &CurrentSeedFiles);
      std::sort(CurrentSeedFiles.begin(), CurrentSeedFiles.end());

      auto CFPath = DirPlusFile(Env.TempDir, "merge.txt");
      std::set<uint32_t> TmpNewFeatures, TmpNewCov;
      std::set<uint32_t> TmpFeatures, TmpCov;
      Env.Files.clear();
      Env.FilesSizes.clear();
      CrashResistantMerge(Env.Args, {}, CurrentSeedFiles, &Env.Files,
                          TmpFeatures, &TmpNewFeatures, TmpCov, &TmpNewCov,
                          CFPath, /*Verbose=*/false, /*IsSetCoverMerge=*/false);
      for (auto &path : Env.Files)
        Env.FilesSizes.push_back(FileSize(path));
      RemoveFile(CFPath);
      JobExecuted = 0;
      MergeCycle += 5;
    }

    // The corpus grows over time, so adjust the number of corpus groups
    // dynamically to keep each group at roughly three times the number of
    // seeds selected per job.
    if (Env.Files.size() < 2000)
      Env.NumCorpuses = 12;
    else if (Env.Files.size() < 6000)
      Env.NumCorpuses = 20;
    else if (Env.Files.size() < 12000)
      Env.NumCorpuses = 32;
    else if (Env.Files.size() < 16000)
      Env.NumCorpuses = 40;
    else if (Env.Files.size() < 24000)
      Env.NumCorpuses = 60;
    else
      Env.NumCorpuses = 80;

    // Continue if our crash is one of the ignored ones.
    if (Options.IgnoreTimeouts && ExitCode == Options.TimeoutExitCode)
      Env.NumTimeouts++;
    else if (Options.IgnoreOOMs && ExitCode == Options.OOMExitCode)
      Env.NumOOMs++;
    else if (ExitCode != 0) {
      Env.NumCrashes++;
      if (Options.IgnoreCrashes) {
        std::ifstream In(Job->LogPath);
        std::string Line;
        while (std::getline(In, Line, '\n'))
          if (Line.find("ERROR:") != Line.npos ||
              Line.find("runtime error:") != Line.npos)
            Printf("%s\n", Line.c_str());
      } else {
        // And exit if we don't ignore this crash.
        Printf("INFO: log from the inner process:\n%s",
               FileToString(Job->LogPath).c_str());
        StopJobs();
        break;
      }
    }

    // Stop if we are over the time budget.
    // This is not precise, since other threads are still running
    // and we will wait while joining them.
    // We also don't stop instantly: other jobs need to finish.
    if (Options.MaxTotalTimeSec > 0 &&
        Env.secondsSinceProcessStartUp() >= (size_t)Options.MaxTotalTimeSec) {
      Printf("INFO: fuzzed for %zd seconds, wrapping up soon\n",
             Env.secondsSinceProcessStartUp());
      StopJobs();
      break;
    }
    if (Env.NumRuns >= Options.MaxNumberOfRuns) {
      Printf("INFO: fuzzed for %zd iterations, wrapping up soon\n",
             Env.NumRuns);
      StopJobs();
      break;
    }

    FuzzQ.Push(Env.CreateNewJob(JobId++));
  }

  for (auto &T : Threads)
    T.join();

  // The workers have terminated. Don't try to remove the directory before they
  // terminate to avoid a race condition preventing cleanup on Windows.
  RmDirRecursive(Env.TempDir);

  // Use the exit code from the last child process.
  Printf("INFO: exiting: %d time: %zds\n", ExitCode,
         Env.secondsSinceProcessStartUp());
  exit(ExitCode);
}

} // namespace fuzzer

