//===- bolt/runtime/hugify.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#if defined(__x86_64__) && !defined(__APPLE__)

#include "common.h"

#pragma GCC visibility push(hidden)

// Enables very verbose logging to stderr; useful when debugging.
// #define ENABLE_DEBUG

#ifdef ENABLE_DEBUG
#define DEBUG(X) \
  { X; }
#else
#define DEBUG(X) \
  {}
#endif
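// Example: DEBUG(report("[hugify] debug build\n");) expands to a call to
// report() only when ENABLE_DEBUG is defined, and to an empty block otherwise.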

// This function contains a trampoline to _start, so we can resume regular
// execution of the function that we hooked.
extern void __bolt_hugify_start_program();

// The __hot_start and __hot_end symbols are set by BOLT. We use them to figure
// out the range for marking huge pages.
extern uint64_t __hot_start;
extern uint64_t __hot_end;

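/// Parse the kernel release string reported by uname (e.g. "5.15.0-generic")
/// into Val[0..2]; that example yields {5, 15, 0}. Parsing stops early if the
/// string does not have the expected "major.minor.patch" shape.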
static void getKernelVersion(uint32_t *Val) {
  // release should be in the format: %d.%d.%d
  // major, minor, release
  struct UtsNameTy UtsName;
  int Ret = __uname(&UtsName);
  const char *Buf = UtsName.release;
  const char *End = Buf + strLen(Buf);
  const char Delims[2][2] = {".", "."};

  for (int i = 0; i < 3; ++i) {
    if (!scanUInt32(Buf, End, Val[i])) {
      return;
    }
    if (i < sizeof(Delims) / sizeof(Delims[0])) {
      const char *Ptr = Delims[i];
      while (*Ptr != '\0') {
        if (*Ptr != *Buf) {
          return;
        }
        ++Ptr;
        ++Buf;
      }
    }
  }
}

/// Check whether the kernel supports THP via the corresponding sysfs entry.
/// THP for file-backed (pagecache) mappings works only starting from
/// kernel 5.10.
static bool hasPagecacheTHPSupport() {
  char Buf[64];

  int FD = __open("/sys/kernel/mm/transparent_hugepage/enabled",
                  0 /* O_RDONLY */, 0);
  if (FD < 0)
    return false;

  memset(Buf, 0, sizeof(Buf));
  // Use a signed type so the error check can fire; __read is assumed to
  // return the raw (negative) syscall result on failure.
  const int64_t Res = __read(FD, Buf, sizeof(Buf));
  if (Res < 0)
    return false;

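  // The sysfs entry lists the available policies with the active one in
  // brackets, e.g. "always [madvise] never". Huge pages can be requested via
  // madvise() only when "always" or "madvise" is active.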
  if (!strStr(Buf, "[always]") && !strStr(Buf, "[madvise]"))
    return false;

  struct KernelVersionTy {
    uint32_t major;
    uint32_t minor;
    uint32_t release;
  };

  KernelVersionTy KernelVersion;

  getKernelVersion((uint32_t *)&KernelVersion);
  // Pagecache THP requires kernel 5.10 or newer (including any 6.x kernel).
  if (KernelVersion.major > 5 ||
      (KernelVersion.major == 5 && KernelVersion.minor >= 10))
    return true;

  return false;
}

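/// Fallback for kernels without pagecache THP support: copy the hot code to a
/// temporary anonymous buffer, replace the original file-backed text mapping
/// with fresh anonymous memory, request huge pages with MADV_HUGEPAGE, copy
/// the code back, and restore read/execute permissions.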
static void hugifyForOldKernel(uint8_t *From, uint8_t *To) {
  const size_t Size = To - From;

  uint8_t *Mem = reinterpret_cast<uint8_t *>(
      __mmap(0, Size, 0x3 /* PROT_READ | PROT_WRITE */,
             0x22 /* MAP_PRIVATE | MAP_ANONYMOUS */, -1, 0));

  if (Mem == ((void *)-1) /* MAP_FAILED */) {
    char Msg[] = "[hugify] could not allocate memory for text move\n";
    reportError(Msg, sizeof(Msg));
  }

  DEBUG(reportNumber("[hugify] allocated temporary address: ", (uint64_t)Mem,
                     16);)
  DEBUG(reportNumber("[hugify] allocated size: ", (uint64_t)Size, 16);)

  // Copy the hot code to a temporary location.
  memcpy(Mem, From, Size);

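  // Passing zero to PR_SET_THP_DISABLE clears the per-process "THP disabled"
  // flag, so the MADV_HUGEPAGE request below can take effect.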
  __prctl(41 /* PR_SET_THP_DISABLE */, 0, 0, 0, 0);
  // Map fresh anonymous memory over the existing hot code.
  if (__mmap(reinterpret_cast<uint64_t>(From), Size,
             0x3 /* PROT_READ | PROT_WRITE */,
             0x32 /* MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE */, -1,
             0) == ((void *)-1) /*MAP_FAILED*/) {
    char Msg[] =
        "[hugify] failed to mmap memory for large page move, terminating\n";
    reportError(Msg, sizeof(Msg));
  }

  // Mark the hot code range to be backed by huge pages.
  if (__madvise(From, Size, 14 /* MADV_HUGEPAGE */) == -1) {
    char Msg[] = "[hugify] failed to set MADV_HUGEPAGE\n";
    reportError(Msg, sizeof(Msg));
  }

  // Copy the hot code back.
  memcpy(From, Mem, Size);

  // Change permissions back to read/execute; ignore failure.
  __mprotect(From, Size, 0x5 /* PROT_READ | PROT_EXEC */);

  __munmap(Mem, Size);
}

extern "C" void __bolt_hugify_self_impl() {
  uint8_t *HotStart = (uint8_t *)&__hot_start;
  uint8_t *HotEnd = (uint8_t *)&__hot_end;
  // Round the start down and the end up to a 2MiB huge-page boundary.
  const size_t HugePageBytes = 2L * 1024 * 1024;
  uint8_t *From = HotStart - ((intptr_t)HotStart & (HugePageBytes - 1));
  uint8_t *To = HotEnd + (HugePageBytes - 1);
  To -= (intptr_t)To & (HugePageBytes - 1);
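  // For example, HotStart = 0x600123 and HotEnd = 0x812345 give the range
  // [0x600000, 0xa00000): the start rounds down and the end rounds up to the
  // nearest 2MiB (0x200000) boundary.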

  DEBUG(reportNumber("[hugify] hot start: ", (uint64_t)HotStart, 16);)
  DEBUG(reportNumber("[hugify] hot end: ", (uint64_t)HotEnd, 16);)
  DEBUG(reportNumber("[hugify] aligned huge page from: ", (uint64_t)From, 16);)
  DEBUG(reportNumber("[hugify] aligned huge page to: ", (uint64_t)To, 16);)

  if (!hasPagecacheTHPSupport()) {
    DEBUG(report(
        "[hugify] workaround with memory alignment for kernel < 5.10\n");)
    hugifyForOldKernel(From, To);
    return;
  }

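  // With pagecache THP support, MADV_HUGEPAGE only marks the range as a
  // candidate; the kernel (khugepaged) is expected to collapse it into huge
  // pages in the background.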
  if (__madvise(From, (To - From), 14 /* MADV_HUGEPAGE */) == -1) {
    char Msg[] = "[hugify] failed to allocate large page\n";
    // TODO: allow user to control the failure behavior.
    reportError(Msg, sizeof(Msg));
  }
}

/// This hooks the ELF entry point; it needs to save all machine state.
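/// SAVE_ALL and RESTORE_ALL (macros assumed to come from common.h) spill and
/// reload the general-purpose register state around the call, so the hooked
/// program resumes with its registers intact.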
extern "C" __attribute((naked)) void __bolt_hugify_self() {
#if defined(__x86_64__)
  __asm__ __volatile__(SAVE_ALL "call __bolt_hugify_self_impl\n" RESTORE_ALL
                       "jmp __bolt_hugify_start_program\n" ::
                           :);
#else
  exit(1);
#endif
}
#endif