MemUnreachable.cpp

/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <inttypes.h>
#include <string.h>

#include <algorithm>  // std::sort, std::min (used below)
#include <functional>
#include <iomanip>
#include <mutex>
#include <sstream>
#include <string>
#include <unordered_map>

#include <android-base/macros.h>
#include <backtrace.h>

#include "Allocator.h"
#include "Binder.h"
#include "HeapWalker.h"
#include "Leak.h"
#include "LeakFolding.h"
#include "LeakPipe.h"
#include "ProcessMappings.h"
#include "PtracerThread.h"
#include "ScopedDisableMalloc.h"
#include "Semaphore.h"
#include "ThreadCapture.h"

#include "bionic.h"
#include "log.h"
#include "memunreachable/memunreachable.h"

using namespace std::chrono_literals;
namespace android {

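// Out-of-line definition for the static member declared in the Leak struct
// (see Leak.h); needed so that ODR-uses such as the std::min call below link
// correctly pre-C++17.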
const size_t Leak::contents_length;

class MemUnreachable {
 public:
  MemUnreachable(pid_t pid, Allocator<void> allocator)
      : pid_(pid), allocator_(allocator), heap_walker_(allocator_) {}
  bool CollectAllocations(const allocator::vector<ThreadInfo>& threads,
                          const allocator::vector<Mapping>& mappings,
                          const allocator::vector<uintptr_t>& refs);
  bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit, size_t* num_leaks,
                            size_t* leak_bytes);
  size_t Allocations() { return heap_walker_.Allocations(); }
  size_t AllocationBytes() { return heap_walker_.AllocationBytes(); }

 private:
  bool ClassifyMappings(const allocator::vector<Mapping>& mappings,
                        allocator::vector<Mapping>& heap_mappings,
                        allocator::vector<Mapping>& anon_mappings,
                        allocator::vector<Mapping>& globals_mappings,
                        allocator::vector<Mapping>& stack_mappings);

  DISALLOW_COPY_AND_ASSIGN(MemUnreachable);

  pid_t pid_;
  Allocator<void> allocator_;
  HeapWalker heap_walker_;
};

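// Adapter between malloc_iterate()'s C-style callback and a std::function:
// the std::function is smuggled through the void* cookie argument and
// invoked once per live allocation found in the mapping.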
static void HeapIterate(const Mapping& heap_mapping,
                        const std::function<void(uintptr_t, size_t)>& func) {
  malloc_iterate(heap_mapping.begin, heap_mapping.end - heap_mapping.begin,
                 [](uintptr_t base, size_t size, void* arg) {
                   auto f = reinterpret_cast<const std::function<void(uintptr_t, size_t)>*>(arg);
                   (*f)(base, size);
                 },
                 const_cast<void*>(reinterpret_cast<const void*>(&func)));
}

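// Feeds the heap walker: registers every mapping as valid address space,
// records malloc allocations and large anonymous mappings as candidate
// allocations, and marks globals, thread stacks, registers, and binder
// references as roots for the reachability pass.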
bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& threads,
                                        const allocator::vector<Mapping>& mappings,
                                        const allocator::vector<uintptr_t>& refs) {
  MEM_ALOGI("searching process %d for allocations", pid_);

  for (auto it = mappings.begin(); it != mappings.end(); it++) {
    heap_walker_.Mapping(it->begin, it->end);
  }

  allocator::vector<Mapping> heap_mappings{mappings};
  allocator::vector<Mapping> anon_mappings{mappings};
  allocator::vector<Mapping> globals_mappings{mappings};
  allocator::vector<Mapping> stack_mappings{mappings};
  if (!ClassifyMappings(mappings, heap_mappings, anon_mappings, globals_mappings,
                        stack_mappings)) {
    return false;
  }

  for (auto it = heap_mappings.begin(); it != heap_mappings.end(); it++) {
    MEM_ALOGV("Heap mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    HeapIterate(*it,
                [&](uintptr_t base, size_t size) { heap_walker_.Allocation(base, base + size); });
  }

  for (auto it = anon_mappings.begin(); it != anon_mappings.end(); it++) {
    MEM_ALOGV("Anon mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    heap_walker_.Allocation(it->begin, it->end);
  }

  for (auto it = globals_mappings.begin(); it != globals_mappings.end(); it++) {
    MEM_ALOGV("Globals mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    heap_walker_.Root(it->begin, it->end);
  }

  for (auto thread_it = threads.begin(); thread_it != threads.end(); thread_it++) {
    for (auto it = stack_mappings.begin(); it != stack_mappings.end(); it++) {
      if (thread_it->stack.first >= it->begin && thread_it->stack.first <= it->end) {
        MEM_ALOGV("Stack %" PRIxPTR "-%" PRIxPTR " %s", thread_it->stack.first, it->end, it->name);
        heap_walker_.Root(thread_it->stack.first, it->end);
      }
    }
    heap_walker_.Root(thread_it->regs);
  }

  heap_walker_.Root(refs);

  MEM_ALOGI("searching done");

  return true;
}

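// Runs the detection proper: sweeps for unreachable allocations, folds leaks
// that only reference each other into a single dominating leak, and
// deduplicates leaks that share an allocation backtrace so repeated leaks
// from one call site are reported once with a similar_count.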
bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
                                          size_t* num_leaks, size_t* leak_bytes) {
  MEM_ALOGI("sweeping process %d for unreachable memory", pid_);
  leaks.clear();

  if (!heap_walker_.DetectLeaks()) {
    return false;
  }

  allocator::vector<Range> leaked1{allocator_};
  heap_walker_.Leaked(leaked1, 0, num_leaks, leak_bytes);

  MEM_ALOGI("sweeping done");

  MEM_ALOGI("folding related leaks");

  LeakFolding folding(allocator_, heap_walker_);
  if (!folding.FoldLeaks()) {
    return false;
  }

  allocator::vector<LeakFolding::Leak> leaked{allocator_};

  if (!folding.Leaked(leaked, num_leaks, leak_bytes)) {
    return false;
  }

  allocator::unordered_map<Leak::Backtrace, Leak*> backtrace_map{allocator_};

  // Prevent reallocations of backing memory so we can store pointers into it
  // in backtrace_map.
  leaks.reserve(leaked.size());

  for (auto& it : leaked) {
    leaks.emplace_back();
    Leak* leak = &leaks.back();

    ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it.range.begin),
                                                    leak->backtrace.frames,
                                                    leak->backtrace.max_frames);
    if (num_backtrace_frames > 0) {
      leak->backtrace.num_frames = num_backtrace_frames;

      auto inserted = backtrace_map.emplace(leak->backtrace, leak);
      if (!inserted.second) {
        // Leak with same backtrace already exists, drop this one and
        // increment similar counts on the existing one.
        leaks.pop_back();
        Leak* similar_leak = inserted.first->second;
        similar_leak->similar_count++;
        similar_leak->similar_size += it.range.size();
        similar_leak->similar_referenced_count += it.referenced_count;
        similar_leak->similar_referenced_size += it.referenced_size;
        similar_leak->total_size += it.range.size();
        similar_leak->total_size += it.referenced_size;
        continue;
      }
    }

    leak->begin = it.range.begin;
    leak->size = it.range.size();
    leak->referenced_count = it.referenced_count;
    leak->referenced_size = it.referenced_size;
    leak->total_size = leak->size + leak->referenced_size;
    memcpy(leak->contents, reinterpret_cast<void*>(it.range.begin),
           std::min(leak->size, Leak::contents_length));
  }

  MEM_ALOGI("folding done");

  std::sort(leaks.begin(), leaks.end(),
            [](const Leak& a, const Leak& b) { return a.total_size > b.total_size; });

  if (leaks.size() > limit) {
    leaks.resize(limit);
  }

  return true;
}

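// Helpers for mapping classification; shadow mappings created by the
// sanitizers (ASan/HWASan) must not be treated as application memory.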
static bool has_prefix(const allocator::string& s, const char* prefix) {
  int ret = s.compare(0, strlen(prefix), prefix);
  return ret == 0;
}

static bool is_sanitizer_mapping(const allocator::string& s) {
  return s == "[anon:low shadow]" || s == "[anon:high shadow]" || has_prefix(s, "[anon:hwasan");
}

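// Buckets readable mappings by name: malloc heaps, anonymous mappings,
// globals (.bss/.data/.rodata, plus the Dalvik heap, which is scanned as a
// root), and thread stacks. An executable mapping updates current_lib so that
// a following non-executable mapping with the same name is recognized as that
// library's data segment.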
bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings,
                                      allocator::vector<Mapping>& heap_mappings,
                                      allocator::vector<Mapping>& anon_mappings,
                                      allocator::vector<Mapping>& globals_mappings,
                                      allocator::vector<Mapping>& stack_mappings) {
  heap_mappings.clear();
  anon_mappings.clear();
  globals_mappings.clear();
  stack_mappings.clear();

  allocator::string current_lib{allocator_};

  for (auto it = mappings.begin(); it != mappings.end(); it++) {
    if (it->execute) {
      current_lib = it->name;
      continue;
    }

    if (!it->read) {
      continue;
    }

    const allocator::string mapping_name{it->name, allocator_};

    if (mapping_name == "[anon:.bss]") {
      // named .bss section
      globals_mappings.emplace_back(*it);
    } else if (mapping_name == current_lib) {
      // .rodata or .data section
      globals_mappings.emplace_back(*it);
    } else if (mapping_name == "[anon:libc_malloc]") {
      // named malloc mapping
      heap_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[anon:dalvik-")) {
      // named dalvik heap mapping
      globals_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[stack")) {
      // named stack mapping
      stack_mappings.emplace_back(*it);
    } else if (mapping_name.size() == 0) {
      globals_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[anon:") &&
               mapping_name != "[anon:leak_detector_malloc]" &&
               !is_sanitizer_mapping(mapping_name)) {
      // TODO(ccross): it would be nice to treat named anonymous mappings as
      // possible leaks, but naming something in a .bss or .data section makes
      // it impossible to distinguish them from mmaped and then named mappings.
      globals_mappings.emplace_back(*it);
    }
  }

  return true;
}

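// Returns "s" unless val is exactly 1, for pluralizing log messages.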
template <typename T>
static inline const char* plural(T val) {
  return (val == 1) ? "" : "s";
}

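// Top-level entry point. Leak detection runs in three contexts:
//   1. The original thread disables malloc and waits while memory is frozen.
//   2. A ptracer "collection thread" attaches to every thread in the process,
//      snapshots registers, stacks, /proc/pid/maps and binder references,
//      then forks.
//   3. The forked child walks the copy-on-write snapshot of the heap and
//      sends its results back over a pipe.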
bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit) {
  int parent_pid = getpid();
  int parent_tid = gettid();

  Heap heap;

  Semaphore continue_parent_sem;
  LeakPipe pipe;

  PtracerThread thread{[&]() -> int {
    /////////////////////////////////////////////
    // Collection thread
    /////////////////////////////////////////////
    MEM_ALOGI("collecting thread info for process %d...", parent_pid);

    ThreadCapture thread_capture(parent_pid, heap);
    allocator::vector<ThreadInfo> thread_info(heap);
    allocator::vector<Mapping> mappings(heap);
    allocator::vector<uintptr_t> refs(heap);

    // ptrace all the threads
    if (!thread_capture.CaptureThreads()) {
      continue_parent_sem.Post();
      return 1;
    }

    // collect register contents and stacks
    if (!thread_capture.CapturedThreadInfo(thread_info)) {
      continue_parent_sem.Post();
      return 1;
    }

    // snapshot /proc/pid/maps
    if (!ProcessMappings(parent_pid, mappings)) {
      continue_parent_sem.Post();
      return 1;
    }

    if (!BinderReferences(refs)) {
      continue_parent_sem.Post();
      return 1;
    }

    // malloc must be enabled to call fork, at_fork handlers take the same
    // locks as ScopedDisableMalloc.  All threads are paused in ptrace, so
    // memory state is still consistent.  Unfreeze the original thread so it
    // can drop the malloc locks, it will block until the collection thread
    // exits.
    thread_capture.ReleaseThread(parent_tid);
    continue_parent_sem.Post();

    // fork a process to do the heap walking
    int ret = fork();
    if (ret < 0) {
      return 1;
    } else if (ret == 0) {
      /////////////////////////////////////////////
      // Heap walker process
      /////////////////////////////////////////////
      // Examine memory state in the child using the data collected above and
      // the CoW snapshot of the process memory contents.
      if (!pipe.OpenSender()) {
        _exit(1);
      }

      MemUnreachable unreachable{parent_pid, heap};

      if (!unreachable.CollectAllocations(thread_info, mappings, refs)) {
        _exit(2);
      }
      size_t num_allocations = unreachable.Allocations();
      size_t allocation_bytes = unreachable.AllocationBytes();

      allocator::vector<Leak> leaks{heap};

      size_t num_leaks = 0;
      size_t leak_bytes = 0;
      bool ok = unreachable.GetUnreachableMemory(leaks, limit, &num_leaks, &leak_bytes);

      ok = ok && pipe.Sender().Send(num_allocations);
      ok = ok && pipe.Sender().Send(allocation_bytes);
      ok = ok && pipe.Sender().Send(num_leaks);
      ok = ok && pipe.Sender().Send(leak_bytes);
      ok = ok && pipe.Sender().SendVector(leaks);

      if (!ok) {
        _exit(3);
      }

      _exit(0);
    } else {
      // Nothing left to do in the collection thread, return immediately,
      // releasing all the captured threads.
      MEM_ALOGI("collection thread done");
      return 0;
    }
  }};
  /////////////////////////////////////////////
  // Original thread
  /////////////////////////////////////////////

  {
    // Disable malloc to get a consistent view of memory
    ScopedDisableMalloc disable_malloc;

    // Start the collection thread
    thread.Start();

    // Wait for the collection thread to signal that it is ready to fork the
    // heap walker process.
    continue_parent_sem.Wait(30s);

    // Re-enable malloc so the collection thread can fork.
  }

  // Wait for the collection thread to exit
  int ret = thread.Join();
  if (ret != 0) {
    return false;
  }

  // Get a pipe from the heap walker process.  Transferring a new pipe fd
  // ensures no other forked processes can have it open, so when the heap
  // walker process dies the remote side of the pipe will close.
  if (!pipe.OpenReceiver()) {
    return false;
  }

  bool ok = true;
  ok = ok && pipe.Receiver().Receive(&info.num_allocations);
  ok = ok && pipe.Receiver().Receive(&info.allocation_bytes);
  ok = ok && pipe.Receiver().Receive(&info.num_leaks);
  ok = ok && pipe.Receiver().Receive(&info.leak_bytes);
  ok = ok && pipe.Receiver().ReceiveVector(info.leaks);
  if (!ok) {
    return false;
  }

  MEM_ALOGI("unreachable memory detection done");
  MEM_ALOGE("%zu bytes in %zu allocation%s unreachable out of %zu bytes in %zu allocation%s",
            info.leak_bytes, info.num_leaks, plural(info.num_leaks), info.allocation_bytes,
            info.num_allocations, plural(info.num_allocations));

  return true;
}

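// Renders a single leak as text: size and address, referenced/similar
// summaries, an optional hex+ASCII dump of the first contents_length bytes,
// and the allocation backtrace when one was captured.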
std::string Leak::ToString(bool log_contents) const {
  std::ostringstream oss;

  oss << " " << std::dec << size;
  oss << " bytes unreachable at ";
  oss << std::hex << begin;
  oss << std::endl;
  if (referenced_count > 0) {
    oss << std::dec;
    oss << " referencing " << referenced_size << " unreachable bytes";
    oss << " in " << referenced_count << " allocation" << plural(referenced_count);
    oss << std::endl;
  }
  if (similar_count > 0) {
    oss << std::dec;
    oss << " and " << similar_size << " similar unreachable bytes";
    oss << " in " << similar_count << " allocation" << plural(similar_count);
    oss << std::endl;
    if (similar_referenced_count > 0) {
      oss << " referencing " << similar_referenced_size << " unreachable bytes";
      oss << " in " << similar_referenced_count << " allocation"
          << plural(similar_referenced_count);
      oss << std::endl;
    }
  }

  if (log_contents) {
    const int bytes_per_line = 16;
    const size_t bytes = std::min(size, contents_length);

    if (bytes == size) {
      oss << " contents:" << std::endl;
    } else {
      oss << " first " << bytes << " bytes of contents:" << std::endl;
    }

    for (size_t i = 0; i < bytes; i += bytes_per_line) {
      oss << " " << std::hex << begin + i << ": ";
      size_t j;
      oss << std::setfill('0');
      for (j = i; j < bytes && j < i + bytes_per_line; j++) {
        oss << std::setw(2) << static_cast<int>(contents[j]) << " ";
      }
      oss << std::setfill(' ');
      for (; j < i + bytes_per_line; j++) {
        // Three spaces per missing byte, matching the "%02x " column width
        // above, so the ASCII column stays aligned on the last line.
        oss << "   ";
      }
      for (j = i; j < bytes && j < i + bytes_per_line; j++) {
        char c = contents[j];
        if (c < ' ' || c >= 0x7f) {
          c = '.';
        }
        oss << c;
      }
      oss << std::endl;
    }
  }
  if (backtrace.num_frames > 0) {
    oss << backtrace_string(backtrace.frames, backtrace.num_frames);
  }

  return oss.str();
}

std::string UnreachableMemoryInfo::ToString(bool log_contents) const {
  std::ostringstream oss;
  oss << " " << leak_bytes << " bytes in ";
  oss << num_leaks << " unreachable allocation" << plural(num_leaks);
  oss << std::endl;
  oss << " ABI: '" ABI_STRING "'" << std::endl;
  oss << std::endl;

  for (auto it = leaks.begin(); it != leaks.end(); it++) {
    oss << it->ToString(log_contents);
    oss << std::endl;
  }

  return oss.str();
}

UnreachableMemoryInfo::~UnreachableMemoryInfo() {
  // Clear the memory that holds the leaks, otherwise the next attempt to
  // detect leaks may find the old data (for example in the jemalloc tcache)
  // and consider all the leaks to be referenced.
  memset(leaks.data(), 0, leaks.capacity() * sizeof(Leak));

  std::vector<Leak> tmp;
  leaks.swap(tmp);

  // Disable and re-enable malloc to flush the jemalloc tcache to make sure
  // there are no copies of the leaked pointer addresses there.
  malloc_disable();
  malloc_enable();
}

std::string GetUnreachableMemoryString(bool log_contents, size_t limit) {
  UnreachableMemoryInfo info;
  if (!GetUnreachableMemory(info, limit)) {
    return "Failed to get unreachable memory\n"
           "If you are trying to get unreachable memory from a system app\n"
           "(like com.android.systemui), disable selinux first using\n"
           "setenforce 0\n";
  }

  return info.ToString(log_contents);
}

}  // namespace android

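// Global entry points outside the android namespace: convenience wrappers a
// process calls to log its own leaks or to assert leak-freedom in tests.
// A minimal usage sketch (hypothetical caller, assuming this library is
// linked and the process is allowed to ptrace itself):
//
//   if (!NoLeaks()) {
//     LogUnreachableMemory(true /* log_contents */, 100 /* limit */);
//   }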
bool LogUnreachableMemory(bool log_contents, size_t limit) {
  android::UnreachableMemoryInfo info;
  if (!android::GetUnreachableMemory(info, limit)) {
    return false;
  }

  for (auto it = info.leaks.begin(); it != info.leaks.end(); it++) {
    MEM_ALOGE("%s", it->ToString(log_contents).c_str());
  }
  return true;
}

bool NoLeaks() {
  android::UnreachableMemoryInfo info;
  if (!android::GetUnreachableMemory(info, 0)) {
    return false;
  }

  return info.num_leaks == 0;
}